repo (string, 1-152 chars, nullable) | file (string, 15-205 chars) | code (string, 0-41.6M chars) | file_length (int64, 0-41.6M) | avg_line_length (float64, 0-1.81M) | max_line_length (int64, 0-12.7M) | extension_type (string, 90 classes)
---|---|---|---|---|---|---|
null | ceph-main/src/crimson/crush/CrushLocation.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iosfwd>
#include <map>
#include <string>
#if FMT_VERSION >= 90000
#include <fmt/ostream.h>
#endif
#include <seastar/core/seastar.hh>
namespace crimson::crush {
class CrushLocation {
public:
explicit CrushLocation() {
}
seastar::future<> update_from_conf(); ///< refresh from config
seastar::future<> init_on_startup();
seastar::future<> update_from_hook(); ///< call hook, if present
std::multimap<std::string, std::string> get_location() const;
private:
void _parse(const std::string& s);
std::multimap<std::string, std::string> loc;
};
std::ostream& operator<<(std::ostream& os, const CrushLocation& loc);
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::crush::CrushLocation> : fmt::ostream_formatter {};
#endif
| 884 | 22.289474 | 93 | h |
null | ceph-main/src/crimson/mgr/client.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "client.h"
#include <seastar/core/sleep.hh>
#include "crimson/common/log.h"
#include "crimson/net/Connection.h"
#include "crimson/net/Messenger.h"
#include "messages/MMgrConfigure.h"
#include "messages/MMgrMap.h"
#include "messages/MMgrOpen.h"
namespace {
seastar::logger& logger()
{
return crimson::get_logger(ceph_subsys_mgrc);
}
}
using crimson::common::local_conf;
namespace crimson::mgr
{
Client::Client(crimson::net::Messenger& msgr,
WithStats& with_stats)
: msgr{msgr},
with_stats{with_stats},
report_timer{[this] {report();}}
{}
seastar::future<> Client::start()
{
return seastar::now();
}
seastar::future<> Client::stop()
{
logger().info("{}", __func__);
report_timer.cancel();
auto fut = gate.close();
if (conn) {
conn->mark_down();
}
return fut;
}
std::optional<seastar::future<>>
Client::ms_dispatch(crimson::net::ConnectionRef conn, MessageRef m)
{
bool dispatched = true;
gate.dispatch_in_background(__func__, *this, [this, conn, &m, &dispatched] {
switch(m->get_type()) {
case MSG_MGR_MAP:
return handle_mgr_map(conn, boost::static_pointer_cast<MMgrMap>(m));
case MSG_MGR_CONFIGURE:
return handle_mgr_conf(conn, boost::static_pointer_cast<MMgrConfigure>(m));
default:
dispatched = false;
return seastar::now();
}
});
return (dispatched ? std::make_optional(seastar::now()) : std::nullopt);
}
void Client::ms_handle_connect(
crimson::net::ConnectionRef c,
seastar::shard_id new_shard)
{
ceph_assert_always(new_shard == seastar::this_shard_id());
gate.dispatch_in_background(__func__, *this, [this, c] {
if (conn == c) {
// ask for the mgrconfigure message
auto m = crimson::make_message<MMgrOpen>();
m->daemon_name = local_conf()->name.get_id();
return conn->send(std::move(m));
} else {
return seastar::now();
}
});
}
void Client::ms_handle_reset(crimson::net::ConnectionRef c, bool /* is_replace */)
{
gate.dispatch_in_background(__func__, *this, [this, c] {
if (conn == c) {
report_timer.cancel();
return reconnect();
} else {
return seastar::now();
}
});
}
seastar::future<> Client::reconnect()
{
if (conn) {
conn->mark_down();
conn = {};
}
if (!mgrmap.get_available()) {
logger().warn("No active mgr available yet");
return seastar::now();
}
auto retry_interval = std::chrono::duration<double>(
local_conf().get_val<double>("mgr_connect_retry_interval"));
auto a_while = std::chrono::duration_cast<seastar::steady_clock_type::duration>(
retry_interval);
return seastar::sleep(a_while).then([this] {
auto peer = mgrmap.get_active_addrs().pick_addr(msgr.get_myaddr().get_type());
if (peer == entity_addr_t{}) {
// crimson msgr only uses the first bound addr
logger().error("mgr.{} does not have an addr compatible with me",
mgrmap.get_active_name());
return;
}
conn = msgr.connect(peer, CEPH_ENTITY_TYPE_MGR);
});
}
seastar::future<> Client::handle_mgr_map(crimson::net::ConnectionRef,
Ref<MMgrMap> m)
{
mgrmap = m->get_map();
if (!conn) {
return reconnect();
} else if (conn->get_peer_addr() !=
mgrmap.get_active_addrs().legacy_addr()) {
return reconnect();
} else {
return seastar::now();
}
}
seastar::future<> Client::handle_mgr_conf(crimson::net::ConnectionRef,
Ref<MMgrConfigure> m)
{
logger().info("{} {}", __func__, *m);
auto report_period = std::chrono::seconds{m->stats_period};
if (report_period.count()) {
if (report_timer.armed()) {
report_timer.rearm(report_timer.get_timeout(), report_period);
} else {
report_timer.arm_periodic(report_period);
}
} else {
report_timer.cancel();
}
return seastar::now();
}
void Client::report()
{
gate.dispatch_in_background(__func__, *this, [this] {
if (!conn) {
logger().warn("report: no conn available; raport skipped");
return seastar::now();
}
return with_stats.get_stats(
).then([this](auto &&pg_stats) {
return conn->send(std::move(pg_stats));
});
});
}
void Client::print(std::ostream& out) const
{
out << "mgrc ";
}
}
| 4,411 | 24.211429 | 82 | cc |
null | ceph-main/src/crimson/mgr/client.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/timer.hh>
#include "crimson/common/gated.h"
#include "crimson/net/Dispatcher.h"
#include "crimson/net/Fwd.h"
#include "mon/MgrMap.h"
template<typename Message> using Ref = boost::intrusive_ptr<Message>;
namespace crimson::net {
class Messenger;
}
class MMgrMap;
class MMgrConfigure;
namespace crimson::mgr
{
// implement WithStats if you want to report stats to mgr periodically
class WithStats {
public:
virtual seastar::future<MessageURef> get_stats() const = 0;
virtual ~WithStats() {}
};
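// A minimal sketch of a WithStats implementation (illustrative only, not part
// of this header): Client calls get_stats() on every report tick and sends the
// returned message to the active mgr. The payload type below is an assumption;
// an OSD-style reporter would return its own stats message.
//
//   class ExampleStatsReporter final : public WithStats {
//   public:
//     seastar::future<MessageURef> get_stats() const override {
//       auto report = crimson::make_message<MPGStats>();  // assumed payload type
//       // ...fill in the daemon's current statistics...
//       return seastar::make_ready_future<MessageURef>(std::move(report));
//     }
//   };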
class Client : public crimson::net::Dispatcher {
public:
Client(crimson::net::Messenger& msgr,
WithStats& with_stats);
seastar::future<> start();
seastar::future<> stop();
void report();
private:
std::optional<seastar::future<>> ms_dispatch(
crimson::net::ConnectionRef conn, Ref<Message> m) override;
void ms_handle_reset(crimson::net::ConnectionRef conn, bool is_replace) final;
void ms_handle_connect(crimson::net::ConnectionRef conn, seastar::shard_id) final;
seastar::future<> handle_mgr_map(crimson::net::ConnectionRef conn,
Ref<MMgrMap> m);
seastar::future<> handle_mgr_conf(crimson::net::ConnectionRef conn,
Ref<MMgrConfigure> m);
seastar::future<> reconnect();
void print(std::ostream&) const;
friend std::ostream& operator<<(std::ostream& out, const Client& client);
private:
MgrMap mgrmap;
crimson::net::Messenger& msgr;
WithStats& with_stats;
crimson::net::ConnectionRef conn;
seastar::timer<seastar::lowres_clock> report_timer;
crimson::common::Gated gate;
};
inline std::ostream& operator<<(std::ostream& out, const Client& client) {
client.print(out);
return out;
}
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::mgr::Client> : fmt::ostream_formatter {};
#endif
| 1,908 | 25.887324 | 84 | h |
null | ceph-main/src/crimson/mon/MonClient.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "MonClient.h"
#include <random>
#include <fmt/ranges.h>
#include <seastar/core/future-util.hh>
#include <seastar/core/lowres_clock.hh>
#include <seastar/core/shared_future.hh>
#include <seastar/util/log.hh>
#include "auth/AuthClientHandler.h"
#include "auth/RotatingKeyRing.h"
#include "common/hostname.h"
#include "crimson/auth/KeyRing.h"
#include "crimson/common/config_proxy.h"
#include "crimson/common/log.h"
#include "crimson/common/logclient.h"
#include "crimson/net/Connection.h"
#include "crimson/net/Errors.h"
#include "crimson/net/Messenger.h"
#include "messages/MAuth.h"
#include "messages/MAuthReply.h"
#include "messages/MConfig.h"
#include "messages/MLogAck.h"
#include "messages/MMonCommand.h"
#include "messages/MMonCommandAck.h"
#include "messages/MMonGetMap.h"
#include "messages/MMonGetVersion.h"
#include "messages/MMonGetVersionReply.h"
#include "messages/MMonMap.h"
#include "messages/MMonSubscribe.h"
#include "messages/MMonSubscribeAck.h"
using std::string;
using std::tuple;
using std::vector;
namespace {
seastar::logger& logger()
{
return crimson::get_logger(ceph_subsys_monc);
}
}
namespace crimson::mon {
using crimson::common::local_conf;
class Connection : public seastar::enable_shared_from_this<Connection> {
public:
Connection(const AuthRegistry& auth_registry,
crimson::net::ConnectionRef conn,
KeyRing* keyring);
enum class auth_result_t {
success = 0,
failure,
canceled
};
seastar::future<> handle_auth_reply(Ref<MAuthReply> m);
// v2
seastar::future<auth_result_t> authenticate_v2();
auth::AuthClient::auth_request_t
get_auth_request(const EntityName& name,
uint32_t want_keys);
using secret_t = string;
tuple<CryptoKey, secret_t, bufferlist>
handle_auth_reply_more(const ceph::buffer::list& bl);
int handle_auth_bad_method(uint32_t old_auth_method,
int result,
const std::vector<uint32_t>& allowed_methods,
const std::vector<uint32_t>& allowed_modes);
tuple<CryptoKey, secret_t, int>
handle_auth_done(uint64_t new_global_id,
const ceph::buffer::list& bl);
void close();
bool is_my_peer(const entity_addr_t& addr) const;
AuthAuthorizer* get_authorizer(entity_type_t peer) const;
KeyStore& get_keys();
seastar::future<> renew_tickets();
seastar::future<> renew_rotating_keyring();
crimson::net::ConnectionRef get_conn();
private:
std::unique_ptr<AuthClientHandler> create_auth(crimson::auth::method_t,
uint64_t global_id,
const EntityName& name,
uint32_t want_keys);
enum class request_t {
rotating,
general,
};
seastar::future<std::optional<auth_result_t>> do_auth_single(request_t);
seastar::future<auth_result_t> do_auth(request_t);
private:
bool closed = false;
seastar::shared_promise<Ref<MAuthReply>> auth_reply;
// v2
using clock_t = seastar::lowres_system_clock;
clock_t::time_point auth_start;
crimson::auth::method_t auth_method = 0;
std::optional<seastar::promise<auth_result_t>> auth_done;
const AuthRegistry& auth_registry;
crimson::net::ConnectionRef conn;
std::unique_ptr<AuthClientHandler> auth;
std::unique_ptr<RotatingKeyRing> rotating_keyring;
uint64_t global_id = 0;
clock_t::time_point last_rotating_renew_sent;
};
Connection::Connection(const AuthRegistry& auth_registry,
crimson::net::ConnectionRef conn,
KeyRing* keyring)
: auth_registry{auth_registry},
conn{conn},
rotating_keyring{
std::make_unique<RotatingKeyRing>(nullptr,
CEPH_ENTITY_TYPE_OSD,
keyring)}
{}
seastar::future<> Connection::handle_auth_reply(Ref<MAuthReply> m)
{
logger().info("{}", __func__);
ceph_assert(m);
auth_reply.set_value(m);
auth_reply = {};
return seastar::now();
}
seastar::future<> Connection::renew_tickets()
{
if (auth->need_tickets()) {
logger().info("{}: retrieving new tickets", __func__);
return do_auth(request_t::general).then([](const auth_result_t r) {
if (r == auth_result_t::failure) {
logger().info("renew_tickets: ignoring failed auth reply");
}
});
} else {
logger().debug("{}: don't need new tickets", __func__);
return seastar::now();
}
}
seastar::future<> Connection::renew_rotating_keyring()
{
auto now = clock_t::now();
auto ttl = std::chrono::seconds{
static_cast<long>(crimson::common::local_conf()->auth_service_ticket_ttl)};
auto cutoff = utime_t{now - std::min(std::chrono::seconds{30}, ttl / 4)};
if (!rotating_keyring->need_new_secrets(cutoff)) {
logger().debug("renew_rotating_keyring secrets are up-to-date "
"(they expire after {})", cutoff);
return seastar::now();
} else {
logger().info("renew_rotating_keyring renewing rotating keys "
" (they expired before {})", cutoff);
}
if ((now > last_rotating_renew_sent) &&
(now - last_rotating_renew_sent < std::chrono::seconds{1})) {
logger().info("renew_rotating_keyring called too often (last: {})",
utime_t{last_rotating_renew_sent});
return seastar::now();
}
last_rotating_renew_sent = now;
return do_auth(request_t::rotating).then([](const auth_result_t r) {
if (r == auth_result_t::failure) {
logger().info("renew_rotating_keyring: ignoring failed auth reply");
}
});
}
AuthAuthorizer* Connection::get_authorizer(entity_type_t peer) const
{
if (auth) {
return auth->build_authorizer(peer);
} else {
return nullptr;
}
}
KeyStore& Connection::get_keys() {
return *rotating_keyring;
}
std::unique_ptr<AuthClientHandler>
Connection::create_auth(crimson::auth::method_t protocol,
uint64_t global_id,
const EntityName& name,
uint32_t want_keys)
{
static crimson::common::CephContext cct;
std::unique_ptr<AuthClientHandler> auth;
auth.reset(AuthClientHandler::create(&cct,
protocol,
rotating_keyring.get()));
if (!auth) {
logger().error("no handler for protocol {}", protocol);
throw std::system_error(make_error_code(
crimson::net::error::negotiation_failure));
}
auth->init(name);
auth->set_want_keys(want_keys);
auth->set_global_id(global_id);
return auth;
}
seastar::future<std::optional<Connection::auth_result_t>>
Connection::do_auth_single(Connection::request_t what)
{
auto m = crimson::make_message<MAuth>();
m->protocol = auth->get_protocol();
auth->prepare_build_request();
switch (what) {
case request_t::rotating:
auth->build_rotating_request(m->auth_payload);
break;
case request_t::general:
if (int ret = auth->build_request(m->auth_payload); ret) {
logger().error("missing/bad key for '{}'", local_conf()->name);
throw std::system_error(make_error_code(
crimson::net::error::negotiation_failure));
}
break;
default:
assert(0);
}
logger().info("sending {}", *m);
return conn->send(std::move(m)).then([this] {
logger().info("waiting");
return auth_reply.get_shared_future();
}).then([this, life_extender=shared_from_this()] (Ref<MAuthReply> m) {
if (!m) {
ceph_assert(closed);
logger().info("do_auth_single: connection closed");
return std::make_optional(auth_result_t::canceled);
}
logger().info("do_auth_single: {} returns {}: {}",
*conn, *m, m->result);
auto p = m->result_bl.cbegin();
auto ret = auth->handle_response(m->result, p,
nullptr, nullptr);
std::optional<Connection::auth_result_t> auth_result;
switch (ret) {
case -EAGAIN:
auth_result = std::nullopt;
break;
case 0:
auth_result = auth_result_t::success;
break;
default:
auth_result = auth_result_t::failure;
logger().error(
"do_auth_single: got error {} on mon {}",
ret, conn->get_peer_addr());
break;
}
return auth_result;
});
}
seastar::future<Connection::auth_result_t>
Connection::do_auth(Connection::request_t what) {
return seastar::repeat_until_value(
[this, life_extender=shared_from_this(), what]() {
return do_auth_single(what);
});
}
seastar::future<Connection::auth_result_t> Connection::authenticate_v2()
{
auth_start = seastar::lowres_system_clock::now();
return conn->send(crimson::make_message<MMonGetMap>()).then([this] {
auth_done.emplace();
return auth_done->get_future();
});
}
auth::AuthClient::auth_request_t
Connection::get_auth_request(const EntityName& entity_name,
uint32_t want_keys)
{
// choose method
auth_method = [&] {
std::vector<crimson::auth::method_t> methods;
auth_registry.get_supported_methods(conn->get_peer_type(), &methods);
if (methods.empty()) {
logger().info("get_auth_request no methods is supported");
throw crimson::auth::error("no methods is supported");
}
return methods.front();
}();
std::vector<uint32_t> modes;
auth_registry.get_supported_modes(conn->get_peer_type(), auth_method,
&modes);
logger().info("method {} preferred_modes {}", auth_method, modes);
if (modes.empty()) {
throw crimson::auth::error("no modes is supported");
}
auth = create_auth(auth_method, global_id, entity_name, want_keys);
using ceph::encode;
bufferlist bl;
// initial request includes some boilerplate...
encode((char)AUTH_MODE_MON, bl);
encode(entity_name, bl);
encode(global_id, bl);
// and (maybe) some method-specific initial payload
auth->build_initial_request(&bl);
return {auth_method, modes, bl};
}
tuple<CryptoKey, Connection::secret_t, bufferlist>
Connection::handle_auth_reply_more(const ceph::buffer::list& payload)
{
CryptoKey session_key;
secret_t connection_secret;
bufferlist reply;
auto p = payload.cbegin();
int r = auth->handle_response(0, p, &session_key, &connection_secret);
if (r == -EAGAIN) {
auth->prepare_build_request();
auth->build_request(reply);
logger().info(" responding with {} bytes", reply.length());
return {session_key, connection_secret, reply};
} else if (r < 0) {
logger().error(" handle_response returned {}", r);
throw crimson::auth::error("unable to build auth");
} else {
logger().info("authenticated!");
std::terminate();
}
}
tuple<CryptoKey, Connection::secret_t, int>
Connection::handle_auth_done(uint64_t new_global_id,
const ceph::buffer::list& payload)
{
global_id = new_global_id;
auth->set_global_id(global_id);
auto p = payload.begin();
CryptoKey session_key;
secret_t connection_secret;
int r = auth->handle_response(0, p, &session_key, &connection_secret);
conn->set_last_keepalive_ack(auth_start);
if (auth_done) {
auth_done->set_value(auth_result_t::success);
auth_done.reset();
}
return {session_key, connection_secret, r};
}
int Connection::handle_auth_bad_method(uint32_t old_auth_method,
int result,
const std::vector<uint32_t>& allowed_methods,
const std::vector<uint32_t>& allowed_modes)
{
logger().info("old_auth_method {} result {} allowed_methods {}",
old_auth_method, cpp_strerror(result), allowed_methods);
std::vector<uint32_t> auth_supported;
auth_registry.get_supported_methods(conn->get_peer_type(), &auth_supported);
auto p = std::find(auth_supported.begin(), auth_supported.end(),
old_auth_method);
assert(p != auth_supported.end());
p = std::find_first_of(std::next(p), auth_supported.end(),
allowed_methods.begin(), allowed_methods.end());
if (p == auth_supported.end()) {
logger().error("server allowed_methods {} but i only support {}",
allowed_methods, auth_supported);
assert(auth_done);
auth_done->set_exception(std::system_error(make_error_code(
crimson::net::error::negotiation_failure)));
return -EACCES;
}
auth_method = *p;
logger().info("will try {} next", auth_method);
return 0;
}
void Connection::close()
{
logger().info("{}", __func__);
auth_reply.set_value(Ref<MAuthReply>(nullptr));
auth_reply = {};
if (auth_done) {
auth_done->set_value(auth_result_t::canceled);
auth_done.reset();
}
if (conn && !std::exchange(closed, true)) {
conn->mark_down();
}
}
bool Connection::is_my_peer(const entity_addr_t& addr) const
{
ceph_assert(conn);
return conn->get_peer_addr() == addr;
}
crimson::net::ConnectionRef Connection::get_conn() {
return conn;
}
Client::mon_command_t::mon_command_t(MURef<MMonCommand> req)
: req(std::move(req))
{}
Client::Client(crimson::net::Messenger& messenger,
crimson::common::AuthHandler& auth_handler)
// currently, crimson is OSD-only
: want_keys{CEPH_ENTITY_TYPE_MON |
CEPH_ENTITY_TYPE_OSD |
CEPH_ENTITY_TYPE_MGR},
timer{[this] { tick(); }},
msgr{messenger},
log_client{nullptr},
auth_registry{&cct},
auth_handler{auth_handler}
{}
Client::Client(Client&&) = default;
Client::~Client() = default;
seastar::future<> Client::start() {
entity_name = crimson::common::local_conf()->name;
auth_registry.refresh_config();
return load_keyring().then([this] {
return monmap.build_initial(crimson::common::local_conf(), false);
}).then([this] {
return authenticate();
}).then([this] {
auto interval =
std::chrono::duration_cast<seastar::lowres_clock::duration>(
std::chrono::duration<double>(
local_conf().get_val<double>("mon_client_ping_interval")));
timer.arm_periodic(interval);
});
}
seastar::future<> Client::load_keyring()
{
if (!auth_registry.is_supported_method(msgr.get_mytype(), CEPH_AUTH_CEPHX)) {
return seastar::now();
} else {
return crimson::auth::load_from_keyring(&keyring).then([](KeyRing* keyring) {
return crimson::auth::load_from_keyfile(keyring);
}).then([](KeyRing* keyring) {
return crimson::auth::load_from_key(keyring);
}).then([](KeyRing*) {
return seastar::now();
});
}
}
void Client::tick()
{
gate.dispatch_in_background(__func__, *this, [this] {
if (active_con) {
return seastar::when_all_succeed(wait_for_send_log(),
active_con->get_conn()->send_keepalive(),
active_con->renew_tickets(),
active_con->renew_rotating_keyring()).discard_result();
} else {
assert(is_hunting());
logger().info("{} continuing the hunt", __func__);
return authenticate();
}
});
}
seastar::future<> Client::wait_for_send_log() {
utime_t now = ceph_clock_now();
if (now > last_send_log + cct._conf->mon_client_log_interval) {
last_send_log = now;
return send_log(log_flushing_t::NO_FLUSH);
}
return seastar::now();
}
seastar::future<> Client::send_log(log_flushing_t flush_flag) {
if (log_client) {
if (auto lm = log_client->get_mon_log_message(flush_flag); lm) {
return send_message(std::move(lm));
}
more_log_pending = log_client->are_pending();
}
return seastar::now();
}
bool Client::is_hunting() const {
return !active_con;
}
std::optional<seastar::future<>>
Client::ms_dispatch(crimson::net::ConnectionRef conn, MessageRef m)
{
bool dispatched = true;
gate.dispatch_in_background(__func__, *this, [this, conn, &m, &dispatched] {
// we only care about these message types
switch (m->get_type()) {
case CEPH_MSG_MON_MAP:
return handle_monmap(*conn, boost::static_pointer_cast<MMonMap>(m));
case CEPH_MSG_AUTH_REPLY:
return handle_auth_reply(
*conn, boost::static_pointer_cast<MAuthReply>(m));
case CEPH_MSG_MON_SUBSCRIBE_ACK:
return handle_subscribe_ack(
boost::static_pointer_cast<MMonSubscribeAck>(m));
case CEPH_MSG_MON_GET_VERSION_REPLY:
return handle_get_version_reply(
boost::static_pointer_cast<MMonGetVersionReply>(m));
case MSG_MON_COMMAND_ACK:
return handle_mon_command_ack(
boost::static_pointer_cast<MMonCommandAck>(m));
case MSG_LOGACK:
return handle_log_ack(
boost::static_pointer_cast<MLogAck>(m));
case MSG_CONFIG:
return handle_config(
boost::static_pointer_cast<MConfig>(m));
default:
dispatched = false;
return seastar::now();
}
});
return (dispatched ? std::make_optional(seastar::now()) : std::nullopt);
}
void Client::ms_handle_reset(crimson::net::ConnectionRef conn, bool /* is_replace */)
{
gate.dispatch_in_background(__func__, *this, [this, conn] {
auto found = std::find_if(pending_conns.begin(), pending_conns.end(),
[peer_addr = conn->get_peer_addr()](auto& mc) {
return mc->is_my_peer(peer_addr);
});
if (found != pending_conns.end()) {
logger().warn("pending conn reset by {}", conn->get_peer_addr());
(*found)->close();
pending_conns.erase(found);
return seastar::now();
} else if (active_con && active_con->is_my_peer(conn->get_peer_addr())) {
logger().warn("active conn reset {}", conn->get_peer_addr());
return reopen_session(-1).then([this](bool opened) {
if (opened) {
return on_session_opened();
} else {
return seastar::now();
}
});
} else {
return seastar::now();
}
});
}
std::pair<std::vector<uint32_t>, std::vector<uint32_t>>
Client::get_supported_auth_methods(int peer_type)
{
std::vector<uint32_t> methods;
std::vector<uint32_t> modes;
auth_registry.get_supported_methods(peer_type, &methods, &modes);
return {methods, modes};
}
uint32_t Client::pick_con_mode(int peer_type,
uint32_t auth_method,
const std::vector<uint32_t>& preferred_modes)
{
return auth_registry.pick_mode(peer_type, auth_method, preferred_modes);
}
AuthAuthorizeHandler* Client::get_auth_authorize_handler(int peer_type,
int auth_method)
{
return auth_registry.get_handler(peer_type, auth_method);
}
int Client::handle_auth_request(crimson::net::Connection &conn,
AuthConnectionMeta &auth_meta,
bool more,
uint32_t auth_method,
const ceph::bufferlist& payload,
uint64_t *p_peer_global_id,
ceph::bufferlist *reply)
{
if (payload.length() == 0) {
return -EACCES;
}
auth_meta.auth_mode = payload[0];
if (auth_meta.auth_mode < AUTH_MODE_AUTHORIZER ||
auth_meta.auth_mode > AUTH_MODE_AUTHORIZER_MAX) {
return -EACCES;
}
AuthAuthorizeHandler* ah = get_auth_authorize_handler(conn.get_peer_type(),
auth_method);
if (!ah) {
logger().error("no AuthAuthorizeHandler found for auth method: {}",
auth_method);
return -EOPNOTSUPP;
}
auto authorizer_challenge = &auth_meta.authorizer_challenge;
if (auth_meta.skip_authorizer_challenge) {
logger().info("skipping challenge on {}", conn);
authorizer_challenge = nullptr;
}
if (!active_con) {
logger().info("auth request during inactivity period");
// let's instruct the client to come back later
return -EBUSY;
}
bool was_challenge = (bool)auth_meta.authorizer_challenge;
EntityName name;
AuthCapsInfo caps_info;
bool is_valid = ah->verify_authorizer(
&cct,
active_con->get_keys(),
payload,
auth_meta.get_connection_secret_length(),
reply,
&name,
p_peer_global_id,
&caps_info,
&auth_meta.session_key,
&auth_meta.connection_secret,
authorizer_challenge);
if (is_valid) {
auth_handler.handle_authentication(name, caps_info);
return 1;
}
if (!more && !was_challenge && auth_meta.authorizer_challenge) {
logger().info("added challenge on {}", conn);
return 0;
} else {
logger().info("bad authorizer on {}", conn);
return -EACCES;
}
}
auth::AuthClient::auth_request_t
Client::get_auth_request(crimson::net::Connection &conn,
AuthConnectionMeta &auth_meta)
{
logger().info("get_auth_request(conn={}, auth_method={})",
conn, auth_meta.auth_method);
// connection to mon?
if (conn.get_peer_type() == CEPH_ENTITY_TYPE_MON) {
auto found = std::find_if(pending_conns.begin(), pending_conns.end(),
[peer_addr = conn.get_peer_addr()](auto& mc) {
return mc->is_my_peer(peer_addr);
});
if (found == pending_conns.end()) {
throw crimson::auth::error{"unknown connection"};
}
return (*found)->get_auth_request(entity_name, want_keys);
} else {
// generate authorizer
if (!active_con) {
logger().error(" but no auth handler is set up");
throw crimson::auth::error("no auth available");
}
auto authorizer = active_con->get_authorizer(conn.get_peer_type());
if (!authorizer) {
logger().error("failed to build_authorizer for type {}",
ceph_entity_type_name(conn.get_peer_type()));
throw crimson::auth::error("unable to build auth");
}
auth_meta.authorizer.reset(authorizer);
auth_meta.auth_method = authorizer->protocol;
vector<uint32_t> modes;
auth_registry.get_supported_modes(conn.get_peer_type(),
auth_meta.auth_method,
&modes);
return {authorizer->protocol, modes, authorizer->bl};
}
}
ceph::bufferlist Client::handle_auth_reply_more(crimson::net::Connection &conn,
AuthConnectionMeta &auth_meta,
const bufferlist& bl)
{
if (conn.get_peer_type() == CEPH_ENTITY_TYPE_MON) {
auto found = std::find_if(pending_conns.begin(), pending_conns.end(),
[peer_addr = conn.get_peer_addr()](auto& mc) {
return mc->is_my_peer(peer_addr);
});
if (found == pending_conns.end()) {
throw crimson::auth::error{"unknown connection"};
}
bufferlist reply;
tie(auth_meta.session_key, auth_meta.connection_secret, reply) =
(*found)->handle_auth_reply_more(bl);
return reply;
} else {
// authorizer challenges
if (!active_con || !auth_meta.authorizer) {
logger().error("no authorizer?");
throw crimson::auth::error("no auth available");
}
auth_meta.authorizer->add_challenge(&cct, bl);
return auth_meta.authorizer->bl;
}
}
int Client::handle_auth_done(crimson::net::Connection &conn,
AuthConnectionMeta &auth_meta,
uint64_t global_id,
uint32_t /*con_mode*/,
const bufferlist& bl)
{
if (conn.get_peer_type() == CEPH_ENTITY_TYPE_MON) {
auto found = std::find_if(pending_conns.begin(), pending_conns.end(),
[peer_addr = conn.get_peer_addr()](auto& mc) {
return mc->is_my_peer(peer_addr);
});
if (found == pending_conns.end()) {
return -ENOENT;
}
int r = 0;
tie(auth_meta.session_key, auth_meta.connection_secret, r) =
(*found)->handle_auth_done(global_id, bl);
return r;
} else {
// verify authorizer reply
auto p = bl.begin();
if (!auth_meta.authorizer->verify_reply(p, &auth_meta.connection_secret)) {
logger().error("failed verifying authorizer reply");
return -EACCES;
}
auth_meta.session_key = auth_meta.authorizer->session_key;
return 0;
}
}
// Handle server's indication that the previous auth attempt failed
int Client::handle_auth_bad_method(crimson::net::Connection &conn,
AuthConnectionMeta &auth_meta,
uint32_t old_auth_method,
int result,
const std::vector<uint32_t>& allowed_methods,
const std::vector<uint32_t>& allowed_modes)
{
if (conn.get_peer_type() == CEPH_ENTITY_TYPE_MON) {
auto found = std::find_if(pending_conns.begin(), pending_conns.end(),
[peer_addr = conn.get_peer_addr()](auto& mc) {
return mc->is_my_peer(peer_addr);
});
if (found != pending_conns.end()) {
return (*found)->handle_auth_bad_method(
old_auth_method, result,
allowed_methods, allowed_modes);
} else {
return -ENOENT;
}
} else {
// huh...
logger().info("hmm, they didn't like {} result {}",
old_auth_method, cpp_strerror(result));
return -EACCES;
}
}
seastar::future<> Client::handle_monmap(crimson::net::Connection &conn,
Ref<MMonMap> m)
{
monmap.decode(m->monmapbl);
const auto peer_addr = conn.get_peer_addr();
auto cur_mon = monmap.get_name(peer_addr);
logger().info("got monmap {}, mon.{}, is now rank {}",
monmap.epoch, cur_mon, monmap.get_rank(cur_mon));
sub.got("monmap", monmap.get_epoch());
if (monmap.get_addr_name(peer_addr, cur_mon)) {
if (active_con) {
logger().info("handle_monmap: renewing tickets");
return seastar::when_all_succeed(
active_con->renew_tickets(),
active_con->renew_rotating_keyring()).then_unpack([] {
logger().info("handle_mon_map: renewed tickets");
});
} else {
return seastar::now();
}
} else {
logger().warn("mon.{} went away", cur_mon);
return reopen_session(-1).then([this](bool opened) {
if (opened) {
return on_session_opened();
} else {
return seastar::now();
}
});
}
}
seastar::future<> Client::handle_auth_reply(crimson::net::Connection &conn,
Ref<MAuthReply> m)
{
logger().info("handle_auth_reply {} returns {}: {}",
conn, *m, m->result);
auto found = std::find_if(pending_conns.begin(), pending_conns.end(),
[peer_addr = conn.get_peer_addr()](auto& mc) {
return mc->is_my_peer(peer_addr);
});
if (found != pending_conns.end()) {
return (*found)->handle_auth_reply(m);
} else if (active_con) {
return active_con->handle_auth_reply(m).then([this] {
return seastar::when_all_succeed(
active_con->renew_rotating_keyring(),
active_con->renew_tickets()).discard_result();
});
} else {
logger().error("unknown auth reply from {}", conn.get_peer_addr());
return seastar::now();
}
}
seastar::future<> Client::handle_subscribe_ack(Ref<MMonSubscribeAck> m)
{
sub.acked(m->interval);
return seastar::now();
}
Client::get_version_t Client::get_version(const std::string& map)
{
auto m = crimson::make_message<MMonGetVersion>();
auto tid = ++last_version_req_id;
m->handle = tid;
m->what = map;
auto& req = version_reqs[tid];
return send_message(std::move(m)).then([&req] {
return req.get_future();
});
}
seastar::future<>
Client::handle_get_version_reply(Ref<MMonGetVersionReply> m)
{
if (auto found = version_reqs.find(m->handle);
found != version_reqs.end()) {
auto& result = found->second;
logger().trace("{}: {} returns {}",
__func__, m->handle, m->version);
result.set_value(std::make_tuple(m->version, m->oldest_version));
version_reqs.erase(found);
} else {
logger().warn("{}: version request with handle {} not found",
__func__, m->handle);
}
return seastar::now();
}
seastar::future<> Client::handle_mon_command_ack(Ref<MMonCommandAck> m)
{
const auto tid = m->get_tid();
if (auto found = std::find_if(mon_commands.begin(),
mon_commands.end(),
[tid](auto& cmd) {
return cmd.req->get_tid() == tid;
});
found != mon_commands.end()) {
auto& command = *found;
logger().trace("{} {}", __func__, tid);
command.result.set_value(std::make_tuple(m->r, m->rs, std::move(m->get_data())));
mon_commands.erase(found);
} else {
logger().warn("{} {} not found", __func__, tid);
}
return seastar::now();
}
seastar::future<> Client::handle_log_ack(Ref<MLogAck> m)
{
if (log_client) {
return log_client->handle_log_ack(m).then([this] {
if (more_log_pending) {
return send_log(log_flushing_t::NO_FLUSH);
} else {
return seastar::now();
}
});
}
return seastar::now();
}
seastar::future<> Client::handle_config(Ref<MConfig> m)
{
return crimson::common::local_conf().set_mon_vals(m->config).then([this] {
if (config_updated) {
config_updated->set_value();
}
});
}
std::vector<unsigned> Client::get_random_mons(unsigned n) const
{
uint16_t min_priority = std::numeric_limits<uint16_t>::max();
for (const auto& m : monmap.mon_info) {
if (m.second.priority < min_priority) {
min_priority = m.second.priority;
}
}
vector<unsigned> ranks;
for (auto [name, info] : monmap.mon_info) {
if (info.priority == min_priority) {
ranks.push_back(monmap.get_rank(name));
}
}
std::random_device rd;
std::default_random_engine rng{rd()};
std::shuffle(ranks.begin(), ranks.end(), rng);
if (n == 0 || n > ranks.size()) {
return ranks;
} else {
return {ranks.begin(), ranks.begin() + n};
}
}
seastar::future<> Client::authenticate()
{
return reopen_session(-1).then([this](bool opened) {
if (opened) {
return on_session_opened();
} else {
return seastar::now();
}
});
}
seastar::future<> Client::stop()
{
logger().info("{}", __func__);
auto fut = gate.close();
timer.cancel();
ready_to_send = false;
for (auto& pending_con : pending_conns) {
pending_con->close();
}
if (active_con) {
active_con->close();
}
return fut;
}
static entity_addr_t choose_client_addr(
const entity_addrvec_t& my_addrs,
const entity_addrvec_t& client_addrs)
{
// here is where we decide which of the addrs to connect to. always prefer
// the first one, if we support it.
for (const auto& a : client_addrs.v) {
if (a.is_msgr2()) {
// FIXME: for ipv4 vs ipv6, check whether local host can handle ipv6 before
// trying it? for now, just pick whichever is listed first.
return a;
}
}
return entity_addr_t{};
}
seastar::future<bool> Client::reopen_session(int rank)
{
logger().info("{} to mon.{}", __func__, rank);
ready_to_send = false;
if (active_con) {
active_con->close();
active_con = nullptr;
ceph_assert(pending_conns.empty());
} else {
for (auto& pending_con : pending_conns) {
pending_con->close();
}
pending_conns.clear();
}
vector<unsigned> mons;
if (rank >= 0) {
mons.push_back(rank);
} else {
const auto parallel =
crimson::common::local_conf().get_val<uint64_t>("mon_client_hunt_parallel");
mons = get_random_mons(parallel);
}
pending_conns.reserve(mons.size());
return seastar::parallel_for_each(mons, [this](auto rank) {
auto peer = choose_client_addr(msgr.get_myaddrs(),
monmap.get_addrs(rank));
if (peer == entity_addr_t{}) {
// crimson msgr only uses the first bound addr
logger().warn("mon.{} does not have an addr compatible with me", rank);
return seastar::now();
}
logger().info("connecting to mon.{}", rank);
auto conn = msgr.connect(peer, CEPH_ENTITY_TYPE_MON);
auto& mc = pending_conns.emplace_back(
seastar::make_shared<Connection>(auth_registry, conn, &keyring));
assert(conn->get_peer_addr().is_msgr2());
return mc->authenticate_v2().then([peer, this](auto result) {
if (result == Connection::auth_result_t::success) {
_finish_auth(peer);
}
logger().debug("reopen_session mon connection attempts complete");
}).handle_exception([](auto ep) {
logger().error("mon connections failed with ep {}", ep);
return seastar::make_exception_future(ep);
});
}).then([this] {
if (active_con) {
return true;
} else {
logger().warn("cannot establish the active_con with any mon");
return false;
}
});
}
void Client::_finish_auth(const entity_addr_t& peer)
{
if (!is_hunting()) {
return;
}
logger().info("found mon.{}", monmap.get_name(peer));
auto found = std::find_if(
pending_conns.begin(), pending_conns.end(),
[peer](auto& conn) {
return conn->is_my_peer(peer);
});
if (found == pending_conns.end()) {
// Happens if another connection has won the race
ceph_assert(active_con && pending_conns.empty());
logger().info("no pending connection for mon.{}, peer {}",
monmap.get_name(peer), peer);
return;
}
ceph_assert(!active_con && !pending_conns.empty());
// It's too early to toggle the `ready_to_send` flag. It will
// be set after finishing the MAuth exchange and draining out
// the `pending_messages` queue.
active_con = std::move(*found);
*found = nullptr;
for (auto& conn : pending_conns) {
if (conn) {
conn->close();
}
}
pending_conns.clear();
}
Client::command_result_t
Client::run_command(std::string&& cmd,
bufferlist&& bl)
{
auto m = crimson::make_message<MMonCommand>(monmap.fsid);
auto tid = ++last_mon_command_id;
m->set_tid(tid);
m->cmd = {std::move(cmd)};
m->set_data(std::move(bl));
auto& command = mon_commands.emplace_back(crimson::make_message<MMonCommand>(*m));
return send_message(std::move(m)).then([&result=command.result] {
return result.get_future();
});
}
seastar::future<> Client::send_message(MessageURef m)
{
if (active_con && ready_to_send) {
assert(pending_messages.empty());
return active_con->get_conn()->send(std::move(m));
} else {
auto& delayed = pending_messages.emplace_back(std::move(m));
return delayed.pr.get_future();
}
}
seastar::future<> Client::on_session_opened()
{
return active_con->renew_rotating_keyring().then([this] {
if (!active_con) {
// the connection can be closed even in the middle of the opening sequence
logger().info("on_session_opened {}: connection closed", __LINE__);
return seastar::now();
}
for (auto& m : pending_messages) {
(void) active_con->get_conn()->send(std::move(m.msg));
m.pr.set_value();
}
pending_messages.clear();
ready_to_send = true;
return sub.reload() ? renew_subs() : seastar::now();
}).then([this] {
if (!active_con) {
logger().info("on_session_opened {}: connection closed", __LINE__);
return seastar::now();
}
return seastar::parallel_for_each(mon_commands,
[this](auto &command) {
return send_message(crimson::make_message<MMonCommand>(*command.req));
});
});
}
bool Client::sub_want(const std::string& what, version_t start, unsigned flags)
{
return sub.want(what, start, flags);
}
void Client::sub_got(const std::string& what, version_t have)
{
sub.got(what, have);
}
void Client::sub_unwant(const std::string& what)
{
sub.unwant(what);
}
bool Client::sub_want_increment(const std::string& what,
version_t start,
unsigned flags)
{
return sub.inc_want(what, start, flags);
}
seastar::future<> Client::renew_subs()
{
if (!sub.have_new()) {
logger().warn("{} - empty", __func__);
return seastar::now();
}
logger().trace("{}", __func__);
auto m = crimson::make_message<MMonSubscribe>();
m->what = sub.get_subs();
m->hostname = ceph_get_short_hostname();
return send_message(std::move(m)).then([this] {
sub.renewed();
});
}
seastar::future<> Client::wait_for_config()
{
assert(!config_updated);
config_updated = seastar::promise<>();
return config_updated->get_future();
}
void Client::print(std::ostream& out) const
{
out << "mon." << entity_name;
}
} // namespace crimson::mon
| 36,898 | 30.727429 | 94 | cc |
null | ceph-main/src/crimson/mon/MonClient.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <memory>
#include <vector>
#include <seastar/core/future.hh>
#include <seastar/core/gate.hh>
#include <seastar/core/lowres_clock.hh>
#include <seastar/core/shared_ptr.hh>
#include <seastar/core/timer.hh>
#include "auth/AuthRegistry.h"
#include "auth/KeyRing.h"
#include "common/ceph_context.h"
#include "crimson/auth/AuthClient.h"
#include "crimson/auth/AuthServer.h"
#include "crimson/common/auth_handler.h"
#include "crimson/common/gated.h"
#include "crimson/net/Dispatcher.h"
#include "crimson/net/Fwd.h"
#include "mon/MonMap.h"
#include "mon/MonSub.h"
template<typename Message> using Ref = boost::intrusive_ptr<Message>;
namespace crimson::net {
class Messenger;
}
class LogClient;
struct AuthAuthorizeHandler;
class MAuthReply;
struct MMonMap;
struct MMonSubscribeAck;
struct MMonGetVersionReply;
struct MMonCommand;
struct MMonCommandAck;
struct MLogAck;
struct MConfig;
enum class log_flushing_t;
namespace crimson::mon {
class Connection;
class Client : public crimson::net::Dispatcher,
public crimson::auth::AuthClient,
public crimson::auth::AuthServer
{
EntityName entity_name;
KeyRing keyring;
const uint32_t want_keys;
MonMap monmap;
bool ready_to_send = false;
seastar::shared_ptr<Connection> active_con;
std::vector<seastar::shared_ptr<Connection>> pending_conns;
seastar::timer<seastar::lowres_clock> timer;
crimson::net::Messenger& msgr;
LogClient *log_client;
bool more_log_pending = false;
utime_t last_send_log;
seastar::future<> send_log(log_flushing_t flush_flag);
seastar::future<> wait_for_send_log();
// commands
using get_version_t = seastar::future<std::tuple<version_t, version_t>>;
ceph_tid_t last_version_req_id = 0;
std::map<ceph_tid_t, typename get_version_t::promise_type> version_reqs;
ceph_tid_t last_mon_command_id = 0;
using command_result_t =
seastar::future<std::tuple<std::int32_t, std::string, ceph::bufferlist>>;
struct mon_command_t {
MURef<MMonCommand> req;
typename command_result_t::promise_type result;
mon_command_t(MURef<MMonCommand> req);
};
std::vector<mon_command_t> mon_commands;
MonSub sub;
public:
Client(crimson::net::Messenger&, crimson::common::AuthHandler&);
Client(Client&&);
~Client();
seastar::future<> start();
seastar::future<> stop();
void set_log_client(LogClient *clog) {
log_client = clog;
}
const uuid_d& get_fsid() const {
return monmap.fsid;
}
get_version_t get_version(const std::string& map);
command_result_t run_command(std::string&& cmd,
bufferlist&& bl);
seastar::future<> send_message(MessageURef);
bool sub_want(const std::string& what, version_t start, unsigned flags);
void sub_got(const std::string& what, version_t have);
void sub_unwant(const std::string& what);
bool sub_want_increment(const std::string& what, version_t start, unsigned flags);
seastar::future<> renew_subs();
seastar::future<> wait_for_config();
void print(std::ostream&) const;
private:
// AuthServer methods
std::pair<std::vector<uint32_t>, std::vector<uint32_t>>
get_supported_auth_methods(int peer_type) final;
uint32_t pick_con_mode(int peer_type,
uint32_t auth_method,
const std::vector<uint32_t>& preferred_modes) final;
AuthAuthorizeHandler* get_auth_authorize_handler(int peer_type,
int auth_method) final;
int handle_auth_request(crimson::net::Connection &conn,
AuthConnectionMeta &auth_meta,
bool more,
uint32_t auth_method,
const ceph::bufferlist& payload,
uint64_t *p_peer_global_id,
ceph::bufferlist *reply) final;
crimson::common::CephContext cct; // for auth_registry
AuthRegistry auth_registry;
crimson::common::AuthHandler& auth_handler;
// AuthClient methods
crimson::auth::AuthClient::auth_request_t
get_auth_request(crimson::net::Connection &conn,
AuthConnectionMeta &auth_meta) final;
// Handle server's request to continue the handshake
ceph::bufferlist handle_auth_reply_more(crimson::net::Connection &conn,
AuthConnectionMeta &auth_meta,
const bufferlist& bl) final;
// Handle server's indication that authentication succeeded
int handle_auth_done(crimson::net::Connection &conn,
AuthConnectionMeta &auth_meta,
uint64_t global_id,
uint32_t con_mode,
const bufferlist& bl) final;
// Handle server's indication that the previous auth attempt failed
int handle_auth_bad_method(crimson::net::Connection &conn,
AuthConnectionMeta &auth_meta,
uint32_t old_auth_method,
int result,
const std::vector<uint32_t>& allowed_methods,
const std::vector<uint32_t>& allowed_modes) final;
private:
void tick();
std::optional<seastar::future<>> ms_dispatch(crimson::net::ConnectionRef conn,
MessageRef m) override;
void ms_handle_reset(crimson::net::ConnectionRef conn, bool is_replace) override;
seastar::future<> handle_monmap(crimson::net::Connection &conn,
Ref<MMonMap> m);
seastar::future<> handle_auth_reply(crimson::net::Connection &conn,
Ref<MAuthReply> m);
seastar::future<> handle_subscribe_ack(Ref<MMonSubscribeAck> m);
seastar::future<> handle_get_version_reply(Ref<MMonGetVersionReply> m);
seastar::future<> handle_mon_command_ack(Ref<MMonCommandAck> m);
seastar::future<> handle_log_ack(Ref<MLogAck> m);
seastar::future<> handle_config(Ref<MConfig> m);
seastar::future<> on_session_opened();
private:
seastar::future<> load_keyring();
seastar::future<> authenticate();
bool is_hunting() const;
// @param rank rank of the monitor to connect to; if it is less than 0,
// try to connect to all monitors in the monmap until one of them
// is connected.
// @return true if a connection to a monitor is established
seastar::future<bool> reopen_session(int rank);
std::vector<unsigned> get_random_mons(unsigned n) const;
seastar::future<> _add_conn(unsigned rank, uint64_t global_id);
void _finish_auth(const entity_addr_t& peer);
crimson::common::Gated gate;
// messages that are waiting for the active_con to be available
struct pending_msg_t {
pending_msg_t(MessageURef m) : msg(std::move(m)) {}
MessageURef msg;
seastar::promise<> pr;
};
std::deque<pending_msg_t> pending_messages;
std::optional<seastar::promise<>> config_updated;
};
inline std::ostream& operator<<(std::ostream& out, const Client& client) {
client.print(out);
return out;
}
} // namespace crimson::mon
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::mon::Client> : fmt::ostream_formatter {};
#endif
| 6,818 | 30.136986 | 84 | h |
null | ceph-main/src/crimson/net/Connection.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <queue>
#include <seastar/core/future.hh>
#include <seastar/core/shared_ptr.hh>
#include "Fwd.h"
namespace crimson::net {
using seq_num_t = uint64_t;
/**
* Connection
*
* Abstraction for messenger connections.
*
* Except when otherwise specified, methods must be invoked from the core on which
* the connection originates.
*/
class Connection : public seastar::enable_shared_from_this<Connection> {
public:
using clock_t = seastar::lowres_system_clock;
Connection() {}
virtual ~Connection() {}
/**
* get_shard_id
*
* The shard id where the Connection is dispatching events and handling I/O.
*
* May be changed with the accept/connect events.
*/
virtual const seastar::shard_id get_shard_id() const = 0;
virtual const entity_name_t &get_peer_name() const = 0;
entity_type_t get_peer_type() const { return get_peer_name().type(); }
int64_t get_peer_id() const { return get_peer_name().num(); }
bool peer_is_mon() const { return get_peer_name().is_mon(); }
bool peer_is_mgr() const { return get_peer_name().is_mgr(); }
bool peer_is_mds() const { return get_peer_name().is_mds(); }
bool peer_is_osd() const { return get_peer_name().is_osd(); }
bool peer_is_client() const { return get_peer_name().is_client(); }
virtual const entity_addr_t &get_peer_addr() const = 0;
const entity_addrvec_t get_peer_addrs() const {
return entity_addrvec_t(get_peer_addr());
}
virtual const entity_addr_t &get_peer_socket_addr() const = 0;
virtual uint64_t get_features() const = 0;
bool has_feature(uint64_t f) const {
return get_features() & f;
}
/// true if the handshake has completed and no errors have been encountered
virtual bool is_connected() const = 0;
/**
* send
*
* Send a message over a connection that has completed its handshake.
*
* May be invoked from any core, but that requires chaining the returned
* futures to preserve ordering.
*/
virtual seastar::future<> send(MessageURef msg) = 0;
/**
* send_keepalive
*
* Send a keepalive message over a connection that has completed its
* handshake.
*
* May be invoked from any core, but that requires chaining the returned
* futures to preserve ordering.
*/
virtual seastar::future<> send_keepalive() = 0;
virtual clock_t::time_point get_last_keepalive() const = 0;
virtual clock_t::time_point get_last_keepalive_ack() const = 0;
// workaround for the monitor client
virtual void set_last_keepalive_ack(clock_t::time_point when) = 0;
// close the connection and cancel any pending futures from read/send,
// without dispatching any reset event
virtual void mark_down() = 0;
struct user_private_t {
virtual ~user_private_t() = default;
};
virtual bool has_user_private() const = 0;
virtual user_private_t &get_user_private() = 0;
virtual void set_user_private(std::unique_ptr<user_private_t>) = 0;
virtual void print(std::ostream& out) const = 0;
#ifdef UNIT_TESTS_BUILT
virtual bool is_closed() const = 0;
virtual bool is_closed_clean() const = 0;
virtual bool peer_wins() const = 0;
#endif
};
inline std::ostream& operator<<(std::ostream& out, const Connection& conn) {
out << "[";
conn.print(out);
out << "]";
return out;
}
} // namespace crimson::net
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::net::Connection> : fmt::ostream_formatter {};
#endif
| 3,853 | 25.763889 | 88 | h |
null | ceph-main/src/crimson/net/Dispatcher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "Fwd.h"
class AuthAuthorizer;
namespace crimson::net {
class Dispatcher {
public:
virtual ~Dispatcher() {}
// Dispatchers are put into a chain as described by the chain-of-responsibility
// pattern. If any of the dispatchers claims this message, it returns a valid
// future to prevent other dispatchers from processing it, and this is also
// used to throttle the connection if it's too busy.
virtual std::optional<seastar::future<>> ms_dispatch(ConnectionRef, MessageRef) = 0;
// The connection is accepted or recovered (lossless), all the followup
// events and messages will be dispatched to the new_shard.
//
// is_replace=true means the accepted connection has replaced
// another connecting connection with the same peer_addr, which currently only
// happens under lossy policy when both sides wish to connect to each other.
virtual void ms_handle_accept(ConnectionRef conn, seastar::shard_id new_shard, bool is_replace) {}
// The connection is (re)connected, all the followup events and messages will
// be dispatched to the new_shard.
virtual void ms_handle_connect(ConnectionRef conn, seastar::shard_id new_shard) {}
// a reset event is dispatched when the connection is closed unexpectedly.
//
// is_replace=true means the reset connection is going to be replaced by
// another accepting connection with the same peer_addr, which currently only
// happens under lossy policy when both sides wish to connect to each other.
virtual void ms_handle_reset(ConnectionRef conn, bool is_replace) {}
virtual void ms_handle_remote_reset(ConnectionRef conn) {}
};
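// A minimal Dispatcher sketch (illustrative only; assumes the message type
// constants are visible at the point of definition): returning an engaged
// future claims the message and may throttle the connection, while returning
// std::nullopt hands the message to the next dispatcher in the chain.
//
//   class ExampleDispatcher final : public Dispatcher {
//     std::optional<seastar::future<>> ms_dispatch(ConnectionRef,
//                                                  MessageRef m) override {
//       if (m->get_type() == CEPH_MSG_PING) {
//         // handle the message, possibly kicking off background work
//         return seastar::now();
//       }
//       return std::nullopt;  // not ours; let the next dispatcher look at it
//     }
//   };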
} // namespace crimson::net
| 2,071 | 36 | 100 | h |
null | ceph-main/src/crimson/net/Errors.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "Errors.h"
namespace crimson::net {
const std::error_category& net_category()
{
struct category : public std::error_category {
const char* name() const noexcept override {
return "crimson::net";
}
std::string message(int ev) const override {
switch (static_cast<error>(ev)) {
case error::success:
return "success";
case error::bad_connect_banner:
return "bad connect banner";
case error::bad_peer_address:
return "bad peer address";
case error::negotiation_failure:
return "negotiation failure";
case error::read_eof:
return "read eof";
case error::corrupted_message:
return "corrupted message";
case error::protocol_aborted:
return "protocol aborted";
default:
return "unknown";
}
}
};
static category instance;
return instance;
}
} // namespace crimson::net
| 1,363 | 25.230769 | 70 | cc |
null | ceph-main/src/crimson/net/Errors.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <system_error>
namespace crimson::net {
/// net error codes
enum class error {
success = 0,
bad_connect_banner,
bad_peer_address,
negotiation_failure,
read_eof,
corrupted_message,
protocol_aborted,
};
/// net error category
const std::error_category& net_category();
inline std::error_code make_error_code(error e)
{
return {static_cast<int>(e), net_category()};
}
inline std::error_condition make_error_condition(error e)
{
return {static_cast<int>(e), net_category()};
}
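// Usage sketch (illustrative): callers typically wrap these codes in
// std::system_error, e.g. the monitor client aborts a failed negotiation with
//
//   throw std::system_error(make_error_code(error::negotiation_failure));
//
// and, thanks to the is_error_condition_enum specialization below, a
// std::error_code can be compared directly against crimson::net::error values.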
} // namespace crimson::net
namespace std {
/// enables implicit conversion to std::error_condition
template <>
struct is_error_condition_enum<crimson::net::error> : public true_type {};
} // namespace std
| 1,133 | 20 | 74 | h |
null | ceph-main/src/crimson/net/FrameAssemblerV2.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include "FrameAssemblerV2.h"
#include "Errors.h"
#include "SocketConnection.h"
#ifdef UNIT_TESTS_BUILT
#include "Interceptor.h"
#endif
using ceph::msgr::v2::FrameAssembler;
using ceph::msgr::v2::FrameError;
using ceph::msgr::v2::preamble_block_t;
using ceph::msgr::v2::segment_t;
using ceph::msgr::v2::Tag;
namespace {
seastar::logger& logger() {
return crimson::get_logger(ceph_subsys_ms);
}
} // namespace anonymous
namespace crimson::net {
FrameAssemblerV2::FrameAssemblerV2(SocketConnection &_conn)
: conn{_conn}, sid{seastar::this_shard_id()}
{
assert(seastar::this_shard_id() == conn.get_messenger_shard_id());
}
FrameAssemblerV2::~FrameAssemblerV2()
{
assert(seastar::this_shard_id() == conn.get_messenger_shard_id());
assert(seastar::this_shard_id() == sid);
if (has_socket()) {
std::ignore = move_socket();
}
}
#ifdef UNIT_TESTS_BUILT
// should be consistent to intercept() in ProtocolV2.cc
void FrameAssemblerV2::intercept_frame(Tag tag, bool is_write)
{
assert(seastar::this_shard_id() == sid);
assert(has_socket());
if (conn.interceptor) {
auto type = is_write ? bp_type_t::WRITE : bp_type_t::READ;
// FIXME: doesn't support cross-core
auto action = conn.interceptor->intercept(
conn.get_local_shared_foreign_from_this(),
Breakpoint{tag, type});
// tolerate leaking future in tests
std::ignore = seastar::smp::submit_to(
socket->get_shard_id(),
[this, type, action] {
socket->set_trap(type, action, &conn.interceptor->blocker);
});
}
}
#endif
void FrameAssemblerV2::set_is_rev1(bool _is_rev1)
{
assert(seastar::this_shard_id() == sid);
is_rev1 = _is_rev1;
tx_frame_asm.set_is_rev1(_is_rev1);
rx_frame_asm.set_is_rev1(_is_rev1);
}
void FrameAssemblerV2::create_session_stream_handlers(
const AuthConnectionMeta &auth_meta,
bool crossed)
{
assert(seastar::this_shard_id() == sid);
session_stream_handlers = ceph::crypto::onwire::rxtx_t::create_handler_pair(
nullptr, auth_meta, is_rev1, crossed);
}
void FrameAssemblerV2::reset_handlers()
{
assert(seastar::this_shard_id() == sid);
session_stream_handlers = { nullptr, nullptr };
session_comp_handlers = { nullptr, nullptr };
}
FrameAssemblerV2::mover_t
FrameAssemblerV2::to_replace()
{
assert(seastar::this_shard_id() == sid);
assert(is_socket_valid());
clear();
return mover_t{
move_socket(),
std::move(session_stream_handlers),
std::move(session_comp_handlers)};
}
seastar::future<> FrameAssemblerV2::replace_by(FrameAssemblerV2::mover_t &&mover)
{
assert(seastar::this_shard_id() == sid);
clear();
session_stream_handlers = std::move(mover.session_stream_handlers);
session_comp_handlers = std::move(mover.session_comp_handlers);
if (has_socket()) {
return replace_shutdown_socket(std::move(mover.socket));
} else {
set_socket(std::move(mover.socket));
return seastar::now();
}
}
void FrameAssemblerV2::start_recording()
{
assert(seastar::this_shard_id() == sid);
record_io = true;
rxbuf.clear();
txbuf.clear();
}
FrameAssemblerV2::record_bufs_t
FrameAssemblerV2::stop_recording()
{
assert(seastar::this_shard_id() == sid);
ceph_assert_always(record_io == true);
record_io = false;
return record_bufs_t{std::move(rxbuf), std::move(txbuf)};
}
bool FrameAssemblerV2::has_socket() const
{
assert((socket && conn.socket) || (!socket && !conn.socket));
return bool(socket);
}
bool FrameAssemblerV2::is_socket_valid() const
{
assert(seastar::this_shard_id() == sid);
#ifndef NDEBUG
if (has_socket() && socket->get_shard_id() == sid) {
assert(socket->is_shutdown() == is_socket_shutdown);
}
#endif
return has_socket() && !is_socket_shutdown;
}
seastar::shard_id
FrameAssemblerV2::get_socket_shard_id() const
{
assert(seastar::this_shard_id() == sid);
assert(is_socket_valid());
return socket->get_shard_id();
}
SocketFRef FrameAssemblerV2::move_socket()
{
assert(has_socket());
conn.set_socket(nullptr);
return std::move(socket);
}
void FrameAssemblerV2::set_socket(SocketFRef &&new_socket)
{
assert(seastar::this_shard_id() == sid);
assert(!has_socket());
assert(new_socket);
socket = std::move(new_socket);
conn.set_socket(socket.get());
is_socket_shutdown = false;
assert(is_socket_valid());
}
void FrameAssemblerV2::learn_socket_ephemeral_port_as_connector(uint16_t port)
{
assert(seastar::this_shard_id() == sid);
assert(has_socket());
  // Note: may be called from a core other than the socket core
socket->learn_ephemeral_port_as_connector(port);
}
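// The templated interfaces below take may_cross_core: when true, the caller
// runs on the messenger shard and the operation is submitted to the socket
// shard; when false, the caller must already be on the socket shard and the
// socket is accessed directly.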
template <bool may_cross_core>
void FrameAssemblerV2::shutdown_socket(crimson::common::Gated *gate)
{
assert(seastar::this_shard_id() == sid);
assert(is_socket_valid());
is_socket_shutdown = true;
if constexpr (may_cross_core) {
assert(conn.get_messenger_shard_id() == sid);
assert(gate);
gate->dispatch_in_background("shutdown_socket", conn, [this] {
return seastar::smp::submit_to(
socket->get_shard_id(), [this] {
socket->shutdown();
});
});
} else {
assert(socket->get_shard_id() == sid);
assert(!gate);
socket->shutdown();
}
}
template void FrameAssemblerV2::shutdown_socket<true>(crimson::common::Gated *);
template void FrameAssemblerV2::shutdown_socket<false>(crimson::common::Gated *);
seastar::future<> FrameAssemblerV2::replace_shutdown_socket(SocketFRef &&new_socket)
{
assert(seastar::this_shard_id() == sid);
assert(has_socket());
assert(!is_socket_valid());
auto old_socket = move_socket();
auto old_socket_shard_id = old_socket->get_shard_id();
set_socket(std::move(new_socket));
return seastar::smp::submit_to(
old_socket_shard_id,
[old_socket = std::move(old_socket)]() mutable {
return old_socket->close(
).then([sock = std::move(old_socket)] {});
});
}
seastar::future<> FrameAssemblerV2::close_shutdown_socket()
{
assert(seastar::this_shard_id() == sid);
assert(has_socket());
assert(!is_socket_valid());
return seastar::smp::submit_to(
socket->get_shard_id(), [this] {
return socket->close();
});
}
template <bool may_cross_core>
seastar::future<ceph::bufferptr>
FrameAssemblerV2::read_exactly(std::size_t bytes)
{
assert(seastar::this_shard_id() == sid);
assert(has_socket());
if constexpr (may_cross_core) {
assert(conn.get_messenger_shard_id() == sid);
return seastar::smp::submit_to(
socket->get_shard_id(), [this, bytes] {
return socket->read_exactly(bytes);
}).then([this](auto bptr) {
if (record_io) {
rxbuf.append(bptr);
}
return bptr;
});
} else {
assert(socket->get_shard_id() == sid);
return socket->read_exactly(bytes);
}
}
template seastar::future<ceph::bufferptr> FrameAssemblerV2::read_exactly<true>(std::size_t);
template seastar::future<ceph::bufferptr> FrameAssemblerV2::read_exactly<false>(std::size_t);
template <bool may_cross_core>
seastar::future<ceph::bufferlist>
FrameAssemblerV2::read(std::size_t bytes)
{
assert(seastar::this_shard_id() == sid);
assert(has_socket());
if constexpr (may_cross_core) {
assert(conn.get_messenger_shard_id() == sid);
return seastar::smp::submit_to(
socket->get_shard_id(), [this, bytes] {
return socket->read(bytes);
}).then([this](auto buf) {
if (record_io) {
rxbuf.append(buf);
}
return buf;
});
} else {
assert(socket->get_shard_id() == sid);
return socket->read(bytes);
}
}
template seastar::future<ceph::bufferlist> FrameAssemblerV2::read<true>(std::size_t);
template seastar::future<ceph::bufferlist> FrameAssemblerV2::read<false>(std::size_t);
template <bool may_cross_core>
seastar::future<>
FrameAssemblerV2::write(ceph::bufferlist buf)
{
assert(seastar::this_shard_id() == sid);
assert(has_socket());
if constexpr (may_cross_core) {
assert(conn.get_messenger_shard_id() == sid);
if (record_io) {
txbuf.append(buf);
}
return seastar::smp::submit_to(
socket->get_shard_id(), [this, buf = std::move(buf)]() mutable {
return socket->write(std::move(buf));
});
} else {
assert(socket->get_shard_id() == sid);
return socket->write(std::move(buf));
}
}
template seastar::future<> FrameAssemblerV2::write<true>(ceph::bufferlist);
template seastar::future<> FrameAssemblerV2::write<false>(ceph::bufferlist);
template <bool may_cross_core>
seastar::future<>
FrameAssemblerV2::flush()
{
assert(seastar::this_shard_id() == sid);
assert(has_socket());
if constexpr (may_cross_core) {
assert(conn.get_messenger_shard_id() == sid);
return seastar::smp::submit_to(
socket->get_shard_id(), [this] {
return socket->flush();
});
} else {
assert(socket->get_shard_id() == sid);
return socket->flush();
}
}
template seastar::future<> FrameAssemblerV2::flush<true>();
template seastar::future<> FrameAssemblerV2::flush<false>();
template <bool may_cross_core>
seastar::future<>
FrameAssemblerV2::write_flush(ceph::bufferlist buf)
{
assert(seastar::this_shard_id() == sid);
assert(has_socket());
if constexpr (may_cross_core) {
assert(conn.get_messenger_shard_id() == sid);
if (unlikely(record_io)) {
txbuf.append(buf);
}
return seastar::smp::submit_to(
socket->get_shard_id(), [this, buf = std::move(buf)]() mutable {
return socket->write_flush(std::move(buf));
});
} else {
assert(socket->get_shard_id() == sid);
return socket->write_flush(std::move(buf));
}
}
template seastar::future<> FrameAssemblerV2::write_flush<true>(ceph::bufferlist);
template seastar::future<> FrameAssemblerV2::write_flush<false>(ceph::bufferlist);
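// An incoming frame is consumed in three steps: the main preamble (which
// yields the tag and the segment layout), then each segment, and finally
// the epilogue used to verify the whole frame.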
template <bool may_cross_core>
seastar::future<FrameAssemblerV2::read_main_t>
FrameAssemblerV2::read_main_preamble()
{
assert(seastar::this_shard_id() == sid);
rx_preamble.clear();
return read_exactly<may_cross_core>(
rx_frame_asm.get_preamble_onwire_len()
).then([this](auto bptr) {
try {
rx_preamble.append(std::move(bptr));
const Tag tag = rx_frame_asm.disassemble_preamble(rx_preamble);
#ifdef UNIT_TESTS_BUILT
intercept_frame(tag, false);
#endif
return read_main_t{tag, &rx_frame_asm};
} catch (FrameError& e) {
logger().warn("{} read_main_preamble: {}", conn, e.what());
throw std::system_error(make_error_code(crimson::net::error::negotiation_failure));
}
});
}
template seastar::future<FrameAssemblerV2::read_main_t> FrameAssemblerV2::read_main_preamble<true>();
template seastar::future<FrameAssemblerV2::read_main_t> FrameAssemblerV2::read_main_preamble<false>();
template <bool may_cross_core>
seastar::future<FrameAssemblerV2::read_payload_t*>
FrameAssemblerV2::read_frame_payload()
{
assert(seastar::this_shard_id() == sid);
rx_segments_data.clear();
return seastar::do_until(
[this] {
return rx_frame_asm.get_num_segments() == rx_segments_data.size();
},
[this] {
// TODO: create aligned and contiguous buffer from socket
const size_t seg_idx = rx_segments_data.size();
if (uint16_t alignment = rx_frame_asm.get_segment_align(seg_idx);
alignment != segment_t::DEFAULT_ALIGNMENT) {
logger().trace("{} cannot allocate {} aligned buffer at segment desc index {}",
conn, alignment, rx_segments_data.size());
}
uint32_t onwire_len = rx_frame_asm.get_segment_onwire_len(seg_idx);
// TODO: create aligned and contiguous buffer from socket
return read_exactly<may_cross_core>(onwire_len
).then([this](auto bptr) {
logger().trace("{} RECV({}) frame segment[{}]",
conn, bptr.length(), rx_segments_data.size());
bufferlist segment;
segment.append(std::move(bptr));
rx_segments_data.emplace_back(std::move(segment));
});
}
).then([this] {
return read_exactly<may_cross_core>(rx_frame_asm.get_epilogue_onwire_len());
}).then([this](auto bptr) {
logger().trace("{} RECV({}) frame epilogue", conn, bptr.length());
bool ok = false;
try {
bufferlist rx_epilogue;
rx_epilogue.append(std::move(bptr));
ok = rx_frame_asm.disassemble_segments(rx_preamble, rx_segments_data.data(), rx_epilogue);
} catch (FrameError& e) {
logger().error("read_frame_payload: {} {}", conn, e.what());
throw std::system_error(make_error_code(crimson::net::error::negotiation_failure));
} catch (ceph::crypto::onwire::MsgAuthError&) {
logger().error("read_frame_payload: {} bad auth tag", conn);
throw std::system_error(make_error_code(crimson::net::error::negotiation_failure));
}
    // We do have a mechanism that allows the transmitter to start sending a
    // message and abort after putting the entire data field on the wire. This
    // will be used by the kernel client to avoid unnecessary buffering.
if (!ok) {
ceph_abort("TODO");
}
return &rx_segments_data;
});
}
template seastar::future<FrameAssemblerV2::read_payload_t*> FrameAssemblerV2::read_frame_payload<true>();
template seastar::future<FrameAssemblerV2::read_payload_t*> FrameAssemblerV2::read_frame_payload<false>();
void FrameAssemblerV2::log_main_preamble(const ceph::bufferlist &bl)
{
const auto main_preamble =
reinterpret_cast<const preamble_block_t*>(bl.front().c_str());
logger().trace("{} SEND({}) frame: tag={}, num_segments={}, crc={}",
conn, bl.length(), (int)main_preamble->tag,
(int)main_preamble->num_segments, main_preamble->crc);
}
FrameAssemblerV2Ref FrameAssemblerV2::create(SocketConnection &conn)
{
return std::make_unique<FrameAssemblerV2>(conn);
}
void FrameAssemblerV2::clear()
{
record_io = false;
rxbuf.clear();
txbuf.clear();
rx_preamble.clear();
rx_segments_data.clear();
}
} // namespace crimson::net
| 13,943 | 29.247289 | 106 | cc |
null | ceph-main/src/crimson/net/FrameAssemblerV2.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "msg/async/frames_v2.h"
#include "msg/async/crypto_onwire.h"
#include "msg/async/compression_onwire.h"
#include "crimson/common/gated.h"
#include "crimson/net/Socket.h"
namespace crimson::net {
class SocketConnection;
class FrameAssemblerV2;
using FrameAssemblerV2Ref = std::unique_ptr<FrameAssemblerV2>;
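// Wraps the socket and the msgr2 frame assembly/disassembly state of a
// SocketConnection. It operates on one shard at a time (see sid below):
// the messenger shard during handshake, or the io/socket shard when READY.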
class FrameAssemblerV2 {
public:
FrameAssemblerV2(SocketConnection &conn);
~FrameAssemblerV2();
FrameAssemblerV2(const FrameAssemblerV2 &) = delete;
FrameAssemblerV2(FrameAssemblerV2 &&) = delete;
void set_shard_id(seastar::shard_id _sid) {
assert(seastar::this_shard_id() == sid);
clear();
sid = _sid;
}
seastar::shard_id get_shard_id() const {
return sid;
}
void set_is_rev1(bool is_rev1);
void create_session_stream_handlers(
const AuthConnectionMeta &auth_meta,
bool crossed);
void reset_handlers();
/*
* replacing
*/
struct mover_t {
SocketFRef socket;
ceph::crypto::onwire::rxtx_t session_stream_handlers;
ceph::compression::onwire::rxtx_t session_comp_handlers;
};
mover_t to_replace();
seastar::future<> replace_by(mover_t &&);
/*
* auth signature interfaces
*/
void start_recording();
struct record_bufs_t {
ceph::bufferlist rxbuf;
ceph::bufferlist txbuf;
};
record_bufs_t stop_recording();
/*
   * socket maintenance interfaces
*/
  // the socket exists and is not shut down
bool is_socket_valid() const;
seastar::shard_id get_socket_shard_id() const;
void set_socket(SocketFRef &&);
void learn_socket_ephemeral_port_as_connector(uint16_t port);
// if may_cross_core == true, gate is required for cross-core shutdown
template <bool may_cross_core>
void shutdown_socket(crimson::common::Gated *gate);
seastar::future<> replace_shutdown_socket(SocketFRef &&);
seastar::future<> close_shutdown_socket();
/*
* socket read and write interfaces
*/
template <bool may_cross_core = true>
seastar::future<ceph::bufferptr> read_exactly(std::size_t bytes);
template <bool may_cross_core = true>
seastar::future<ceph::bufferlist> read(std::size_t bytes);
template <bool may_cross_core = true>
seastar::future<> write(ceph::bufferlist);
template <bool may_cross_core = true>
seastar::future<> flush();
template <bool may_cross_core = true>
seastar::future<> write_flush(ceph::bufferlist);
/*
* frame read and write interfaces
*/
/// may throw negotiation_failure as fault
struct read_main_t {
ceph::msgr::v2::Tag tag;
const ceph::msgr::v2::FrameAssembler *rx_frame_asm;
};
template <bool may_cross_core = true>
seastar::future<read_main_t> read_main_preamble();
/// may throw negotiation_failure as fault
using read_payload_t = ceph::msgr::v2::segment_bls_t;
// FIXME: read_payload_t cannot be no-throw move constructible
template <bool may_cross_core = true>
seastar::future<read_payload_t*> read_frame_payload();
template <class F>
ceph::bufferlist get_buffer(F &tx_frame) {
assert(seastar::this_shard_id() == sid);
#ifdef UNIT_TESTS_BUILT
intercept_frame(F::tag, true);
#endif
auto bl = tx_frame.get_buffer(tx_frame_asm);
log_main_preamble(bl);
return bl;
}
template <class F, bool may_cross_core = true>
seastar::future<> write_flush_frame(F &tx_frame) {
assert(seastar::this_shard_id() == sid);
auto bl = get_buffer(tx_frame);
return write_flush<may_cross_core>(std::move(bl));
}
static FrameAssemblerV2Ref create(SocketConnection &conn);
private:
bool has_socket() const;
SocketFRef move_socket();
void clear();
void log_main_preamble(const ceph::bufferlist &bl);
#ifdef UNIT_TESTS_BUILT
void intercept_frame(ceph::msgr::v2::Tag, bool is_write);
#endif
SocketConnection &conn;
SocketFRef socket;
// checking Socket::is_shutdown() synchronously is impossible when sid is
// different from the socket sid.
bool is_socket_shutdown = false;
  // the current working shard; can be the messenger or the socket shard.
  // if on the messenger shard, call the interfaces with may_cross_core = true.
seastar::shard_id sid;
/*
* auth signature
*
* only in the messenger core
*/
bool record_io = false;
ceph::bufferlist rxbuf;
ceph::bufferlist txbuf;
/*
* frame data and handlers
*/
ceph::crypto::onwire::rxtx_t session_stream_handlers = { nullptr, nullptr };
// TODO
ceph::compression::onwire::rxtx_t session_comp_handlers = { nullptr, nullptr };
bool is_rev1 = false;
ceph::msgr::v2::FrameAssembler tx_frame_asm{
&session_stream_handlers, is_rev1, common::local_conf()->ms_crc_data,
&session_comp_handlers};
ceph::msgr::v2::FrameAssembler rx_frame_asm{
&session_stream_handlers, is_rev1, common::local_conf()->ms_crc_data,
&session_comp_handlers};
  // used in the messenger core during handshake,
  // and in the socket core when the connection is open;
  // must be cleared before switching cores.
ceph::bufferlist rx_preamble;
read_payload_t rx_segments_data;
};
} // namespace crimson::net
| 5,163 | 22.797235 | 81 | h |
null | ceph-main/src/crimson/net/Fwd.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <boost/container/small_vector.hpp>
#include <seastar/core/future.hh>
#include <seastar/core/future-util.hh>
#include <seastar/core/shared_ptr.hh>
#include <seastar/core/sharded.hh>
#include "msg/Connection.h"
#include "msg/MessageRef.h"
#include "msg/msg_types.h"
#include "crimson/common/errorator.h"
#include "crimson/common/local_shared_foreign_ptr.h"
class AuthConnectionMeta;
namespace crimson::net {
using msgr_tag_t = uint8_t;
using stop_t = seastar::stop_iteration;
class Connection;
using ConnectionLRef = seastar::shared_ptr<Connection>;
using ConnectionFRef = seastar::foreign_ptr<ConnectionLRef>;
using ConnectionRef = ::crimson::local_shared_foreign_ptr<ConnectionLRef>;
class Dispatcher;
class ChainedDispatchers;
constexpr std::size_t NUM_DISPATCHERS = 4u;
using dispatchers_t = boost::container::small_vector<Dispatcher*, NUM_DISPATCHERS>;
class Messenger;
using MessengerRef = seastar::shared_ptr<Messenger>;
using MessageFRef = seastar::foreign_ptr<MessageURef>;
} // namespace crimson::net
| 1,446 | 26.301887 | 83 | h |
null | ceph-main/src/crimson/net/Interceptor.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <variant>
#include <seastar/core/sharded.hh>
#include <seastar/core/sleep.hh>
#include "Fwd.h"
#include "msg/async/frames_v2.h"
namespace crimson::net {
enum class custom_bp_t : uint8_t {
BANNER_WRITE = 0,
BANNER_READ,
BANNER_PAYLOAD_READ,
SOCKET_CONNECTING,
SOCKET_ACCEPTED
};
inline const char* get_bp_name(custom_bp_t bp) {
uint8_t index = static_cast<uint8_t>(bp);
static const char *const bp_names[] = {"BANNER_WRITE",
"BANNER_READ",
"BANNER_PAYLOAD_READ",
"SOCKET_CONNECTING",
"SOCKET_ACCEPTED"};
assert(index < std::size(bp_names));
return bp_names[index];
}
enum class bp_type_t {
READ = 0,
WRITE
};
enum class bp_action_t {
CONTINUE = 0,
FAULT,
BLOCK,
STALL
};
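// Synchronizes a test with a connection that hits a BLOCK breakpoint: the
// connection parks in block() until the test calls unblock(), and the test
// can use wait_blocked() to wait until the connection is parked. Either
// side gives up after 10 seconds.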
class socket_blocker {
std::optional<seastar::abort_source> p_blocked;
std::optional<seastar::abort_source> p_unblocked;
public:
seastar::future<> wait_blocked() {
ceph_assert(!p_blocked);
if (p_unblocked) {
return seastar::make_ready_future<>();
} else {
p_blocked = seastar::abort_source();
return seastar::sleep_abortable(std::chrono::seconds(10),
*p_blocked).then([] {
throw std::runtime_error(
"Timeout (10s) in socket_blocker::wait_blocked()");
}).handle_exception_type([] (const seastar::sleep_aborted& e) {
// wait done!
});
}
}
seastar::future<> block() {
if (p_blocked) {
p_blocked->request_abort();
p_blocked = std::nullopt;
}
ceph_assert(!p_unblocked);
p_unblocked = seastar::abort_source();
return seastar::sleep_abortable(std::chrono::seconds(10),
*p_unblocked).then([] {
ceph_abort("Timeout (10s) in socket_blocker::block()");
}).handle_exception_type([] (const seastar::sleep_aborted& e) {
// wait done!
});
}
void unblock() {
ceph_assert(!p_blocked);
ceph_assert(p_unblocked);
p_unblocked->request_abort();
p_unblocked = std::nullopt;
}
};
struct tag_bp_t {
ceph::msgr::v2::Tag tag;
bp_type_t type;
bool operator==(const tag_bp_t& x) const {
return tag == x.tag && type == x.type;
}
bool operator!=(const tag_bp_t& x) const { return !operator==(x); }
bool operator<(const tag_bp_t& x) const {
return std::tie(tag, type) < std::tie(x.tag, x.type);
}
};
struct Breakpoint {
using var_t = std::variant<custom_bp_t, tag_bp_t>;
var_t bp;
Breakpoint(custom_bp_t bp) : bp(bp) { }
Breakpoint(ceph::msgr::v2::Tag tag, bp_type_t type)
: bp(tag_bp_t{tag, type}) { }
bool operator==(const Breakpoint& x) const { return bp == x.bp; }
bool operator!=(const Breakpoint& x) const { return !operator==(x); }
bool operator==(const custom_bp_t& x) const { return bp == var_t(x); }
bool operator!=(const custom_bp_t& x) const { return !operator==(x); }
bool operator==(const tag_bp_t& x) const { return bp == var_t(x); }
bool operator!=(const tag_bp_t& x) const { return !operator==(x); }
bool operator<(const Breakpoint& x) const { return bp < x.bp; }
};
struct Interceptor {
socket_blocker blocker;
virtual ~Interceptor() {}
virtual void register_conn(ConnectionRef) = 0;
virtual void register_conn_ready(ConnectionRef) = 0;
virtual void register_conn_closed(ConnectionRef) = 0;
virtual void register_conn_replaced(ConnectionRef) = 0;
virtual bp_action_t intercept(ConnectionRef, Breakpoint bp) = 0;
};
} // namespace crimson::net
template<>
struct fmt::formatter<crimson::net::bp_action_t> : fmt::formatter<std::string_view> {
template <typename FormatContext>
auto format(const crimson::net::bp_action_t& action, FormatContext& ctx) const {
static const char *const action_names[] = {"CONTINUE",
"FAULT",
"BLOCK",
"STALL"};
assert(static_cast<size_t>(action) < std::size(action_names));
return formatter<std::string_view>::format(action_names[static_cast<size_t>(action)], ctx);
}
};
template<>
struct fmt::formatter<crimson::net::Breakpoint> : fmt::formatter<std::string_view> {
template <typename FormatContext>
auto format(const crimson::net::Breakpoint& bp, FormatContext& ctx) const {
if (auto custom_bp = std::get_if<crimson::net::custom_bp_t>(&bp.bp)) {
return formatter<std::string_view>::format(crimson::net::get_bp_name(*custom_bp), ctx);
}
auto tag_bp = std::get<crimson::net::tag_bp_t>(bp.bp);
static const char *const tag_names[] = {"NONE",
"HELLO",
"AUTH_REQUEST",
"AUTH_BAD_METHOD",
"AUTH_REPLY_MORE",
"AUTH_REQUEST_MORE",
"AUTH_DONE",
"AUTH_SIGNATURE",
"CLIENT_IDENT",
"SERVER_IDENT",
"IDENT_MISSING_FEATURES",
"SESSION_RECONNECT",
"SESSION_RESET",
"SESSION_RETRY",
"SESSION_RETRY_GLOBAL",
"SESSION_RECONNECT_OK",
"WAIT",
"MESSAGE",
"KEEPALIVE2",
"KEEPALIVE2_ACK",
"ACK"};
assert(static_cast<size_t>(tag_bp.tag) < std::size(tag_names));
return fmt::format_to(ctx.out(), "{}_{}",
tag_names[static_cast<size_t>(tag_bp.tag)],
tag_bp.type == crimson::net::bp_type_t::WRITE ? "WRITE" : "READ");
}
};
| 6,237 | 34.443182 | 95 | h |
null | ceph-main/src/crimson/net/Messenger.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "Messenger.h"
#include "SocketMessenger.h"
namespace crimson::net {
MessengerRef
Messenger::create(const entity_name_t& name,
const std::string& lname,
uint64_t nonce,
bool dispatch_only_on_this_shard)
{
return seastar::make_shared<SocketMessenger>(
name, lname, nonce, dispatch_only_on_this_shard);
}
} // namespace crimson::net
| 503 | 24.2 | 70 | cc |
null | ceph-main/src/crimson/net/Messenger.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "Fwd.h"
#include "crimson/common/throttle.h"
#include "msg/Message.h"
#include "msg/Policy.h"
class AuthAuthorizer;
namespace crimson::auth {
class AuthClient;
class AuthServer;
}
namespace crimson::net {
#ifdef UNIT_TESTS_BUILT
class Interceptor;
#endif
using Throttle = crimson::common::Throttle;
using SocketPolicy = ceph::net::Policy<Throttle>;
class Messenger {
public:
Messenger() {}
virtual ~Messenger() {}
virtual const entity_name_t& get_myname() const = 0;
entity_type_t get_mytype() const { return get_myname().type(); }
virtual const entity_addrvec_t &get_myaddrs() const = 0;
entity_addr_t get_myaddr() const { return get_myaddrs().front(); }
virtual void set_myaddrs(const entity_addrvec_t& addrs) = 0;
virtual bool set_addr_unknowns(const entity_addrvec_t &addrs) = 0;
virtual void set_auth_client(crimson::auth::AuthClient *) = 0;
virtual void set_auth_server(crimson::auth::AuthServer *) = 0;
using bind_ertr = crimson::errorator<
crimson::ct_error::address_in_use, // The address (range) is already bound
crimson::ct_error::address_not_available
>;
/// bind to the given address
virtual bind_ertr::future<> bind(const entity_addrvec_t& addr) = 0;
/// start the messenger
virtual seastar::future<> start(const dispatchers_t&) = 0;
/// either return an existing connection to the peer,
/// or a new pending connection
virtual ConnectionRef
connect(const entity_addr_t& peer_addr,
const entity_name_t& peer_name) = 0;
ConnectionRef
connect(const entity_addr_t& peer_addr,
const entity_type_t& peer_type) {
return connect(peer_addr, entity_name_t(peer_type, -1));
}
virtual bool owns_connection(Connection &) const = 0;
// wait for messenger shutdown
virtual seastar::future<> wait() = 0;
// stop dispatching events and messages
virtual void stop() = 0;
virtual bool is_started() const = 0;
  // free internal resources before destruction; must be called after stopped,
  // and must be called if the messenger is bound.
virtual seastar::future<> shutdown() = 0;
virtual void print(std::ostream& out) const = 0;
virtual SocketPolicy get_policy(entity_type_t peer_type) const = 0;
virtual SocketPolicy get_default_policy() const = 0;
virtual void set_default_policy(const SocketPolicy& p) = 0;
virtual void set_policy(entity_type_t peer_type, const SocketPolicy& p) = 0;
virtual void set_policy_throttler(entity_type_t peer_type, Throttle* throttle) = 0;
static MessengerRef
create(const entity_name_t& name,
const std::string& lname,
uint64_t nonce,
bool dispatch_only_on_this_shard);
#ifdef UNIT_TESTS_BUILT
virtual void set_interceptor(Interceptor *) = 0;
#endif
};
inline std::ostream& operator<<(std::ostream& out, const Messenger& msgr) {
out << "[";
msgr.print(out);
out << "]";
return out;
}
} // namespace crimson::net
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::net::Messenger> : fmt::ostream_formatter {};
#endif
| 3,456 | 25.389313 | 87 | h |
null | ceph-main/src/crimson/net/ProtocolV2.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "ProtocolV2.h"
#include <fmt/format.h>
#include <fmt/ranges.h>
#include "include/msgr.h"
#include "include/random.h"
#include "msg/msg_fmt.h"
#include "crimson/auth/AuthClient.h"
#include "crimson/auth/AuthServer.h"
#include "crimson/common/formatter.h"
#include "crimson/common/log.h"
#include "Errors.h"
#include "SocketMessenger.h"
#ifdef UNIT_TESTS_BUILT
#include "Interceptor.h"
#endif
using namespace ceph::msgr::v2;
using crimson::common::local_conf;
namespace {
// TODO: CEPH_MSGR2_FEATURE_COMPRESSION
const uint64_t CRIMSON_MSGR2_SUPPORTED_FEATURES =
(CEPH_MSGR2_FEATURE_REVISION_1 |
// CEPH_MSGR2_FEATURE_COMPRESSION |
UINT64_C(0));
// Log levels in V2 Protocol:
// * error level, something error that cause connection to terminate:
// - fatal errors;
// - bugs;
// * warn level: something unusual that indicates a connection fault or replacement:
// - unstable network;
// - incompatible peer;
// - auth failure;
// - connection race;
// - connection reset;
// * info level, something very important that shows the connection lifecycle,
//   which doesn't happen very frequently;
// * debug level, important logs for debugging, including:
// - all the messages sent/received (-->/<==);
// - all the frames exchanged (WRITE/GOT);
// - important fields updated (UPDATE);
// - connection state transitions (TRIGGER);
// * trace level, trivial logs showing:
// - the exact bytes being sent/received (SEND/RECV(bytes));
// - detailed information of sub-frames;
// - integrity checks;
// - etc.
seastar::logger& logger() {
return crimson::get_logger(ceph_subsys_ms);
}
[[noreturn]] void abort_in_fault() {
throw std::system_error(make_error_code(crimson::net::error::negotiation_failure));
}
[[noreturn]] void abort_protocol() {
throw std::system_error(make_error_code(crimson::net::error::protocol_aborted));
}
#define ABORT_IN_CLOSE(is_dispatch_reset) { \
do_close(is_dispatch_reset); \
abort_protocol(); \
}
inline void expect_tag(const Tag& expected,
const Tag& actual,
crimson::net::SocketConnection& conn,
const char *where) {
if (actual != expected) {
logger().warn("{} {} received wrong tag: {}, expected {}",
conn, where,
static_cast<uint32_t>(actual),
static_cast<uint32_t>(expected));
abort_in_fault();
}
}
inline void unexpected_tag(const Tag& unexpected,
crimson::net::SocketConnection& conn,
const char *where) {
logger().warn("{} {} received unexpected tag: {}",
conn, where, static_cast<uint32_t>(unexpected));
abort_in_fault();
}
inline uint64_t generate_client_cookie() {
return ceph::util::generate_random_number<uint64_t>(
1, std::numeric_limits<uint64_t>::max());
}
} // namespace anonymous
namespace crimson::net {
#ifdef UNIT_TESTS_BUILT
// should be consistent with intercept_frame() in FrameAssemblerV2.cc
void intercept(Breakpoint bp,
bp_type_t type,
SocketConnection& conn,
Interceptor *interceptor,
Socket *socket) {
if (interceptor) {
auto action = interceptor->intercept(
conn.get_local_shared_foreign_from_this(),
Breakpoint(bp));
socket->set_trap(type, action, &interceptor->blocker);
}
}
#define INTERCEPT_CUSTOM(bp, type) \
intercept({bp}, type, conn, \
conn.interceptor, conn.socket)
#else
#define INTERCEPT_CUSTOM(bp, type)
#endif
seastar::future<> ProtocolV2::Timer::backoff(double seconds)
{
logger().warn("{} waiting {} seconds ...", conn, seconds);
cancel();
last_dur_ = seconds;
as = seastar::abort_source();
auto dur = std::chrono::duration_cast<seastar::lowres_clock::duration>(
std::chrono::duration<double>(seconds));
return seastar::sleep_abortable(dur, *as
).handle_exception_type([this] (const seastar::sleep_aborted& e) {
logger().debug("{} wait aborted", conn);
abort_protocol();
});
}
ProtocolV2::ProtocolV2(SocketConnection& conn,
IOHandler &io_handler)
: conn{conn},
messenger{conn.messenger},
io_handler{io_handler},
frame_assembler{FrameAssemblerV2::create(conn)},
auth_meta{seastar::make_lw_shared<AuthConnectionMeta>()},
protocol_timer{conn}
{
io_states = io_handler.get_states();
}
ProtocolV2::~ProtocolV2() {}
void ProtocolV2::start_connect(const entity_addr_t& _peer_addr,
const entity_name_t& _peer_name)
{
assert(seastar::this_shard_id() == conn.get_messenger_shard_id());
ceph_assert(state == state_t::NONE);
ceph_assert(!gate.is_closed());
conn.peer_addr = _peer_addr;
conn.target_addr = _peer_addr;
conn.set_peer_name(_peer_name);
conn.policy = messenger.get_policy(_peer_name.type());
client_cookie = generate_client_cookie();
logger().info("{} ProtocolV2::start_connect(): peer_addr={}, peer_name={}, cc={}"
" policy(lossy={}, server={}, standby={}, resetcheck={})",
conn, _peer_addr, _peer_name, client_cookie,
conn.policy.lossy, conn.policy.server,
conn.policy.standby, conn.policy.resetcheck);
messenger.register_conn(
seastar::static_pointer_cast<SocketConnection>(conn.shared_from_this()));
execute_connecting();
}
void ProtocolV2::start_accept(SocketFRef&& new_socket,
const entity_addr_t& _peer_addr)
{
assert(seastar::this_shard_id() == conn.get_messenger_shard_id());
ceph_assert(state == state_t::NONE);
// until we know better
conn.target_addr = _peer_addr;
frame_assembler->set_socket(std::move(new_socket));
has_socket = true;
is_socket_valid = true;
logger().info("{} ProtocolV2::start_accept(): target_addr={}", conn, _peer_addr);
messenger.accept_conn(
seastar::static_pointer_cast<SocketConnection>(conn.shared_from_this()));
auto cc_seq = crosscore.prepare_submit();
gate.dispatch_in_background("set_accepted_sid", conn, [this, cc_seq] {
return io_handler.set_accepted_sid(
cc_seq,
frame_assembler->get_socket_shard_id(),
seastar::make_foreign(conn.shared_from_this()));
});
execute_accepting();
}
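// State transitions happen in two phases: trigger_state_phase1() records the
// new state and, when leaving READY, prepares to take io dispatching back
// from the IOHandler; trigger_state_phase2() pushes the new io_state (and,
// when entering READY, the frame assembler) to the IOHandler shard and, when
// leaving READY, waits until io dispatching has exited.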
void ProtocolV2::trigger_state_phase1(state_t new_state)
{
ceph_assert_always(!gate.is_closed());
if (new_state == state) {
logger().error("{} is not allowed to re-trigger state {}",
conn, get_state_name(state));
ceph_abort();
}
if (state == state_t::CLOSING) {
logger().error("{} CLOSING is not allowed to trigger state {}",
conn, get_state_name(new_state));
ceph_abort();
}
logger().debug("{} TRIGGER {}, was {}",
conn, get_state_name(new_state), get_state_name(state));
if (state == state_t::READY) {
// from READY
ceph_assert_always(!need_exit_io);
ceph_assert_always(!pr_exit_io.has_value());
need_exit_io = true;
pr_exit_io = seastar::shared_promise<>();
}
if (new_state == state_t::STANDBY && !conn.policy.server) {
need_notify_out = true;
} else {
need_notify_out = false;
}
state = new_state;
}
void ProtocolV2::trigger_state_phase2(
state_t new_state, io_state_t new_io_state)
{
ceph_assert_always(new_state == state);
ceph_assert_always(!gate.is_closed());
ceph_assert_always(!pr_switch_io_shard.has_value());
FrameAssemblerV2Ref fa;
if (new_state == state_t::READY) {
assert(new_io_state == io_state_t::open);
assert(io_handler.get_shard_id() ==
frame_assembler->get_socket_shard_id());
frame_assembler->set_shard_id(io_handler.get_shard_id());
fa = std::move(frame_assembler);
} else {
assert(new_io_state != io_state_t::open);
}
auto cc_seq = crosscore.prepare_submit();
logger().debug("{} send {} IOHandler::set_io_state(): new_state={}, new_io_state={}, "
"fa={}, set_notify_out={}",
conn, cc_seq, get_state_name(new_state), new_io_state,
fa ? fmt::format("(sid={})", fa->get_shard_id()) : "N/A",
need_notify_out);
gate.dispatch_in_background(
"set_io_state", conn,
[this, cc_seq, new_io_state, fa=std::move(fa)]() mutable {
return seastar::smp::submit_to(
io_handler.get_shard_id(),
[this, cc_seq, new_io_state,
fa=std::move(fa), set_notify_out=need_notify_out]() mutable {
return io_handler.set_io_state(
cc_seq, new_io_state, std::move(fa), set_notify_out);
});
});
if (need_exit_io) {
// from READY
auto cc_seq = crosscore.prepare_submit();
logger().debug("{} send {} IOHandler::wait_io_exit_dispatching() ...",
conn, cc_seq);
assert(pr_exit_io.has_value());
assert(new_io_state != io_state_t::open);
need_exit_io = false;
gate.dispatch_in_background("exit_io", conn, [this, cc_seq] {
return seastar::smp::submit_to(
io_handler.get_shard_id(), [this, cc_seq] {
return io_handler.wait_io_exit_dispatching(cc_seq);
}).then([this, cc_seq](auto ret) {
logger().debug("{} finish {} IOHandler::wait_io_exit_dispatching(), {}",
conn, cc_seq, ret.io_states);
frame_assembler = std::move(ret.frame_assembler);
assert(seastar::this_shard_id() == conn.get_messenger_shard_id());
ceph_assert_always(
seastar::this_shard_id() == frame_assembler->get_shard_id());
ceph_assert_always(!frame_assembler->is_socket_valid());
assert(!need_exit_io);
io_states = ret.io_states;
pr_exit_io->set_value();
pr_exit_io = std::nullopt;
});
});
}
}
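// Centralized fault handling: a fault on a lossy connection (past
// CONNECTING) closes it; otherwise the connection moves to STANDBY (server
// side, or nothing queued to send), to WAIT (faults during
// CONNECTING/REPLACING), or back to CONNECTING to reconnect.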
void ProtocolV2::fault(
state_t expected_state,
const char *where,
std::exception_ptr eptr)
{
assert(expected_state == state_t::CONNECTING ||
expected_state == state_t::ESTABLISHING ||
expected_state == state_t::REPLACING ||
expected_state == state_t::READY);
const char *e_what;
try {
std::rethrow_exception(eptr);
} catch (std::exception &e) {
e_what = e.what();
}
if (state != expected_state) {
logger().info("{} protocol {} {} is aborted at inconsistent {} -- {}",
conn,
get_state_name(expected_state),
where,
get_state_name(state),
e_what);
#ifndef NDEBUG
if (expected_state == state_t::REPLACING) {
assert(state == state_t::CLOSING);
} else if (expected_state == state_t::READY) {
assert(state == state_t::CLOSING ||
state == state_t::REPLACING ||
state == state_t::CONNECTING ||
state == state_t::STANDBY);
} else {
assert(state == state_t::CLOSING ||
state == state_t::REPLACING);
}
#endif
return;
}
assert(state == expected_state);
if (state != state_t::CONNECTING && conn.policy.lossy) {
// socket will be shutdown in do_close()
logger().info("{} protocol {} {} fault on lossy channel, going to CLOSING -- {}",
conn, get_state_name(state), where, e_what);
do_close(true);
return;
}
if (likely(has_socket)) {
if (likely(is_socket_valid)) {
ceph_assert_always(state != state_t::READY);
frame_assembler->shutdown_socket<true>(&gate);
is_socket_valid = false;
} else {
ceph_assert_always(state != state_t::ESTABLISHING);
}
} else { // !has_socket
ceph_assert_always(state == state_t::CONNECTING);
assert(!is_socket_valid);
}
if (conn.policy.server ||
(conn.policy.standby && !io_states.is_out_queued_or_sent())) {
if (conn.policy.server) {
logger().info("{} protocol {} {} fault as server, going to STANDBY {} -- {}",
conn,
get_state_name(state),
where,
io_states,
e_what);
} else {
logger().info("{} protocol {} {} fault with nothing to send, going to STANDBY {} -- {}",
conn,
get_state_name(state),
where,
io_states,
e_what);
}
execute_standby();
} else if (state == state_t::CONNECTING ||
state == state_t::REPLACING) {
logger().info("{} protocol {} {} fault, going to WAIT {} -- {}",
conn,
get_state_name(state),
where,
io_states,
e_what);
execute_wait(false);
} else {
assert(state == state_t::READY ||
state == state_t::ESTABLISHING);
logger().info("{} protocol {} {} fault, going to CONNECTING {} -- {}",
conn,
get_state_name(state),
where,
io_states,
e_what);
execute_connecting();
}
}
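// Drops the established session: clears server_cookie and connect_seq (a
// full reset also regenerates client_cookie and clears peer_global_seq),
// then forwards the reset to the IOHandler on its shard.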
void ProtocolV2::reset_session(bool full)
{
server_cookie = 0;
connect_seq = 0;
if (full) {
client_cookie = generate_client_cookie();
peer_global_seq = 0;
}
auto cc_seq = crosscore.prepare_submit();
logger().debug("{} send {} IOHandler::reset_session({})",
conn, cc_seq, full);
io_states.reset_session(full);
gate.dispatch_in_background(
"reset_session", conn, [this, cc_seq, full] {
return seastar::smp::submit_to(
io_handler.get_shard_id(), [this, cc_seq, full] {
return io_handler.reset_session(cc_seq, full);
});
});
// user can make changes
}
seastar::future<std::tuple<entity_type_t, entity_addr_t>>
ProtocolV2::banner_exchange(bool is_connect)
{
// 1. prepare and send banner
bufferlist banner_payload;
encode((uint64_t)CRIMSON_MSGR2_SUPPORTED_FEATURES, banner_payload, 0);
encode((uint64_t)CEPH_MSGR2_REQUIRED_FEATURES, banner_payload, 0);
bufferlist bl;
bl.append(CEPH_BANNER_V2_PREFIX, strlen(CEPH_BANNER_V2_PREFIX));
auto len_payload = static_cast<uint16_t>(banner_payload.length());
encode(len_payload, bl, 0);
bl.claim_append(banner_payload);
logger().debug("{} SEND({}) banner: len_payload={}, supported={}, "
"required={}, banner=\"{}\"",
conn, bl.length(), len_payload,
CRIMSON_MSGR2_SUPPORTED_FEATURES,
CEPH_MSGR2_REQUIRED_FEATURES,
CEPH_BANNER_V2_PREFIX);
INTERCEPT_CUSTOM(custom_bp_t::BANNER_WRITE, bp_type_t::WRITE);
return frame_assembler->write_flush(std::move(bl)).then([this] {
// 2. read peer banner
unsigned banner_len = strlen(CEPH_BANNER_V2_PREFIX) + sizeof(ceph_le16);
INTERCEPT_CUSTOM(custom_bp_t::BANNER_READ, bp_type_t::READ);
return frame_assembler->read_exactly(banner_len);
}).then([this](auto bptr) {
// 3. process peer banner and read banner_payload
unsigned banner_prefix_len = strlen(CEPH_BANNER_V2_PREFIX);
logger().debug("{} RECV({}) banner: \"{}\"",
conn, bptr.length(),
std::string(bptr.c_str(), banner_prefix_len));
if (memcmp(bptr.c_str(), CEPH_BANNER_V2_PREFIX, banner_prefix_len) != 0) {
if (memcmp(bptr.c_str(), CEPH_BANNER, strlen(CEPH_BANNER)) == 0) {
logger().warn("{} peer is using V1 protocol", conn);
} else {
logger().warn("{} peer sent bad banner", conn);
}
abort_in_fault();
}
bptr.set_offset(bptr.offset() + banner_prefix_len);
bptr.set_length(bptr.length() - banner_prefix_len);
assert(bptr.length() == sizeof(ceph_le16));
uint16_t payload_len;
bufferlist buf;
buf.append(std::move(bptr));
auto ti = buf.cbegin();
try {
decode(payload_len, ti);
} catch (const buffer::error &e) {
logger().warn("{} decode banner payload len failed", conn);
abort_in_fault();
}
logger().debug("{} GOT banner: payload_len={}", conn, payload_len);
INTERCEPT_CUSTOM(custom_bp_t::BANNER_PAYLOAD_READ, bp_type_t::READ);
return frame_assembler->read(payload_len);
}).then([this, is_connect] (bufferlist bl) {
// 4. process peer banner_payload and send HelloFrame
auto p = bl.cbegin();
uint64_t _peer_supported_features;
uint64_t _peer_required_features;
try {
decode(_peer_supported_features, p);
decode(_peer_required_features, p);
} catch (const buffer::error &e) {
logger().warn("{} decode banner payload failed", conn);
abort_in_fault();
}
logger().debug("{} RECV({}) banner features: supported={} required={}",
conn, bl.length(),
_peer_supported_features, _peer_required_features);
// Check feature bit compatibility
uint64_t supported_features = CRIMSON_MSGR2_SUPPORTED_FEATURES;
uint64_t required_features = CEPH_MSGR2_REQUIRED_FEATURES;
if ((required_features & _peer_supported_features) != required_features) {
logger().error("{} peer does not support all required features"
" required={} peer_supported={}",
conn, required_features, _peer_supported_features);
ABORT_IN_CLOSE(is_connect);
}
if ((supported_features & _peer_required_features) != _peer_required_features) {
logger().error("{} we do not support all peer required features"
" peer_required={} supported={}",
conn, _peer_required_features, supported_features);
ABORT_IN_CLOSE(is_connect);
}
peer_supported_features = _peer_supported_features;
bool is_rev1 = HAVE_MSGR2_FEATURE(peer_supported_features, REVISION_1);
frame_assembler->set_is_rev1(is_rev1);
auto hello = HelloFrame::Encode(messenger.get_mytype(),
conn.target_addr);
logger().debug("{} WRITE HelloFrame: my_type={}, peer_addr={}",
conn, ceph_entity_type_name(messenger.get_mytype()),
conn.target_addr);
return frame_assembler->write_flush_frame(hello);
}).then([this] {
//5. read peer HelloFrame
return frame_assembler->read_main_preamble();
}).then([this](auto ret) {
expect_tag(Tag::HELLO, ret.tag, conn, "read_hello_frame");
return frame_assembler->read_frame_payload();
}).then([this](auto payload) {
// 6. process peer HelloFrame
auto hello = HelloFrame::Decode(payload->back());
logger().debug("{} GOT HelloFrame: my_type={} peer_addr={}",
conn, ceph_entity_type_name(hello.entity_type()),
hello.peer_addr());
return seastar::make_ready_future<std::tuple<entity_type_t, entity_addr_t>>(
std::make_tuple(hello.entity_type(), hello.peer_addr()));
});
}
// CONNECTING state
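// Client-side authentication: client_auth() sends AuthRequestFrame, then
// handle_auth_reply() loops -- AUTH_REPLY_MORE continues the exchange,
// AUTH_BAD_METHOD retries with one of the allowed methods, and AUTH_DONE
// installs the session stream handlers and finishes authentication.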
seastar::future<> ProtocolV2::handle_auth_reply()
{
return frame_assembler->read_main_preamble(
).then([this](auto ret) {
switch (ret.tag) {
case Tag::AUTH_BAD_METHOD:
return frame_assembler->read_frame_payload(
).then([this](auto payload) {
// handle_auth_bad_method() logic
auto bad_method = AuthBadMethodFrame::Decode(payload->back());
logger().warn("{} GOT AuthBadMethodFrame: method={} result={}, "
"allowed_methods={}, allowed_modes={}",
conn, bad_method.method(), cpp_strerror(bad_method.result()),
bad_method.allowed_methods(), bad_method.allowed_modes());
ceph_assert(messenger.get_auth_client());
int r = messenger.get_auth_client()->handle_auth_bad_method(
conn, *auth_meta,
bad_method.method(), bad_method.result(),
bad_method.allowed_methods(), bad_method.allowed_modes());
if (r < 0) {
logger().warn("{} auth_client handle_auth_bad_method returned {}",
conn, r);
abort_in_fault();
}
return client_auth(bad_method.allowed_methods());
});
case Tag::AUTH_REPLY_MORE:
return frame_assembler->read_frame_payload(
).then([this](auto payload) {
// handle_auth_reply_more() logic
auto auth_more = AuthReplyMoreFrame::Decode(payload->back());
logger().debug("{} GOT AuthReplyMoreFrame: payload_len={}",
conn, auth_more.auth_payload().length());
ceph_assert(messenger.get_auth_client());
// let execute_connecting() take care of the thrown exception
auto reply = messenger.get_auth_client()->handle_auth_reply_more(
conn, *auth_meta, auth_more.auth_payload());
auto more_reply = AuthRequestMoreFrame::Encode(reply);
logger().debug("{} WRITE AuthRequestMoreFrame: payload_len={}",
conn, reply.length());
return frame_assembler->write_flush_frame(more_reply);
}).then([this] {
return handle_auth_reply();
});
case Tag::AUTH_DONE:
return frame_assembler->read_frame_payload(
).then([this](auto payload) {
// handle_auth_done() logic
auto auth_done = AuthDoneFrame::Decode(payload->back());
logger().debug("{} GOT AuthDoneFrame: gid={}, con_mode={}, payload_len={}",
conn, auth_done.global_id(),
ceph_con_mode_name(auth_done.con_mode()),
auth_done.auth_payload().length());
ceph_assert(messenger.get_auth_client());
int r = messenger.get_auth_client()->handle_auth_done(
conn,
*auth_meta,
auth_done.global_id(),
auth_done.con_mode(),
auth_done.auth_payload());
if (r < 0) {
logger().warn("{} auth_client handle_auth_done returned {}", conn, r);
abort_in_fault();
}
auth_meta->con_mode = auth_done.con_mode();
frame_assembler->create_session_stream_handlers(*auth_meta, false);
return finish_auth();
});
default: {
unexpected_tag(ret.tag, conn, "handle_auth_reply");
return seastar::now();
}
}
});
}
seastar::future<> ProtocolV2::client_auth(std::vector<uint32_t> &allowed_methods)
{
// send_auth_request() logic
ceph_assert(messenger.get_auth_client());
try {
auto [auth_method, preferred_modes, bl] =
messenger.get_auth_client()->get_auth_request(conn, *auth_meta);
auth_meta->auth_method = auth_method;
auto frame = AuthRequestFrame::Encode(auth_method, preferred_modes, bl);
logger().debug("{} WRITE AuthRequestFrame: method={},"
" preferred_modes={}, payload_len={}",
conn, auth_method, preferred_modes, bl.length());
return frame_assembler->write_flush_frame(frame
).then([this] {
return handle_auth_reply();
});
} catch (const crimson::auth::error& e) {
logger().error("{} get_initial_auth_request returned {}", conn, e.what());
ABORT_IN_CLOSE(true);
return seastar::now();
}
}
seastar::future<ProtocolV2::next_step_t>
ProtocolV2::process_wait()
{
return frame_assembler->read_frame_payload(
).then([this](auto payload) {
// handle_wait() logic
logger().debug("{} GOT WaitFrame", conn);
WaitFrame::Decode(payload->back());
return next_step_t::wait;
});
}
seastar::future<ProtocolV2::next_step_t>
ProtocolV2::client_connect()
{
// send_client_ident() logic
uint64_t flags = 0;
if (conn.policy.lossy) {
flags |= CEPH_MSG_CONNECT_LOSSY;
}
auto client_ident = ClientIdentFrame::Encode(
messenger.get_myaddrs(),
conn.target_addr,
messenger.get_myname().num(),
global_seq,
conn.policy.features_supported,
conn.policy.features_required | msgr2_required, flags,
client_cookie);
logger().debug("{} WRITE ClientIdentFrame: addrs={}, target={}, gid={},"
" gs={}, features_supported={}, features_required={},"
" flags={}, cookie={}",
conn, messenger.get_myaddrs(), conn.target_addr,
messenger.get_myname().num(), global_seq,
conn.policy.features_supported,
conn.policy.features_required | msgr2_required,
flags, client_cookie);
return frame_assembler->write_flush_frame(client_ident
).then([this] {
return frame_assembler->read_main_preamble();
}).then([this](auto ret) {
switch (ret.tag) {
case Tag::IDENT_MISSING_FEATURES:
return frame_assembler->read_frame_payload(
).then([this](auto payload) {
// handle_ident_missing_features() logic
auto ident_missing = IdentMissingFeaturesFrame::Decode(payload->back());
logger().warn("{} GOT IdentMissingFeaturesFrame: features={}"
" (client does not support all server features)",
conn, ident_missing.features());
abort_in_fault();
return next_step_t::none;
});
case Tag::WAIT:
return process_wait();
case Tag::SERVER_IDENT:
return frame_assembler->read_frame_payload(
).then([this](auto payload) {
if (unlikely(state != state_t::CONNECTING)) {
logger().debug("{} triggered {} at receiving SERVER_IDENT",
conn, get_state_name(state));
abort_protocol();
}
// handle_server_ident() logic
auto cc_seq = crosscore.prepare_submit();
logger().debug("{} send {} IOHandler::requeue_out_sent()",
conn, cc_seq);
io_states.requeue_out_sent();
gate.dispatch_in_background(
"requeue_out_sent", conn, [this, cc_seq] {
return seastar::smp::submit_to(
io_handler.get_shard_id(), [this, cc_seq] {
return io_handler.requeue_out_sent(cc_seq);
});
});
auto server_ident = ServerIdentFrame::Decode(payload->back());
logger().debug("{} GOT ServerIdentFrame:"
" addrs={}, gid={}, gs={},"
" features_supported={}, features_required={},"
" flags={}, cookie={}",
conn,
server_ident.addrs(), server_ident.gid(),
server_ident.global_seq(),
server_ident.supported_features(),
server_ident.required_features(),
server_ident.flags(), server_ident.cookie());
// is this who we intended to talk to?
// be a bit forgiving here, since we may be connecting based on addresses parsed out
// of mon_host or something.
if (!server_ident.addrs().contains(conn.target_addr)) {
logger().warn("{} peer identifies as {}, does not include {}",
conn, server_ident.addrs(), conn.target_addr);
throw std::system_error(
make_error_code(crimson::net::error::bad_peer_address));
}
server_cookie = server_ident.cookie();
// TODO: change peer_addr to entity_addrvec_t
if (server_ident.addrs().front() != conn.peer_addr) {
logger().warn("{} peer advertises as {}, does not match {}",
conn, server_ident.addrs(), conn.peer_addr);
throw std::system_error(
make_error_code(crimson::net::error::bad_peer_address));
}
if (conn.get_peer_id() != entity_name_t::NEW &&
conn.get_peer_id() != server_ident.gid()) {
logger().error("{} connection peer id ({}) does not match "
"what it should be ({}) during connecting, close",
conn, server_ident.gid(), conn.get_peer_id());
ABORT_IN_CLOSE(true);
}
conn.set_peer_id(server_ident.gid());
conn.set_features(server_ident.supported_features() &
conn.policy.features_supported);
logger().debug("{} UPDATE: features={}", conn, conn.get_features());
peer_global_seq = server_ident.global_seq();
bool lossy = server_ident.flags() & CEPH_MSG_CONNECT_LOSSY;
if (lossy != conn.policy.lossy) {
logger().warn("{} UPDATE Policy(lossy={}) from server flags", conn, lossy);
conn.policy.lossy = lossy;
}
if (lossy && (connect_seq != 0 || server_cookie != 0)) {
logger().warn("{} UPDATE cs=0({}) sc=0({}) for lossy policy",
conn, connect_seq, server_cookie);
connect_seq = 0;
server_cookie = 0;
}
return seastar::make_ready_future<next_step_t>(next_step_t::ready);
});
default: {
unexpected_tag(ret.tag, conn, "post_client_connect");
return seastar::make_ready_future<next_step_t>(next_step_t::none);
}
}
});
}
seastar::future<ProtocolV2::next_step_t>
ProtocolV2::client_reconnect()
{
// send_reconnect() logic
auto reconnect = ReconnectFrame::Encode(messenger.get_myaddrs(),
client_cookie,
server_cookie,
global_seq,
connect_seq,
io_states.in_seq);
logger().debug("{} WRITE ReconnectFrame: addrs={}, client_cookie={},"
" server_cookie={}, gs={}, cs={}, in_seq={}",
conn, messenger.get_myaddrs(),
client_cookie, server_cookie,
global_seq, connect_seq, io_states.in_seq);
return frame_assembler->write_flush_frame(reconnect).then([this] {
return frame_assembler->read_main_preamble();
}).then([this](auto ret) {
switch (ret.tag) {
case Tag::SESSION_RETRY_GLOBAL:
return frame_assembler->read_frame_payload(
).then([this](auto payload) {
// handle_session_retry_global() logic
auto retry = RetryGlobalFrame::Decode(payload->back());
logger().warn("{} GOT RetryGlobalFrame: gs={}",
conn, retry.global_seq());
global_seq = messenger.get_global_seq(retry.global_seq());
logger().warn("{} UPDATE: gs={} for retry global", conn, global_seq);
return client_reconnect();
});
case Tag::SESSION_RETRY:
return frame_assembler->read_frame_payload(
).then([this](auto payload) {
// handle_session_retry() logic
auto retry = RetryFrame::Decode(payload->back());
logger().warn("{} GOT RetryFrame: cs={}",
conn, retry.connect_seq());
connect_seq = retry.connect_seq() + 1;
logger().warn("{} UPDATE: cs={}", conn, connect_seq);
return client_reconnect();
});
case Tag::SESSION_RESET:
return frame_assembler->read_frame_payload(
).then([this](auto payload) {
if (unlikely(state != state_t::CONNECTING)) {
logger().debug("{} triggered {} before reset_session()",
conn, get_state_name(state));
abort_protocol();
}
// handle_session_reset() logic
auto reset = ResetFrame::Decode(payload->back());
logger().warn("{} GOT ResetFrame: full={}", conn, reset.full());
reset_session(reset.full());
// user can make changes
return client_connect();
});
case Tag::WAIT:
return process_wait();
case Tag::SESSION_RECONNECT_OK:
return frame_assembler->read_frame_payload(
).then([this](auto payload) {
if (unlikely(state != state_t::CONNECTING)) {
logger().debug("{} triggered {} at receiving RECONNECT_OK",
conn, get_state_name(state));
abort_protocol();
}
// handle_reconnect_ok() logic
auto reconnect_ok = ReconnectOkFrame::Decode(payload->back());
auto cc_seq = crosscore.prepare_submit();
logger().debug("{} GOT ReconnectOkFrame: msg_seq={}, "
"send {} IOHandler::requeue_out_sent_up_to()",
conn, reconnect_ok.msg_seq(), cc_seq);
io_states.requeue_out_sent_up_to();
auto msg_seq = reconnect_ok.msg_seq();
gate.dispatch_in_background(
"requeue_out_reconnecting", conn, [this, cc_seq, msg_seq] {
return seastar::smp::submit_to(
io_handler.get_shard_id(), [this, cc_seq, msg_seq] {
return io_handler.requeue_out_sent_up_to(cc_seq, msg_seq);
});
});
return seastar::make_ready_future<next_step_t>(next_step_t::ready);
});
default: {
unexpected_tag(ret.tag, conn, "post_client_reconnect");
return seastar::make_ready_future<next_step_t>(next_step_t::none);
}
}
});
}
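// Drives the CONNECTING state: (re)establish the socket, exchange banners,
// authenticate, then send ClientIdent (fresh session) or Reconnect (when a
// server_cookie is already known), and finally switch the IOHandler to the
// socket shard and enter READY.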
void ProtocolV2::execute_connecting()
{
ceph_assert_always(!is_socket_valid);
trigger_state(state_t::CONNECTING, io_state_t::delay);
gated_execute("execute_connecting", conn, [this] {
global_seq = messenger.get_global_seq();
assert(client_cookie != 0);
if (!conn.policy.lossy && server_cookie != 0) {
++connect_seq;
logger().debug("{} UPDATE: gs={}, cs={} for reconnect",
conn, global_seq, connect_seq);
} else { // conn.policy.lossy || server_cookie == 0
assert(connect_seq == 0);
assert(server_cookie == 0);
logger().debug("{} UPDATE: gs={} for connect", conn, global_seq);
}
return wait_exit_io().then([this] {
#ifdef UNIT_TESTS_BUILT
// process custom_bp_t::SOCKET_CONNECTING
// supports CONTINUE/FAULT/BLOCK
if (conn.interceptor) {
auto action = conn.interceptor->intercept(
conn.get_local_shared_foreign_from_this(),
{custom_bp_t::SOCKET_CONNECTING});
switch (action) {
case bp_action_t::CONTINUE:
return seastar::now();
case bp_action_t::FAULT:
logger().info("[Test] got FAULT");
abort_in_fault();
case bp_action_t::BLOCK:
logger().info("[Test] got BLOCK");
return conn.interceptor->blocker.block();
default:
ceph_abort("unexpected action from trap");
return seastar::now();
}
} else {
return seastar::now();
}
}).then([this] {
#endif
ceph_assert_always(frame_assembler);
if (unlikely(state != state_t::CONNECTING)) {
logger().debug("{} triggered {} before Socket::connect()",
conn, get_state_name(state));
abort_protocol();
}
return Socket::connect(conn.peer_addr);
}).then([this](SocketRef _new_socket) {
logger().debug("{} socket connected", conn);
if (unlikely(state != state_t::CONNECTING)) {
logger().debug("{} triggered {} during Socket::connect()",
conn, get_state_name(state));
return _new_socket->close().then([sock=std::move(_new_socket)] {
abort_protocol();
});
}
SocketFRef new_socket = seastar::make_foreign(std::move(_new_socket));
if (!has_socket) {
frame_assembler->set_socket(std::move(new_socket));
has_socket = true;
} else {
gate.dispatch_in_background(
"replace_socket_connecting",
conn,
[this, new_socket=std::move(new_socket)]() mutable {
return frame_assembler->replace_shutdown_socket(std::move(new_socket));
}
);
}
is_socket_valid = true;
return seastar::now();
}).then([this] {
auth_meta = seastar::make_lw_shared<AuthConnectionMeta>();
frame_assembler->reset_handlers();
frame_assembler->start_recording();
return banner_exchange(true);
}).then([this] (auto&& ret) {
auto [_peer_type, _my_addr_from_peer] = std::move(ret);
if (conn.get_peer_type() != _peer_type) {
logger().warn("{} connection peer type does not match what peer advertises {} != {}",
conn, ceph_entity_type_name(conn.get_peer_type()),
ceph_entity_type_name(_peer_type));
ABORT_IN_CLOSE(true);
}
if (unlikely(state != state_t::CONNECTING)) {
logger().debug("{} triggered {} during banner_exchange(), abort",
conn, get_state_name(state));
abort_protocol();
}
frame_assembler->learn_socket_ephemeral_port_as_connector(
_my_addr_from_peer.get_port());
if (unlikely(_my_addr_from_peer.is_legacy())) {
logger().warn("{} peer sent a legacy address for me: {}",
conn, _my_addr_from_peer);
throw std::system_error(
make_error_code(crimson::net::error::bad_peer_address));
}
_my_addr_from_peer.set_type(entity_addr_t::TYPE_MSGR2);
messenger.learned_addr(_my_addr_from_peer, conn);
return client_auth();
}).then([this] {
if (server_cookie == 0) {
ceph_assert(connect_seq == 0);
return client_connect();
} else {
ceph_assert(connect_seq > 0);
return client_reconnect();
}
}).then([this] (next_step_t next) {
if (unlikely(state != state_t::CONNECTING)) {
logger().debug("{} triggered {} at the end of execute_connecting()",
conn, get_state_name(state));
abort_protocol();
}
switch (next) {
case next_step_t::ready: {
if (unlikely(state != state_t::CONNECTING)) {
logger().debug("{} triggered {} before dispatch_connect(), abort",
conn, get_state_name(state));
abort_protocol();
}
auto cc_seq = crosscore.prepare_submit();
logger().info("{} connected: gs={}, pgs={}, cs={}, "
"client_cookie={}, server_cookie={}, {}, new_sid={}, "
"send {} IOHandler::dispatch_connect()",
conn, global_seq, peer_global_seq, connect_seq,
client_cookie, server_cookie, io_states,
frame_assembler->get_socket_shard_id(), cc_seq);
// set io_handler to a new shard
auto new_io_shard = frame_assembler->get_socket_shard_id();
ConnectionFRef conn_fref = seastar::make_foreign(
conn.shared_from_this());
ceph_assert_always(!pr_switch_io_shard.has_value());
pr_switch_io_shard = seastar::shared_promise<>();
return seastar::smp::submit_to(
io_handler.get_shard_id(),
[this, cc_seq, new_io_shard,
conn_fref=std::move(conn_fref)]() mutable {
return io_handler.dispatch_connect(
cc_seq, new_io_shard, std::move(conn_fref));
}).then([this, new_io_shard] {
ceph_assert_always(io_handler.get_shard_id() == new_io_shard);
pr_switch_io_shard->set_value();
pr_switch_io_shard = std::nullopt;
// user can make changes
if (unlikely(state != state_t::CONNECTING)) {
logger().debug("{} triggered {} after dispatch_connect(), abort",
conn, get_state_name(state));
abort_protocol();
}
execute_ready();
});
}
case next_step_t::wait: {
logger().info("{} execute_connecting(): going to WAIT(max-backoff)", conn);
ceph_assert_always(is_socket_valid);
frame_assembler->shutdown_socket<true>(&gate);
is_socket_valid = false;
execute_wait(true);
return seastar::now();
}
default: {
ceph_abort("impossible next step");
}
}
}).handle_exception([this](std::exception_ptr eptr) {
fault(state_t::CONNECTING, "execute_connecting", eptr);
});
});
}
// ACCEPTING state
seastar::future<> ProtocolV2::_auth_bad_method(int r)
{
// _auth_bad_method() logic
ceph_assert(r < 0);
auto [allowed_methods, allowed_modes] =
messenger.get_auth_server()->get_supported_auth_methods(conn.get_peer_type());
auto bad_method = AuthBadMethodFrame::Encode(
auth_meta->auth_method, r, allowed_methods, allowed_modes);
logger().warn("{} WRITE AuthBadMethodFrame: method={}, result={}, "
"allowed_methods={}, allowed_modes={})",
conn, auth_meta->auth_method, cpp_strerror(r),
allowed_methods, allowed_modes);
return frame_assembler->write_flush_frame(bad_method
).then([this] {
return server_auth();
});
}
seastar::future<> ProtocolV2::_handle_auth_request(bufferlist& auth_payload, bool more)
{
// _handle_auth_request() logic
ceph_assert(messenger.get_auth_server());
bufferlist reply;
int r = messenger.get_auth_server()->handle_auth_request(
conn,
*auth_meta,
more,
auth_meta->auth_method,
auth_payload,
&conn.peer_global_id,
&reply);
switch (r) {
// successful
case 1: {
auto auth_done = AuthDoneFrame::Encode(
conn.peer_global_id, auth_meta->con_mode, reply);
logger().debug("{} WRITE AuthDoneFrame: gid={}, con_mode={}, payload_len={}",
conn, conn.peer_global_id,
ceph_con_mode_name(auth_meta->con_mode), reply.length());
return frame_assembler->write_flush_frame(auth_done
).then([this] {
ceph_assert(auth_meta);
frame_assembler->create_session_stream_handlers(*auth_meta, true);
return finish_auth();
});
}
// auth more
case 0: {
auto more = AuthReplyMoreFrame::Encode(reply);
logger().debug("{} WRITE AuthReplyMoreFrame: payload_len={}",
conn, reply.length());
return frame_assembler->write_flush_frame(more
).then([this] {
return frame_assembler->read_main_preamble();
}).then([this](auto ret) {
expect_tag(Tag::AUTH_REQUEST_MORE, ret.tag, conn, "read_auth_request_more");
return frame_assembler->read_frame_payload();
}).then([this](auto payload) {
auto auth_more = AuthRequestMoreFrame::Decode(payload->back());
logger().debug("{} GOT AuthRequestMoreFrame: payload_len={}",
conn, auth_more.auth_payload().length());
return _handle_auth_request(auth_more.auth_payload(), true);
});
}
case -EBUSY: {
logger().warn("{} auth_server handle_auth_request returned -EBUSY", conn);
abort_in_fault();
return seastar::now();
}
default: {
logger().warn("{} auth_server handle_auth_request returned {}", conn, r);
return _auth_bad_method(r);
}
}
}
seastar::future<> ProtocolV2::server_auth()
{
return frame_assembler->read_main_preamble(
).then([this](auto ret) {
expect_tag(Tag::AUTH_REQUEST, ret.tag, conn, "read_auth_request");
return frame_assembler->read_frame_payload();
}).then([this](auto payload) {
// handle_auth_request() logic
auto request = AuthRequestFrame::Decode(payload->back());
logger().debug("{} GOT AuthRequestFrame: method={}, preferred_modes={},"
" payload_len={}",
conn, request.method(), request.preferred_modes(),
request.auth_payload().length());
auth_meta->auth_method = request.method();
auth_meta->con_mode = messenger.get_auth_server()->pick_con_mode(
conn.get_peer_type(), auth_meta->auth_method,
request.preferred_modes());
if (auth_meta->con_mode == CEPH_CON_MODE_UNKNOWN) {
logger().warn("{} auth_server pick_con_mode returned mode CEPH_CON_MODE_UNKNOWN", conn);
return _auth_bad_method(-EOPNOTSUPP);
}
return _handle_auth_request(request.auth_payload(), false);
});
}
bool ProtocolV2::validate_peer_name(const entity_name_t& peer_name) const
{
auto my_peer_name = conn.get_peer_name();
if (my_peer_name.type() != peer_name.type()) {
return false;
}
if (my_peer_name.num() != entity_name_t::NEW &&
peer_name.num() != entity_name_t::NEW &&
my_peer_name.num() != peer_name.num()) {
return false;
}
return true;
}
seastar::future<ProtocolV2::next_step_t>
ProtocolV2::send_wait()
{
auto wait = WaitFrame::Encode();
logger().debug("{} WRITE WaitFrame", conn);
return frame_assembler->write_flush_frame(wait
).then([] {
return next_step_t::wait;
});
}
seastar::future<ProtocolV2::next_step_t>
ProtocolV2::reuse_connection(
ProtocolV2* existing_proto, bool do_reset,
bool reconnect, uint64_t conn_seq, uint64_t msg_seq)
{
if (unlikely(state != state_t::ACCEPTING)) {
logger().debug("{} triggered {} before trigger_replacing()",
conn, get_state_name(state));
abort_protocol();
}
existing_proto->trigger_replacing(reconnect,
do_reset,
frame_assembler->to_replace(),
std::move(auth_meta),
peer_global_seq,
client_cookie,
conn.get_peer_name(),
conn.get_features(),
peer_supported_features,
conn_seq,
msg_seq);
ceph_assert_always(has_socket && is_socket_valid);
is_socket_valid = false;
has_socket = false;
#ifdef UNIT_TESTS_BUILT
if (conn.interceptor) {
conn.interceptor->register_conn_replaced(
conn.get_local_shared_foreign_from_this());
}
#endif
// close this connection because all the necessary information is delivered
  // to the existing connection, and jump to error handling code to abort the
// current state.
ABORT_IN_CLOSE(false);
return seastar::make_ready_future<next_step_t>(next_step_t::none);
}
seastar::future<ProtocolV2::next_step_t>
ProtocolV2::handle_existing_connection(SocketConnectionRef existing_conn)
{
// handle_existing_connection() logic
ProtocolV2 *existing_proto = dynamic_cast<ProtocolV2*>(
existing_conn->protocol.get());
ceph_assert(existing_proto);
logger().debug("{}(gs={}, pgs={}, cs={}, cc={}, sc={}) connecting,"
" found existing {}(state={}, gs={}, pgs={}, cs={}, cc={}, sc={})",
conn, global_seq, peer_global_seq, connect_seq,
client_cookie, server_cookie,
fmt::ptr(existing_conn.get()), get_state_name(existing_proto->state),
existing_proto->global_seq,
existing_proto->peer_global_seq,
existing_proto->connect_seq,
existing_proto->client_cookie,
existing_proto->server_cookie);
if (!validate_peer_name(existing_conn->get_peer_name())) {
logger().error("{} server_connect: my peer_name doesn't match"
" the existing connection {}, abort", conn, fmt::ptr(existing_conn.get()));
abort_in_fault();
}
if (existing_proto->state == state_t::REPLACING) {
logger().warn("{} server_connect: racing replace happened while"
" replacing existing connection {}, send wait.",
conn, *existing_conn);
return send_wait();
}
if (existing_proto->peer_global_seq > peer_global_seq) {
logger().warn("{} server_connect:"
" this is a stale connection, because peer_global_seq({})"
" < existing->peer_global_seq({}), close this connection"
" in favor of existing connection {}",
conn, peer_global_seq,
existing_proto->peer_global_seq, *existing_conn);
abort_in_fault();
}
if (existing_conn->policy.lossy) {
// existing connection can be thrown out in favor of this one
logger().warn("{} server_connect:"
" existing connection {} is a lossy channel. Close existing in favor of"
" this connection", conn, *existing_conn);
if (unlikely(state != state_t::ACCEPTING)) {
logger().debug("{} triggered {} before execute_establishing()",
conn, get_state_name(state));
abort_protocol();
}
execute_establishing(existing_conn);
return seastar::make_ready_future<next_step_t>(next_step_t::ready);
}
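  // a non-zero server_cookie means the existing connection has already sent
  // server_ident (see send_server_ident()), i.e. a previous session was
  // established with this peer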
if (existing_proto->server_cookie != 0) {
if (existing_proto->client_cookie != client_cookie) {
// Found previous session
// peer has reset and we're going to reuse the existing connection
// by replacing the socket
logger().warn("{} server_connect:"
" found new session (cs={})"
" when existing {} {} is with stale session (cs={}, ss={}),"
" peer must have reset",
conn,
client_cookie,
get_state_name(existing_proto->state),
*existing_conn,
existing_proto->client_cookie,
existing_proto->server_cookie);
return reuse_connection(existing_proto, conn.policy.resetcheck);
} else {
// session establishment interrupted between client_ident and server_ident,
// continuing...
logger().warn("{} server_connect: found client session with existing {} {}"
" matched (cs={}, ss={}), continuing session establishment",
conn,
get_state_name(existing_proto->state),
*existing_conn,
client_cookie,
existing_proto->server_cookie);
return reuse_connection(existing_proto);
}
} else {
// Looks like a connection race: server and client are both connecting to
// each other at the same time.
if (existing_proto->client_cookie != client_cookie) {
if (existing_conn->peer_wins()) {
// acceptor (this connection, the peer) wins
logger().warn("{} server_connect: connection race detected (cs={}, e_cs={}, ss=0)"
" and win, reusing existing {} {}",
conn,
client_cookie,
existing_proto->client_cookie,
get_state_name(existing_proto->state),
*existing_conn);
return reuse_connection(existing_proto);
} else {
// acceptor (this connection, the peer) loses
logger().warn("{} server_connect: connection race detected (cs={}, e_cs={}, ss=0)"
" and lose to existing {}, ask client to wait",
conn, client_cookie, existing_proto->client_cookie, *existing_conn);
return existing_conn->send_keepalive().then([this] {
return send_wait();
});
}
} else {
logger().warn("{} server_connect: found client session with existing {} {}"
" matched (cs={}, ss={}), continuing session establishment",
conn,
get_state_name(existing_proto->state),
*existing_conn,
client_cookie,
existing_proto->server_cookie);
return reuse_connection(existing_proto);
}
}
}
seastar::future<ProtocolV2::next_step_t>
ProtocolV2::server_connect()
{
return frame_assembler->read_frame_payload(
).then([this](auto payload) {
// handle_client_ident() logic
auto client_ident = ClientIdentFrame::Decode(payload->back());
logger().debug("{} GOT ClientIdentFrame: addrs={}, target={},"
" gid={}, gs={}, features_supported={},"
" features_required={}, flags={}, cookie={}",
conn, client_ident.addrs(), client_ident.target_addr(),
client_ident.gid(), client_ident.global_seq(),
client_ident.supported_features(),
client_ident.required_features(),
client_ident.flags(), client_ident.cookie());
if (client_ident.addrs().empty() ||
client_ident.addrs().front() == entity_addr_t()) {
logger().warn("{} oops, client_ident.addrs() is empty", conn);
throw std::system_error(
make_error_code(crimson::net::error::bad_peer_address));
}
if (!messenger.get_myaddrs().contains(client_ident.target_addr())) {
logger().warn("{} peer is trying to reach {} which is not us ({})",
conn, client_ident.target_addr(), messenger.get_myaddrs());
throw std::system_error(
make_error_code(crimson::net::error::bad_peer_address));
}
conn.peer_addr = client_ident.addrs().front();
logger().debug("{} UPDATE: peer_addr={}", conn, conn.peer_addr);
conn.target_addr = conn.peer_addr;
if (!conn.policy.lossy && !conn.policy.server && conn.target_addr.get_port() <= 0) {
logger().warn("{} we don't know how to reconnect to peer {}",
conn, conn.target_addr);
throw std::system_error(
make_error_code(crimson::net::error::bad_peer_address));
}
if (conn.get_peer_id() != entity_name_t::NEW &&
conn.get_peer_id() != client_ident.gid()) {
logger().error("{} client_ident peer_id ({}) does not match"
" what it should be ({}) during accepting, abort",
conn, client_ident.gid(), conn.get_peer_id());
abort_in_fault();
}
conn.set_peer_id(client_ident.gid());
client_cookie = client_ident.cookie();
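    // check that the client supports every feature we require
    // (policy features_required plus msgr2_required); if not, tell the client
    // which features are missing and wait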
uint64_t feat_missing =
(conn.policy.features_required | msgr2_required) &
~(uint64_t)client_ident.supported_features();
if (feat_missing) {
auto ident_missing_features = IdentMissingFeaturesFrame::Encode(feat_missing);
logger().warn("{} WRITE IdentMissingFeaturesFrame: features={} (peer missing)",
conn, feat_missing);
return frame_assembler->write_flush_frame(ident_missing_features
).then([] {
return next_step_t::wait;
});
}
conn.set_features(client_ident.supported_features() &
conn.policy.features_supported);
logger().debug("{} UPDATE: features={}", conn, conn.get_features());
peer_global_seq = client_ident.global_seq();
bool lossy = client_ident.flags() & CEPH_MSG_CONNECT_LOSSY;
if (lossy != conn.policy.lossy) {
logger().warn("{} my lossy policy {} doesn't match client {}, ignore",
conn, conn.policy.lossy, lossy);
}
// Looks good so far, let's check if there is already an existing connection
// to this peer.
SocketConnectionRef existing_conn = messenger.lookup_conn(conn.peer_addr);
if (existing_conn) {
return handle_existing_connection(existing_conn);
} else {
if (unlikely(state != state_t::ACCEPTING)) {
logger().debug("{} triggered {} before execute_establishing()",
conn, get_state_name(state));
abort_protocol();
}
execute_establishing(nullptr);
return seastar::make_ready_future<next_step_t>(next_step_t::ready);
}
});
}
seastar::future<ProtocolV2::next_step_t>
ProtocolV2::read_reconnect()
{
return frame_assembler->read_main_preamble(
).then([this](auto ret) {
expect_tag(Tag::SESSION_RECONNECT, ret.tag, conn, "read_session_reconnect");
return server_reconnect();
});
}
seastar::future<ProtocolV2::next_step_t>
ProtocolV2::send_retry(uint64_t connect_seq)
{
auto retry = RetryFrame::Encode(connect_seq);
logger().warn("{} WRITE RetryFrame: cs={}", conn, connect_seq);
return frame_assembler->write_flush_frame(retry
).then([this] {
return read_reconnect();
});
}
seastar::future<ProtocolV2::next_step_t>
ProtocolV2::send_retry_global(uint64_t global_seq)
{
auto retry = RetryGlobalFrame::Encode(global_seq);
logger().warn("{} WRITE RetryGlobalFrame: gs={}", conn, global_seq);
return frame_assembler->write_flush_frame(retry
).then([this] {
return read_reconnect();
});
}
seastar::future<ProtocolV2::next_step_t>
ProtocolV2::send_reset(bool full)
{
auto reset = ResetFrame::Encode(full);
logger().warn("{} WRITE ResetFrame: full={}", conn, full);
return frame_assembler->write_flush_frame(reset
).then([this] {
return frame_assembler->read_main_preamble();
}).then([this](auto ret) {
expect_tag(Tag::CLIENT_IDENT, ret.tag, conn, "post_send_reset");
return server_connect();
});
}
seastar::future<ProtocolV2::next_step_t>
ProtocolV2::server_reconnect()
{
return frame_assembler->read_frame_payload(
).then([this](auto payload) {
// handle_reconnect() logic
auto reconnect = ReconnectFrame::Decode(payload->back());
logger().debug("{} GOT ReconnectFrame: addrs={}, client_cookie={},"
" server_cookie={}, gs={}, cs={}, msg_seq={}",
conn, reconnect.addrs(),
reconnect.client_cookie(), reconnect.server_cookie(),
reconnect.global_seq(), reconnect.connect_seq(),
reconnect.msg_seq());
// can peer_addrs be changed on-the-fly?
// TODO: change peer_addr to entity_addrvec_t
entity_addr_t paddr = reconnect.addrs().front();
if (paddr.is_msgr2() || paddr.is_any()) {
// good
} else {
logger().warn("{} peer's address {} is not v2", conn, paddr);
throw std::system_error(
make_error_code(crimson::net::error::bad_peer_address));
}
if (conn.peer_addr == entity_addr_t()) {
conn.peer_addr = paddr;
} else if (conn.peer_addr != paddr) {
logger().error("{} peer identifies as {}, while conn.peer_addr={},"
" reconnect failed",
conn, paddr, conn.peer_addr);
throw std::system_error(
make_error_code(crimson::net::error::bad_peer_address));
}
peer_global_seq = reconnect.global_seq();
SocketConnectionRef existing_conn = messenger.lookup_conn(conn.peer_addr);
if (!existing_conn) {
// there is no existing connection therefore cannot reconnect to previous
// session
logger().warn("{} server_reconnect: no existing connection from address {},"
" reseting client", conn, conn.peer_addr);
return send_reset(true);
}
ProtocolV2 *existing_proto = dynamic_cast<ProtocolV2*>(
existing_conn->protocol.get());
ceph_assert(existing_proto);
logger().debug("{}(gs={}, pgs={}, cs={}, cc={}, sc={}) re-connecting,"
" found existing {}(state={}, gs={}, pgs={}, cs={}, cc={}, sc={})",
conn, global_seq, peer_global_seq, reconnect.connect_seq(),
reconnect.client_cookie(), reconnect.server_cookie(),
fmt::ptr(existing_conn.get()),
get_state_name(existing_proto->state),
existing_proto->global_seq,
existing_proto->peer_global_seq,
existing_proto->connect_seq,
existing_proto->client_cookie,
existing_proto->server_cookie);
if (!validate_peer_name(existing_conn->get_peer_name())) {
logger().error("{} server_reconnect: my peer_name doesn't match"
" the existing connection {}, abort", conn, fmt::ptr(existing_conn.get()));
abort_in_fault();
}
if (existing_proto->state == state_t::REPLACING) {
logger().warn("{} server_reconnect: racing replace happened while "
" replacing existing connection {}, retry global.",
conn, *existing_conn);
return send_retry_global(existing_proto->peer_global_seq);
}
if (existing_proto->client_cookie != reconnect.client_cookie()) {
logger().warn("{} server_reconnect:"
" client_cookie mismatch with existing connection {},"
" cc={} rcc={}. I must have reset, reseting client.",
conn, *existing_conn,
existing_proto->client_cookie, reconnect.client_cookie());
return send_reset(conn.policy.resetcheck);
} else if (existing_proto->server_cookie == 0) {
// this happens when:
// - a connects to b
// - a sends client_ident
// - b gets client_ident, sends server_ident and sets cookie X
// - connection fault
// - b reconnects to a with cookie X, connect_seq=1
// - a has cookie==0
logger().warn("{} server_reconnect: I was a client (cc={}) and didn't received the"
" server_ident with existing connection {}."
" Asking peer to resume session establishment",
conn, existing_proto->client_cookie, *existing_conn);
return send_reset(false);
}
if (existing_proto->peer_global_seq > reconnect.global_seq()) {
logger().warn("{} server_reconnect: stale global_seq: exist_pgs({}) > peer_gs({}),"
" with existing connection {},"
" ask client to retry global",
conn, existing_proto->peer_global_seq,
reconnect.global_seq(), *existing_conn);
return send_retry_global(existing_proto->peer_global_seq);
}
if (existing_proto->connect_seq > reconnect.connect_seq()) {
logger().warn("{} server_reconnect: stale peer connect_seq peer_cs({}) < exist_cs({}),"
" with existing connection {}, ask client to retry",
conn, reconnect.connect_seq(),
existing_proto->connect_seq, *existing_conn);
return send_retry(existing_proto->connect_seq);
} else if (existing_proto->connect_seq == reconnect.connect_seq()) {
// reconnect race: both peers are sending reconnect messages
if (existing_conn->peer_wins()) {
// acceptor (this connection, the peer) wins
logger().warn("{} server_reconnect: reconnect race detected (cs={})"
" and win, reusing existing {} {}",
conn,
reconnect.connect_seq(),
get_state_name(existing_proto->state),
*existing_conn);
return reuse_connection(
existing_proto, false,
true, reconnect.connect_seq(), reconnect.msg_seq());
} else {
// acceptor (this connection, the peer) loses
logger().warn("{} server_reconnect: reconnect race detected (cs={})"
" and lose to existing {}, ask client to wait",
conn, reconnect.connect_seq(), *existing_conn);
return send_wait();
}
} else { // existing_proto->connect_seq < reconnect.connect_seq()
logger().warn("{} server_reconnect: stale exsiting connect_seq exist_cs({}) < peer_cs({}),"
" reusing existing {} {}",
conn,
existing_proto->connect_seq,
reconnect.connect_seq(),
get_state_name(existing_proto->state),
*existing_conn);
return reuse_connection(
existing_proto, false,
true, reconnect.connect_seq(), reconnect.msg_seq());
}
});
}
void ProtocolV2::execute_accepting()
{
assert(is_socket_valid);
trigger_state(state_t::ACCEPTING, io_state_t::none);
gate.dispatch_in_background("execute_accepting", conn, [this] {
return seastar::futurize_invoke([this] {
#ifdef UNIT_TESTS_BUILT
if (conn.interceptor) {
auto action = conn.interceptor->intercept(
conn.get_local_shared_foreign_from_this(),
{custom_bp_t::SOCKET_ACCEPTED});
switch (action) {
case bp_action_t::CONTINUE:
break;
case bp_action_t::FAULT:
logger().info("[Test] got FAULT");
abort_in_fault();
default:
ceph_abort("unexpected action from trap");
}
}
#endif
auth_meta = seastar::make_lw_shared<AuthConnectionMeta>();
frame_assembler->reset_handlers();
frame_assembler->start_recording();
return banner_exchange(false);
}).then([this] (auto&& ret) {
auto [_peer_type, _my_addr_from_peer] = std::move(ret);
ceph_assert(conn.get_peer_type() == 0);
conn.set_peer_type(_peer_type);
conn.policy = messenger.get_policy(_peer_type);
logger().info("{} UPDATE: peer_type={},"
" policy(lossy={} server={} standby={} resetcheck={})",
conn, ceph_entity_type_name(_peer_type),
conn.policy.lossy, conn.policy.server,
conn.policy.standby, conn.policy.resetcheck);
if (!messenger.get_myaddr().is_blank_ip() &&
(messenger.get_myaddr().get_port() != _my_addr_from_peer.get_port() ||
messenger.get_myaddr().get_nonce() != _my_addr_from_peer.get_nonce())) {
logger().warn("{} my_addr_from_peer {} port/nonce doesn't match myaddr {}",
conn, _my_addr_from_peer, messenger.get_myaddr());
throw std::system_error(
make_error_code(crimson::net::error::bad_peer_address));
}
messenger.learned_addr(_my_addr_from_peer, conn);
return server_auth();
}).then([this] {
return frame_assembler->read_main_preamble();
}).then([this](auto ret) {
switch (ret.tag) {
case Tag::CLIENT_IDENT:
return server_connect();
case Tag::SESSION_RECONNECT:
return server_reconnect();
default: {
unexpected_tag(ret.tag, conn, "post_server_auth");
return seastar::make_ready_future<next_step_t>(next_step_t::none);
}
}
}).then([this] (next_step_t next) {
switch (next) {
case next_step_t::ready:
assert(state != state_t::ACCEPTING);
break;
case next_step_t::wait:
if (unlikely(state != state_t::ACCEPTING)) {
logger().debug("{} triggered {} at the end of execute_accepting()",
conn, get_state_name(state));
abort_protocol();
}
logger().info("{} execute_accepting(): going to SERVER_WAIT", conn);
execute_server_wait();
break;
default:
ceph_abort("impossible next step");
}
}).handle_exception([this](std::exception_ptr eptr) {
const char *e_what;
try {
std::rethrow_exception(eptr);
} catch (std::exception &e) {
e_what = e.what();
}
logger().info("{} execute_accepting(): fault at {}, going to CLOSING -- {}",
conn, get_state_name(state), e_what);
do_close(false);
});
});
}
// CONNECTING or ACCEPTING state
seastar::future<> ProtocolV2::finish_auth()
{
ceph_assert(auth_meta);
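  // both sides sign the pre-auth frames recorded since start_recording():
  // we sign what we received (rxbuf) and verify the peer's signature over
  // what we sent (txbuf)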
auto records = frame_assembler->stop_recording();
const auto sig = auth_meta->session_key.empty() ? sha256_digest_t() :
auth_meta->session_key.hmac_sha256(nullptr, records.rxbuf);
auto sig_frame = AuthSignatureFrame::Encode(sig);
logger().debug("{} WRITE AuthSignatureFrame: signature={}", conn, sig);
return frame_assembler->write_flush_frame(sig_frame
).then([this] {
return frame_assembler->read_main_preamble();
}).then([this](auto ret) {
expect_tag(Tag::AUTH_SIGNATURE, ret.tag, conn, "post_finish_auth");
return frame_assembler->read_frame_payload();
}).then([this, txbuf=std::move(records.txbuf)](auto payload) {
// handle_auth_signature() logic
auto sig_frame = AuthSignatureFrame::Decode(payload->back());
logger().debug("{} GOT AuthSignatureFrame: signature={}", conn, sig_frame.signature());
const auto actual_tx_sig = auth_meta->session_key.empty() ?
sha256_digest_t() : auth_meta->session_key.hmac_sha256(nullptr, txbuf);
if (sig_frame.signature() != actual_tx_sig) {
logger().warn("{} pre-auth signature mismatch actual_tx_sig={}"
" sig_frame.signature()={}",
conn, actual_tx_sig, sig_frame.signature());
abort_in_fault();
}
});
}
// ESTABLISHING
void ProtocolV2::execute_establishing(SocketConnectionRef existing_conn) {
auto accept_me = [this] {
messenger.register_conn(
seastar::static_pointer_cast<SocketConnection>(
conn.shared_from_this()));
messenger.unaccept_conn(
seastar::static_pointer_cast<SocketConnection>(
conn.shared_from_this()));
};
ceph_assert_always(is_socket_valid);
trigger_state(state_t::ESTABLISHING, io_state_t::delay);
bool is_replace;
if (existing_conn) {
logger().info("{} start establishing: gs={}, pgs={}, cs={}, "
"client_cookie={}, server_cookie={}, {}, new_sid={}, "
"close existing {}",
conn, global_seq, peer_global_seq, connect_seq,
client_cookie, server_cookie,
io_states, frame_assembler->get_socket_shard_id(),
*existing_conn);
is_replace = true;
ProtocolV2 *existing_proto = dynamic_cast<ProtocolV2*>(
existing_conn->protocol.get());
existing_proto->do_close(
true, // is_dispatch_reset
std::move(accept_me));
if (unlikely(state != state_t::ESTABLISHING)) {
logger().warn("{} triggered {} during execute_establishing(), "
"the accept event will not be delivered!",
conn, get_state_name(state));
abort_protocol();
}
} else {
logger().info("{} start establishing: gs={}, pgs={}, cs={}, "
"client_cookie={}, server_cookie={}, {}, new_sid={}, "
"no existing",
conn, global_seq, peer_global_seq, connect_seq,
client_cookie, server_cookie, io_states,
frame_assembler->get_socket_shard_id());
is_replace = false;
accept_me();
}
gated_execute("execute_establishing", conn, [this, is_replace] {
ceph_assert_always(state == state_t::ESTABLISHING);
// set io_handler to a new shard
auto cc_seq = crosscore.prepare_submit();
auto new_io_shard = frame_assembler->get_socket_shard_id();
logger().debug("{} send {} IOHandler::dispatch_accept({})",
conn, cc_seq, new_io_shard);
ConnectionFRef conn_fref = seastar::make_foreign(
conn.shared_from_this());
ceph_assert_always(!pr_switch_io_shard.has_value());
pr_switch_io_shard = seastar::shared_promise<>();
return seastar::smp::submit_to(
io_handler.get_shard_id(),
[this, cc_seq, new_io_shard, is_replace,
conn_fref=std::move(conn_fref)]() mutable {
return io_handler.dispatch_accept(
cc_seq, new_io_shard, std::move(conn_fref), is_replace);
}).then([this, new_io_shard] {
ceph_assert_always(io_handler.get_shard_id() == new_io_shard);
pr_switch_io_shard->set_value();
pr_switch_io_shard = std::nullopt;
// user can make changes
if (unlikely(state != state_t::ESTABLISHING)) {
logger().debug("{} triggered {} after dispatch_accept() during execute_establishing()",
conn, get_state_name(state));
abort_protocol();
}
return send_server_ident();
}).then([this] {
if (unlikely(state != state_t::ESTABLISHING)) {
logger().debug("{} triggered {} at the end of execute_establishing()",
conn, get_state_name(state));
abort_protocol();
}
logger().info("{} established, going to ready", conn);
execute_ready();
}).handle_exception([this](std::exception_ptr eptr) {
fault(state_t::ESTABLISHING, "execute_establishing", eptr);
});
});
}
// ESTABLISHING or REPLACING state
seastar::future<>
ProtocolV2::send_server_ident()
{
ceph_assert_always(state == state_t::ESTABLISHING ||
state == state_t::REPLACING);
// send_server_ident() logic
  // referred to async-conn v2: not assign gs to global_seq
global_seq = messenger.get_global_seq();
auto cc_seq = crosscore.prepare_submit();
logger().debug("{} UPDATE: gs={} for server ident, "
"send {} IOHandler::reset_peer_state()",
conn, global_seq, cc_seq);
// this is required for the case when this connection is being replaced
io_states.reset_peer_state();
gate.dispatch_in_background(
"reset_peer_state", conn, [this, cc_seq] {
return seastar::smp::submit_to(
io_handler.get_shard_id(), [this, cc_seq] {
return io_handler.reset_peer_state(cc_seq);
});
});
if (!conn.policy.lossy) {
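    // generate_random_number<uint64_t>(1, -1ll) draws from [1, UINT64_MAX],
    // so the cookie is never 0 (0 marks the absence of a session)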
server_cookie = ceph::util::generate_random_number<uint64_t>(1, -1ll);
}
uint64_t flags = 0;
if (conn.policy.lossy) {
flags = flags | CEPH_MSG_CONNECT_LOSSY;
}
auto server_ident = ServerIdentFrame::Encode(
messenger.get_myaddrs(),
messenger.get_myname().num(),
global_seq,
conn.policy.features_supported,
conn.policy.features_required | msgr2_required,
flags,
server_cookie);
logger().debug("{} WRITE ServerIdentFrame: addrs={}, gid={},"
" gs={}, features_supported={}, features_required={},"
" flags={}, cookie={}",
conn, messenger.get_myaddrs(), messenger.get_myname().num(),
global_seq, conn.policy.features_supported,
conn.policy.features_required | msgr2_required,
flags, server_cookie);
return frame_assembler->write_flush_frame(server_ident);
}
// REPLACING state
void ProtocolV2::trigger_replacing(bool reconnect,
bool do_reset,
FrameAssemblerV2::mover_t &&mover,
AuthConnectionMetaRef&& new_auth_meta,
uint64_t new_peer_global_seq,
uint64_t new_client_cookie,
entity_name_t new_peer_name,
uint64_t new_conn_features,
uint64_t new_peer_supported_features,
uint64_t new_connect_seq,
uint64_t new_msg_seq)
{
ceph_assert_always(state >= state_t::ESTABLISHING);
ceph_assert_always(state <= state_t::WAIT);
ceph_assert_always(has_socket || state == state_t::CONNECTING);
ceph_assert_always(!mover.socket->is_shutdown());
logger().info("{} start replacing ({}): pgs was {}, cs was {}, "
"client_cookie was {}, {}, new_sid={}",
conn, reconnect ? "reconnected" : "connected",
peer_global_seq, connect_seq, client_cookie,
io_states, mover.socket->get_shard_id());
if (is_socket_valid) {
frame_assembler->shutdown_socket<true>(&gate);
is_socket_valid = false;
}
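  // enter REPLACING in two phases: the state is switched immediately here,
  // while the io_state is only moved to delay (phase2 below) after any
  // pending io-shard switch has completed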
trigger_state_phase1(state_t::REPLACING);
gate.dispatch_in_background(
"trigger_replacing",
conn,
[this,
reconnect,
do_reset,
mover = std::move(mover),
new_auth_meta = std::move(new_auth_meta),
new_client_cookie, new_peer_name,
new_conn_features, new_peer_supported_features,
new_peer_global_seq,
new_connect_seq, new_msg_seq] () mutable {
ceph_assert_always(state == state_t::REPLACING);
auto new_io_shard = mover.socket->get_shard_id();
// state may become CLOSING below, but we cannot abort the chain until
// mover.socket is correctly handled (closed or replaced).
// this is preemptive
return wait_switch_io_shard(
).then([this] {
if (unlikely(state != state_t::REPLACING)) {
ceph_assert_always(state == state_t::CLOSING);
return seastar::now();
}
trigger_state_phase2(state_t::REPLACING, io_state_t::delay);
return wait_exit_io();
}).then([this] {
if (unlikely(state != state_t::REPLACING)) {
ceph_assert_always(state == state_t::CLOSING);
return seastar::now();
}
ceph_assert_always(frame_assembler);
protocol_timer.cancel();
auto done = std::move(execution_done);
execution_done = seastar::now();
return done;
}).then([this, new_io_shard] {
if (unlikely(state != state_t::REPLACING)) {
ceph_assert_always(state == state_t::CLOSING);
return seastar::now();
}
// set io_handler to a new shard
      // we should prevent parallel switching core attempts
auto cc_seq = crosscore.prepare_submit();
logger().debug("{} send {} IOHandler::dispatch_accept({})",
conn, cc_seq, new_io_shard);
ConnectionFRef conn_fref = seastar::make_foreign(
conn.shared_from_this());
ceph_assert_always(!pr_switch_io_shard.has_value());
pr_switch_io_shard = seastar::shared_promise<>();
return seastar::smp::submit_to(
io_handler.get_shard_id(),
[this, cc_seq, new_io_shard,
conn_fref=std::move(conn_fref)]() mutable {
return io_handler.dispatch_accept(
cc_seq, new_io_shard, std::move(conn_fref), false);
}).then([this, new_io_shard] {
ceph_assert_always(io_handler.get_shard_id() == new_io_shard);
pr_switch_io_shard->set_value();
pr_switch_io_shard = std::nullopt;
// user can make changes
});
}).then([this,
reconnect,
do_reset,
mover = std::move(mover),
new_auth_meta = std::move(new_auth_meta),
new_client_cookie, new_peer_name,
new_conn_features, new_peer_supported_features,
new_peer_global_seq,
new_connect_seq, new_msg_seq] () mutable {
if (state == state_t::REPLACING && do_reset) {
reset_session(true);
// user can make changes
}
if (unlikely(state != state_t::REPLACING)) {
logger().debug("{} triggered {} in the middle of trigger_replacing(), abort",
conn, get_state_name(state));
ceph_assert_always(state == state_t::CLOSING);
return mover.socket->close(
).then([sock = std::move(mover.socket)] {
abort_protocol();
});
}
auth_meta = std::move(new_auth_meta);
peer_global_seq = new_peer_global_seq;
gate.dispatch_in_background(
"replace_frame_assembler",
conn,
[this, mover=std::move(mover)]() mutable {
return frame_assembler->replace_by(std::move(mover));
}
);
is_socket_valid = true;
has_socket = true;
if (reconnect) {
connect_seq = new_connect_seq;
// send_reconnect_ok() logic
auto cc_seq = crosscore.prepare_submit();
logger().debug("{} send {} IOHandler::requeue_out_sent_up_to({})",
conn, cc_seq, new_msg_seq);
io_states.requeue_out_sent_up_to();
gate.dispatch_in_background(
"requeue_out_replacing", conn, [this, cc_seq, new_msg_seq] {
return seastar::smp::submit_to(
io_handler.get_shard_id(), [this, cc_seq, new_msg_seq] {
return io_handler.requeue_out_sent_up_to(cc_seq, new_msg_seq);
});
});
auto reconnect_ok = ReconnectOkFrame::Encode(io_states.in_seq);
logger().debug("{} WRITE ReconnectOkFrame: msg_seq={}", conn, io_states.in_seq);
return frame_assembler->write_flush_frame(reconnect_ok);
} else {
client_cookie = new_client_cookie;
assert(conn.get_peer_type() == new_peer_name.type());
if (conn.get_peer_id() == entity_name_t::NEW) {
conn.set_peer_id(new_peer_name.num());
}
conn.set_features(new_conn_features);
peer_supported_features = new_peer_supported_features;
bool is_rev1 = HAVE_MSGR2_FEATURE(peer_supported_features, REVISION_1);
frame_assembler->set_is_rev1(is_rev1);
return send_server_ident();
}
}).then([this, reconnect] {
if (unlikely(state != state_t::REPLACING)) {
logger().debug("{} triggered {} at the end of trigger_replacing(), abort",
conn, get_state_name(state));
ceph_assert_always(state == state_t::CLOSING);
abort_protocol();
}
logger().info("{} replaced ({}), going to ready: "
"gs={}, pgs={}, cs={}, "
"client_cookie={}, server_cookie={}, {}",
conn, reconnect ? "reconnected" : "connected",
global_seq, peer_global_seq, connect_seq,
client_cookie, server_cookie, io_states);
execute_ready();
}).handle_exception([this](std::exception_ptr eptr) {
fault(state_t::REPLACING, "trigger_replacing", eptr);
});
});
}
// READY state
seastar::future<> ProtocolV2::notify_out_fault(
crosscore_t::seq_t cc_seq,
const char *where,
std::exception_ptr eptr,
io_handler_state _io_states)
{
assert(seastar::this_shard_id() == conn.get_messenger_shard_id());
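  // cross-core notifications may arrive out of order, so wait until this
  // sequence number becomes current before processing (same pattern as
  // notify_out() and notify_mark_down())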
if (!crosscore.proceed_or_wait(cc_seq)) {
logger().debug("{} got {} notify_out_fault(), wait at {}",
conn, cc_seq, crosscore.get_in_seq());
return crosscore.wait(cc_seq
).then([this, cc_seq, where, eptr, _io_states] {
return notify_out_fault(cc_seq, where, eptr, _io_states);
});
}
io_states = _io_states;
logger().debug("{} got {} notify_out_fault(): io_states={}",
conn, cc_seq, io_states);
fault(state_t::READY, where, eptr);
return seastar::now();
}
void ProtocolV2::execute_ready()
{
assert(conn.policy.lossy || (client_cookie != 0 && server_cookie != 0));
protocol_timer.cancel();
ceph_assert_always(is_socket_valid);
  // I'm not responsible for shutting down the socket at READY
is_socket_valid = false;
trigger_state(state_t::READY, io_state_t::open);
}
// STANDBY state
void ProtocolV2::execute_standby()
{
ceph_assert_always(!is_socket_valid);
trigger_state(state_t::STANDBY, io_state_t::delay);
}
seastar::future<> ProtocolV2::notify_out(
crosscore_t::seq_t cc_seq)
{
assert(seastar::this_shard_id() == conn.get_messenger_shard_id());
if (!crosscore.proceed_or_wait(cc_seq)) {
logger().debug("{} got {} notify_out(), wait at {}",
conn, cc_seq, crosscore.get_in_seq());
return crosscore.wait(cc_seq
).then([this, cc_seq] {
return notify_out(cc_seq);
});
}
logger().debug("{} got {} notify_out(): at {}",
conn, cc_seq, get_state_name(state));
io_states.is_out_queued = true;
if (unlikely(state == state_t::STANDBY && !conn.policy.server)) {
logger().info("{} notify_out(): at {}, going to CONNECTING",
conn, get_state_name(state));
execute_connecting();
}
return seastar::now();
}
// WAIT state
void ProtocolV2::execute_wait(bool max_backoff)
{
ceph_assert_always(!is_socket_valid);
trigger_state(state_t::WAIT, io_state_t::delay);
gated_execute("execute_wait", conn, [this, max_backoff] {
double backoff = protocol_timer.last_dur();
if (max_backoff) {
backoff = local_conf().get_val<double>("ms_max_backoff");
} else if (backoff > 0) {
backoff = std::min(local_conf().get_val<double>("ms_max_backoff"), 2 * backoff);
} else {
backoff = local_conf().get_val<double>("ms_initial_backoff");
}
return protocol_timer.backoff(backoff).then([this] {
if (unlikely(state != state_t::WAIT)) {
logger().debug("{} triggered {} at the end of execute_wait()",
conn, get_state_name(state));
abort_protocol();
}
logger().info("{} execute_wait(): going to CONNECTING", conn);
execute_connecting();
}).handle_exception([this](std::exception_ptr eptr) {
const char *e_what;
try {
std::rethrow_exception(eptr);
} catch (std::exception &e) {
e_what = e.what();
}
logger().info("{} execute_wait(): protocol aborted at {} -- {}",
conn, get_state_name(state), e_what);
assert(state == state_t::REPLACING ||
state == state_t::CLOSING);
});
});
}
// SERVER_WAIT state
void ProtocolV2::execute_server_wait()
{
ceph_assert_always(is_socket_valid);
trigger_state(state_t::SERVER_WAIT, io_state_t::none);
gated_execute("execute_server_wait", conn, [this] {
return frame_assembler->read_exactly(1
).then([this](auto bptr) {
logger().warn("{} SERVER_WAIT got read, abort", conn);
abort_in_fault();
}).handle_exception([this](std::exception_ptr eptr) {
const char *e_what;
try {
std::rethrow_exception(eptr);
} catch (std::exception &e) {
e_what = e.what();
}
logger().info("{} execute_server_wait(): fault at {}, going to CLOSING -- {}",
conn, get_state_name(state), e_what);
do_close(false);
});
});
}
// CLOSING state
seastar::future<> ProtocolV2::notify_mark_down(
crosscore_t::seq_t cc_seq)
{
assert(seastar::this_shard_id() == conn.get_messenger_shard_id());
if (!crosscore.proceed_or_wait(cc_seq)) {
logger().debug("{} got {} notify_mark_down(), wait at {}",
conn, cc_seq, crosscore.get_in_seq());
return crosscore.wait(cc_seq
).then([this, cc_seq] {
return notify_mark_down(cc_seq);
});
}
logger().debug("{} got {} notify_mark_down()",
conn, cc_seq);
do_close(false);
return seastar::now();
}
seastar::future<> ProtocolV2::close_clean_yielded()
{
// yield() so that do_close() can be called *after* close_clean_yielded() is
// applied to all connections in a container using
// seastar::parallel_for_each(). otherwise, we could erase a connection in
// the container when seastar::parallel_for_each() is still iterating in it.
// that'd lead to a segfault.
return seastar::yield(
).then([this] {
do_close(false);
return pr_closed_clean.get_shared_future();
// connection may be unreferenced from the messenger,
// so need to hold the additional reference.
  }).finally([conn_ref = conn.shared_from_this()] {});
}
void ProtocolV2::do_close(
bool is_dispatch_reset,
std::optional<std::function<void()>> f_accept_new)
{
if (state == state_t::CLOSING) {
// already closing
return;
}
bool is_replace = f_accept_new ? true : false;
logger().info("{} closing: reset {}, replace {}", conn,
is_dispatch_reset ? "yes" : "no",
is_replace ? "yes" : "no");
/*
* atomic operations
*/
ceph_assert_always(!gate.is_closed());
  // messenger registrations, must happen before user events
messenger.closing_conn(
seastar::static_pointer_cast<SocketConnection>(
conn.shared_from_this()));
if (state == state_t::ACCEPTING || state == state_t::SERVER_WAIT) {
messenger.unaccept_conn(
seastar::static_pointer_cast<SocketConnection>(
conn.shared_from_this()));
} else if (state >= state_t::ESTABLISHING && state < state_t::CLOSING) {
messenger.unregister_conn(
seastar::static_pointer_cast<SocketConnection>(
conn.shared_from_this()));
} else {
// cannot happen
ceph_assert(false);
}
if (f_accept_new) {
    // the replacing connection must be registered after the replaced
    // connection is unregistered.
(*f_accept_new)();
}
protocol_timer.cancel();
if (is_socket_valid) {
frame_assembler->shutdown_socket<true>(&gate);
is_socket_valid = false;
}
trigger_state_phase1(state_t::CLOSING);
gate.dispatch_in_background(
"close_io", conn, [this, is_dispatch_reset, is_replace] {
// this is preemptive
return wait_switch_io_shard(
).then([this, is_dispatch_reset, is_replace] {
trigger_state_phase2(state_t::CLOSING, io_state_t::drop);
auto cc_seq = crosscore.prepare_submit();
logger().debug("{} send {} IOHandler::close_io(reset={}, replace={})",
conn, cc_seq, is_dispatch_reset, is_replace);
std::ignore = gate.close(
).then([this] {
ceph_assert_always(!need_exit_io);
ceph_assert_always(!pr_exit_io.has_value());
if (has_socket) {
ceph_assert_always(frame_assembler);
return frame_assembler->close_shutdown_socket();
} else {
return seastar::now();
}
}).then([this] {
logger().debug("{} closed!", conn);
messenger.closed_conn(
seastar::static_pointer_cast<SocketConnection>(
conn.shared_from_this()));
pr_closed_clean.set_value();
#ifdef UNIT_TESTS_BUILT
closed_clean = true;
if (conn.interceptor) {
conn.interceptor->register_conn_closed(
conn.get_local_shared_foreign_from_this());
}
#endif
// connection is unreferenced from the messenger,
// so need to hold the additional reference.
}).handle_exception([conn_ref = conn.shared_from_this(), this] (auto eptr) {
logger().error("{} closing got unexpected exception {}",
conn, eptr);
ceph_abort();
});
return seastar::smp::submit_to(
io_handler.get_shard_id(),
[this, cc_seq, is_dispatch_reset, is_replace] {
return io_handler.close_io(cc_seq, is_dispatch_reset, is_replace);
});
// user can make changes
});
});
}
} // namespace crimson::net
| 90,658 | 37.660554 | 97 | cc |
null | ceph-main/src/crimson/net/ProtocolV2.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/shared_future.hh>
#include <seastar/core/sleep.hh>
#include "io_handler.h"
namespace crimson::net {
class ProtocolV2 final : public HandshakeListener {
using AuthConnectionMetaRef = seastar::lw_shared_ptr<AuthConnectionMeta>;
public:
ProtocolV2(SocketConnection &,
IOHandler &);
~ProtocolV2() final;
ProtocolV2(const ProtocolV2 &) = delete;
ProtocolV2(ProtocolV2 &&) = delete;
ProtocolV2 &operator=(const ProtocolV2 &) = delete;
ProtocolV2 &operator=(ProtocolV2 &&) = delete;
/**
* as HandshakeListener
*/
private:
seastar::future<> notify_out(
crosscore_t::seq_t cc_seq) final;
seastar::future<> notify_out_fault(
crosscore_t::seq_t cc_seq,
const char *where,
std::exception_ptr,
io_handler_state) final;
seastar::future<> notify_mark_down(
crosscore_t::seq_t cc_seq) final;
/*
* as ProtocolV2 to be called by SocketConnection
*/
public:
void start_connect(const entity_addr_t& peer_addr,
const entity_name_t& peer_name);
void start_accept(SocketFRef&& socket,
const entity_addr_t& peer_addr);
seastar::future<> close_clean_yielded();
#ifdef UNIT_TESTS_BUILT
bool is_closed_clean() const {
return closed_clean;
}
bool is_closed() const {
return state == state_t::CLOSING;
}
#endif
private:
using io_state_t = IOHandler::io_state_t;
seastar::future<> wait_switch_io_shard() {
if (pr_switch_io_shard.has_value()) {
return pr_switch_io_shard->get_shared_future();
} else {
return seastar::now();
}
}
seastar::future<> wait_exit_io() {
if (pr_exit_io.has_value()) {
return pr_exit_io->get_shared_future();
} else {
assert(!need_exit_io);
return seastar::now();
}
}
enum class state_t {
NONE = 0,
ACCEPTING,
SERVER_WAIT,
ESTABLISHING,
CONNECTING,
READY,
STANDBY,
WAIT,
REPLACING,
CLOSING
};
static const char *get_state_name(state_t state) {
const char *const statenames[] = {"NONE",
"ACCEPTING",
"SERVER_WAIT",
"ESTABLISHING",
"CONNECTING",
"READY",
"STANDBY",
"WAIT",
"REPLACING",
"CLOSING"};
return statenames[static_cast<int>(state)];
}
void trigger_state_phase1(state_t new_state);
void trigger_state_phase2(state_t new_state, io_state_t new_io_state);
void trigger_state(state_t new_state, io_state_t new_io_state) {
ceph_assert_always(!pr_switch_io_shard.has_value());
trigger_state_phase1(new_state);
trigger_state_phase2(new_state, new_io_state);
}
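  // dispatch func in the background within the gate, tracking its completion
  // via execution_done so that a later state (e.g. trigger_replacing()) can
  // wait for the previous execution to finish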
template <typename Func, typename T>
void gated_execute(const char *what, T &who, Func &&func) {
gate.dispatch_in_background(what, who, [this, &who, &func] {
if (!execution_done.available()) {
// discard the unready future
gate.dispatch_in_background(
"gated_execute_abandon",
who,
[fut=std::move(execution_done)]() mutable {
return std::move(fut);
}
);
}
seastar::promise<> pr;
execution_done = pr.get_future();
return seastar::futurize_invoke(std::forward<Func>(func)
).finally([pr=std::move(pr)]() mutable {
pr.set_value();
});
});
}
void fault(state_t expected_state,
const char *where,
std::exception_ptr eptr);
void reset_session(bool is_full);
seastar::future<std::tuple<entity_type_t, entity_addr_t>>
banner_exchange(bool is_connect);
enum class next_step_t {
ready,
wait,
none, // protocol should have been aborted or failed
};
// CONNECTING (client)
seastar::future<> handle_auth_reply();
inline seastar::future<> client_auth() {
std::vector<uint32_t> empty;
return client_auth(empty);
}
seastar::future<> client_auth(std::vector<uint32_t> &allowed_methods);
seastar::future<next_step_t> process_wait();
seastar::future<next_step_t> client_connect();
seastar::future<next_step_t> client_reconnect();
void execute_connecting();
// ACCEPTING (server)
seastar::future<> _auth_bad_method(int r);
seastar::future<> _handle_auth_request(bufferlist& auth_payload, bool more);
seastar::future<> server_auth();
bool validate_peer_name(const entity_name_t& peer_name) const;
seastar::future<next_step_t> send_wait();
seastar::future<next_step_t> reuse_connection(ProtocolV2* existing_proto,
bool do_reset=false,
bool reconnect=false,
uint64_t conn_seq=0,
uint64_t msg_seq=0);
seastar::future<next_step_t> handle_existing_connection(SocketConnectionRef existing_conn);
seastar::future<next_step_t> server_connect();
seastar::future<next_step_t> read_reconnect();
seastar::future<next_step_t> send_retry(uint64_t connect_seq);
seastar::future<next_step_t> send_retry_global(uint64_t global_seq);
seastar::future<next_step_t> send_reset(bool full);
seastar::future<next_step_t> server_reconnect();
void execute_accepting();
// CONNECTING/ACCEPTING
seastar::future<> finish_auth();
// ESTABLISHING
void execute_establishing(SocketConnectionRef existing_conn);
// ESTABLISHING/REPLACING (server)
seastar::future<> send_server_ident();
// REPLACING (server)
void trigger_replacing(bool reconnect,
bool do_reset,
FrameAssemblerV2::mover_t &&mover,
AuthConnectionMetaRef&& new_auth_meta,
uint64_t new_peer_global_seq,
// !reconnect
uint64_t new_client_cookie,
entity_name_t new_peer_name,
uint64_t new_conn_features,
uint64_t new_peer_supported_features,
// reconnect
uint64_t new_connect_seq,
uint64_t new_msg_seq);
// READY
void execute_ready();
// STANDBY
void execute_standby();
// WAIT
void execute_wait(bool max_backoff);
// SERVER_WAIT
void execute_server_wait();
// CLOSING
// reentrant
void do_close(bool is_dispatch_reset,
std::optional<std::function<void()>> f_accept_new=std::nullopt);
private:
SocketConnection &conn;
SocketMessenger &messenger;
IOHandler &io_handler;
// asynchronously populated from io_handler
io_handler_state io_states;
crosscore_t crosscore;
bool has_socket = false;
// the socket exists and it is not shutdown
bool is_socket_valid = false;
FrameAssemblerV2Ref frame_assembler;
bool need_notify_out = false;
std::optional<seastar::shared_promise<>> pr_switch_io_shard;
bool need_exit_io = false;
std::optional<seastar::shared_promise<>> pr_exit_io;
AuthConnectionMetaRef auth_meta;
crimson::common::Gated gate;
seastar::shared_promise<> pr_closed_clean;
#ifdef UNIT_TESTS_BUILT
bool closed_clean = false;
#endif
state_t state = state_t::NONE;
uint64_t peer_supported_features = 0;
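  // session cookies: 0 means no session has been established yet
  // (see execute_connecting() and send_server_ident())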
uint64_t client_cookie = 0;
uint64_t server_cookie = 0;
uint64_t global_seq = 0;
uint64_t peer_global_seq = 0;
uint64_t connect_seq = 0;
seastar::future<> execution_done = seastar::now();
class Timer {
double last_dur_ = 0.0;
const SocketConnection& conn;
std::optional<seastar::abort_source> as;
public:
Timer(SocketConnection& conn) : conn(conn) {}
double last_dur() const { return last_dur_; }
seastar::future<> backoff(double seconds);
void cancel() {
last_dur_ = 0.0;
if (as) {
as->request_abort();
as = std::nullopt;
}
}
};
Timer protocol_timer;
};
struct create_handlers_ret {
std::unique_ptr<ConnectionHandler> io_handler;
std::unique_ptr<ProtocolV2> protocol;
};
inline create_handlers_ret create_handlers(ChainedDispatchers &dispatchers, SocketConnection &conn) {
std::unique_ptr<ConnectionHandler> io_handler = std::make_unique<IOHandler>(dispatchers, conn);
IOHandler &io_handler_concrete = static_cast<IOHandler&>(*io_handler);
auto protocol = std::make_unique<ProtocolV2>(conn, io_handler_concrete);
io_handler_concrete.set_handshake_listener(*protocol);
return {std::move(io_handler), std::move(protocol)};
}
} // namespace crimson::net
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::net::ProtocolV2> : fmt::ostream_formatter {};
#endif
| 8,992 | 27.015576 | 101 | h |
null | ceph-main/src/crimson/net/Socket.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "Socket.h"
#include <seastar/core/sleep.hh>
#include <seastar/core/when_all.hh>
#include <seastar/net/packet.hh>
#include "crimson/common/log.h"
#include "Errors.h"
using crimson::common::local_conf;
namespace crimson::net {
namespace {
seastar::logger& logger() {
return crimson::get_logger(ceph_subsys_ms);
}
using tmp_buf = seastar::temporary_buffer<char>;
using packet = seastar::net::packet;
// an input_stream consumer that reads buffer segments into a bufferlist up to
// the given number of remaining bytes
struct bufferlist_consumer {
bufferlist& bl;
size_t& remaining;
bufferlist_consumer(bufferlist& bl, size_t& remaining)
: bl(bl), remaining(remaining) {}
using consumption_result_type = typename seastar::input_stream<char>::consumption_result_type;
// consume some or all of a buffer segment
seastar::future<consumption_result_type> operator()(tmp_buf&& data) {
if (remaining >= data.size()) {
// consume the whole buffer
remaining -= data.size();
bl.append(buffer::create(std::move(data)));
if (remaining > 0) {
        // continue consuming to request more segments
return seastar::make_ready_future<consumption_result_type>(
seastar::continue_consuming{});
} else {
        // return an empty buffer to signal that we're done
return seastar::make_ready_future<consumption_result_type>(
consumption_result_type::stop_consuming_type({}));
}
}
if (remaining > 0) {
// consume the front
bl.append(buffer::create(data.share(0, remaining)));
data.trim_front(remaining);
remaining = 0;
}
// give the rest back to signal that we're done
return seastar::make_ready_future<consumption_result_type>(
consumption_result_type::stop_consuming_type{std::move(data)});
};
};
seastar::future<> inject_delay()
{
if (float delay_period = local_conf()->ms_inject_internal_delays;
delay_period) {
logger().debug("Socket::inject_delay: sleep for {}", delay_period);
return seastar::sleep(
std::chrono::milliseconds((int)(delay_period * 1000.0)));
}
return seastar::now();
}
void inject_failure()
{
if (local_conf()->ms_inject_socket_failures) {
uint64_t rand =
ceph::util::generate_random_number<uint64_t>(1, RAND_MAX);
if (rand % local_conf()->ms_inject_socket_failures == 0) {
logger().warn("Socket::inject_failure: injecting socket failure");
throw std::system_error(make_error_code(
error::negotiation_failure));
}
}
}
} // anonymous namespace
Socket::Socket(
seastar::connected_socket &&_socket,
side_t _side,
uint16_t e_port,
construct_tag)
: sid{seastar::this_shard_id()},
socket(std::move(_socket)),
in(socket.input()),
    // the default buffer size 8192 is too small, which may impact our write
// performance. see seastar::net::connected_socket::output()
out(socket.output(65536)),
socket_is_shutdown(false),
side(_side),
ephemeral_port(e_port)
{
}
Socket::~Socket()
{
assert(seastar::this_shard_id() == sid);
#ifndef NDEBUG
assert(closed);
#endif
}
seastar::future<bufferlist>
Socket::read(size_t bytes)
{
assert(seastar::this_shard_id() == sid);
#ifdef UNIT_TESTS_BUILT
return try_trap_pre(next_trap_read).then([bytes, this] {
#endif
if (bytes == 0) {
return seastar::make_ready_future<bufferlist>();
}
r.buffer.clear();
r.remaining = bytes;
return in.consume(bufferlist_consumer{r.buffer, r.remaining}).then([this] {
if (r.remaining) { // throw on short reads
throw std::system_error(make_error_code(error::read_eof));
}
inject_failure();
return inject_delay().then([this] {
return seastar::make_ready_future<bufferlist>(std::move(r.buffer));
});
});
#ifdef UNIT_TESTS_BUILT
}).then([this](auto buf) {
return try_trap_post(next_trap_read
).then([buf = std::move(buf)]() mutable {
return std::move(buf);
});
});
#endif
}
seastar::future<bufferptr>
Socket::read_exactly(size_t bytes) {
assert(seastar::this_shard_id() == sid);
#ifdef UNIT_TESTS_BUILT
return try_trap_pre(next_trap_read).then([bytes, this] {
#endif
if (bytes == 0) {
return seastar::make_ready_future<bufferptr>();
}
return in.read_exactly(bytes).then([bytes](auto buf) {
bufferptr ptr(buffer::create(buf.share()));
if (ptr.length() < bytes) {
throw std::system_error(make_error_code(error::read_eof));
}
inject_failure();
return inject_delay(
).then([ptr = std::move(ptr)]() mutable {
return seastar::make_ready_future<bufferptr>(std::move(ptr));
});
});
#ifdef UNIT_TESTS_BUILT
}).then([this](auto ptr) {
return try_trap_post(next_trap_read
).then([ptr = std::move(ptr)]() mutable {
return std::move(ptr);
});
});
#endif
}
seastar::future<>
Socket::write(bufferlist buf)
{
assert(seastar::this_shard_id() == sid);
#ifdef UNIT_TESTS_BUILT
return try_trap_pre(next_trap_write
).then([buf = std::move(buf), this]() mutable {
#endif
inject_failure();
return inject_delay(
).then([buf = std::move(buf), this]() mutable {
packet p(std::move(buf));
return out.write(std::move(p));
});
#ifdef UNIT_TESTS_BUILT
}).then([this] {
return try_trap_post(next_trap_write);
});
#endif
}
seastar::future<>
Socket::flush()
{
assert(seastar::this_shard_id() == sid);
inject_failure();
return inject_delay().then([this] {
return out.flush();
});
}
seastar::future<>
Socket::write_flush(bufferlist buf)
{
assert(seastar::this_shard_id() == sid);
#ifdef UNIT_TESTS_BUILT
return try_trap_pre(next_trap_write
).then([buf = std::move(buf), this]() mutable {
#endif
inject_failure();
return inject_delay(
).then([buf = std::move(buf), this]() mutable {
packet p(std::move(buf));
return out.write(std::move(p)
).then([this] {
return out.flush();
});
});
#ifdef UNIT_TESTS_BUILT
}).then([this] {
return try_trap_post(next_trap_write);
});
#endif
}
void Socket::shutdown()
{
assert(seastar::this_shard_id() == sid);
socket_is_shutdown = true;
socket.shutdown_input();
socket.shutdown_output();
}
static inline seastar::future<>
close_and_handle_errors(seastar::output_stream<char>& out)
{
return out.close().handle_exception_type([](const std::system_error& e) {
if (e.code() != std::errc::broken_pipe &&
e.code() != std::errc::connection_reset) {
logger().error("Socket::close(): unexpected error {}", e.what());
ceph_abort();
}
// can happen when out is already shutdown, ignore
});
}
seastar::future<>
Socket::close()
{
assert(seastar::this_shard_id() == sid);
#ifndef NDEBUG
ceph_assert_always(!closed);
closed = true;
#endif
return seastar::when_all_succeed(
inject_delay(),
in.close(),
close_and_handle_errors(out)
).then_unpack([] {
return seastar::make_ready_future<>();
}).handle_exception([](auto eptr) {
const char *e_what;
try {
std::rethrow_exception(eptr);
} catch (std::exception &e) {
e_what = e.what();
}
logger().error("Socket::close(): unexpected exception {}", e_what);
ceph_abort();
});
}
seastar::future<SocketRef>
Socket::connect(const entity_addr_t &peer_addr)
{
inject_failure();
return inject_delay(
).then([peer_addr] {
return seastar::connect(peer_addr.in4_addr());
}).then([peer_addr](seastar::connected_socket socket) {
auto ret = std::make_unique<Socket>(
std::move(socket), side_t::connector, 0, construct_tag{});
logger().debug("Socket::connect(): connected to {}, socket {}",
peer_addr, fmt::ptr(ret));
return ret;
});
}
#ifdef UNIT_TESTS_BUILT
void Socket::set_trap(bp_type_t type, bp_action_t action, socket_blocker* blocker_) {
assert(seastar::this_shard_id() == sid);
blocker = blocker_;
if (type == bp_type_t::READ) {
ceph_assert_always(next_trap_read == bp_action_t::CONTINUE);
next_trap_read = action;
} else { // type == bp_type_t::WRITE
if (next_trap_write == bp_action_t::CONTINUE) {
next_trap_write = action;
} else if (next_trap_write == bp_action_t::FAULT) {
// do_sweep_messages() may combine multiple write events into one socket write
ceph_assert_always(action == bp_action_t::FAULT || action == bp_action_t::CONTINUE);
} else {
ceph_abort();
}
}
}
seastar::future<>
Socket::try_trap_pre(bp_action_t& trap) {
auto action = trap;
trap = bp_action_t::CONTINUE;
switch (action) {
case bp_action_t::CONTINUE:
break;
case bp_action_t::FAULT:
logger().info("[Test] got FAULT");
throw std::system_error(make_error_code(error::negotiation_failure));
case bp_action_t::BLOCK:
logger().info("[Test] got BLOCK");
return blocker->block();
case bp_action_t::STALL:
trap = action;
break;
default:
ceph_abort("unexpected action from trap");
}
return seastar::make_ready_future<>();
}
seastar::future<>
Socket::try_trap_post(bp_action_t& trap) {
auto action = trap;
trap = bp_action_t::CONTINUE;
switch (action) {
case bp_action_t::CONTINUE:
break;
case bp_action_t::STALL:
logger().info("[Test] got STALL and block");
force_shutdown();
return blocker->block();
default:
ceph_abort("unexpected action from trap");
}
return seastar::make_ready_future<>();
}
#endif
ShardedServerSocket::ShardedServerSocket(
seastar::shard_id sid,
bool dispatch_only_on_primary_sid,
construct_tag)
: primary_sid{sid}, dispatch_only_on_primary_sid{dispatch_only_on_primary_sid}
{
}
ShardedServerSocket::~ShardedServerSocket()
{
assert(!listener);
  // detect whether the user has called shutdown_destroy() properly
ceph_assert_always(!service);
}
listen_ertr::future<>
ShardedServerSocket::listen(entity_addr_t addr)
{
ceph_assert_always(seastar::this_shard_id() == primary_sid);
logger().debug("ShardedServerSocket({})::listen()...", addr);
return this->container().invoke_on_all([addr](auto& ss) {
ss.listen_addr = addr;
seastar::socket_address s_addr(addr.in4_addr());
seastar::listen_options lo;
lo.reuse_address = true;
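    // with set_fixed_cpu(), accepted sockets are dispatched only on the
    // primary shard (see the assertion in accept() below)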
if (ss.dispatch_only_on_primary_sid) {
lo.set_fixed_cpu(ss.primary_sid);
}
ss.listener = seastar::listen(s_addr, lo);
}).then([] {
return listen_ertr::now();
}).handle_exception_type(
[addr](const std::system_error& e) -> listen_ertr::future<> {
if (e.code() == std::errc::address_in_use) {
logger().debug("ShardedServerSocket({})::listen(): address in use", addr);
return crimson::ct_error::address_in_use::make();
} else if (e.code() == std::errc::address_not_available) {
logger().debug("ShardedServerSocket({})::listen(): address not available",
addr);
return crimson::ct_error::address_not_available::make();
}
    logger().error("ShardedServerSocket({})::listen(): "
                   "got unexpected error {}", addr, e.what());
ceph_abort();
});
}
seastar::future<>
ShardedServerSocket::accept(accept_func_t &&_fn_accept)
{
ceph_assert_always(seastar::this_shard_id() == primary_sid);
logger().debug("ShardedServerSocket({})::accept()...", listen_addr);
return this->container().invoke_on_all([_fn_accept](auto &ss) {
assert(ss.listener);
ss.fn_accept = _fn_accept;
    // gate accepting
    // ShardedServerSocket::shutdown_destroy() will drain the continuations in
    // the gate, so ignore the returned future
std::ignore = seastar::with_gate(ss.shutdown_gate, [&ss] {
return seastar::keep_doing([&ss] {
return ss.listener->accept(
).then([&ss](seastar::accept_result accept_result) {
#ifndef NDEBUG
if (ss.dispatch_only_on_primary_sid) {
// see seastar::listen_options::set_fixed_cpu()
ceph_assert_always(seastar::this_shard_id() == ss.primary_sid);
}
#endif
auto [socket, paddr] = std::move(accept_result);
entity_addr_t peer_addr;
peer_addr.set_sockaddr(&paddr.as_posix_sockaddr());
peer_addr.set_type(ss.listen_addr.get_type());
SocketRef _socket = std::make_unique<Socket>(
std::move(socket), Socket::side_t::acceptor,
peer_addr.get_port(), Socket::construct_tag{});
logger().debug("ShardedServerSocket({})::accept(): accepted peer {}, "
"socket {}, dispatch_only_on_primary_sid = {}",
ss.listen_addr, peer_addr, fmt::ptr(_socket),
ss.dispatch_only_on_primary_sid);
std::ignore = seastar::with_gate(
ss.shutdown_gate,
[socket=std::move(_socket), peer_addr, &ss]() mutable {
return ss.fn_accept(std::move(socket), peer_addr
).handle_exception([&ss, peer_addr](auto eptr) {
const char *e_what;
try {
std::rethrow_exception(eptr);
} catch (std::exception &e) {
e_what = e.what();
}
logger().error("ShardedServerSocket({})::accept(): "
"fn_accept(s, {}) got unexpected exception {}",
ss.listen_addr, peer_addr, e_what);
ceph_abort();
});
});
});
}).handle_exception_type([&ss](const std::system_error& e) {
if (e.code() == std::errc::connection_aborted ||
e.code() == std::errc::invalid_argument) {
logger().debug("ShardedServerSocket({})::accept(): stopped ({})",
ss.listen_addr, e.what());
} else {
throw;
}
}).handle_exception([&ss](auto eptr) {
const char *e_what;
try {
std::rethrow_exception(eptr);
} catch (std::exception &e) {
e_what = e.what();
}
logger().error("ShardedServerSocket({})::accept(): "
"got unexpected exception {}", ss.listen_addr, e_what);
ceph_abort();
});
});
});
}
seastar::future<>
ShardedServerSocket::shutdown_destroy()
{
assert(seastar::this_shard_id() == primary_sid);
logger().debug("ShardedServerSocket({})::shutdown_destroy()...", listen_addr);
// shutdown shards
return this->container().invoke_on_all([](auto& ss) {
if (ss.listener) {
ss.listener->abort_accept();
}
return ss.shutdown_gate.close();
}).then([this] {
// destroy shards
return this->container().invoke_on_all([](auto& ss) {
assert(ss.shutdown_gate.is_closed());
ss.listen_addr = entity_addr_t();
ss.listener.reset();
});
}).then([this] {
// stop the sharded service: we should only construct/stop shards on #0
return this->container().invoke_on(0, [](auto& ss) {
assert(ss.service);
return ss.service->stop().finally([cleanup = std::move(ss.service)] {});
});
});
}
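/*
 * Construct the underlying seastar::sharded<ShardedServerSocket> service.
 * Shards may only be constructed/stopped on shard #0, so the construction is
 * submitted there and the shard-#0 instance keeps ownership of the sharded
 * service handle; shutdown_destroy() later stops and releases it on #0 again.
 * The caller receives a pointer to its local (primary-shard) instance.
 */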
seastar::future<ShardedServerSocket*>
ShardedServerSocket::create(bool dispatch_only_on_this_shard)
{
auto primary_sid = seastar::this_shard_id();
// start the sharded service: we should only construct/stop shards on #0
return seastar::smp::submit_to(0, [primary_sid, dispatch_only_on_this_shard] {
auto service = std::make_unique<sharded_service_t>();
return service->start(
primary_sid, dispatch_only_on_this_shard, construct_tag{}
).then([service = std::move(service)]() mutable {
auto p_shard = service.get();
p_shard->local().service = std::move(service);
return p_shard;
});
}).then([](auto p_shard) {
return &p_shard->local();
});
}
} // namespace crimson::net
| 15,839 | 29.403071 | 96 | cc |
null | ceph-main/src/crimson/net/Socket.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/gate.hh>
#include <seastar/core/reactor.hh>
#include <seastar/core/sharded.hh>
#include "include/buffer.h"
#include "crimson/common/log.h"
#include "Errors.h"
#include "Fwd.h"
#ifdef UNIT_TESTS_BUILT
#include "Interceptor.h"
#endif
namespace crimson::net {
class Socket;
using SocketRef = std::unique_ptr<Socket>;
using SocketFRef = seastar::foreign_ptr<SocketRef>;
class Socket {
struct construct_tag {};
public:
// if acceptor side, peer is using a different port (ephemeral_port)
// if connector side, I'm using a different port (ephemeral_port)
enum class side_t {
acceptor,
connector
};
Socket(seastar::connected_socket &&, side_t, uint16_t e_port, construct_tag);
~Socket();
Socket(Socket&& o) = delete;
seastar::shard_id get_shard_id() const {
return sid;
}
side_t get_side() const {
return side;
}
uint16_t get_ephemeral_port() const {
return ephemeral_port;
}
seastar::socket_address get_local_address() const {
return socket.local_address();
}
bool is_shutdown() const {
assert(seastar::this_shard_id() == sid);
return socket_is_shutdown;
}
// learn my ephemeral_port as connector.
// unfortunately, there's no way to identify which port I'm using as
  // connector with the current seastar interface.
void learn_ephemeral_port_as_connector(uint16_t port) {
assert(side == side_t::connector &&
(ephemeral_port == 0 || ephemeral_port == port));
ephemeral_port = port;
}
/// read the requested number of bytes into a bufferlist
seastar::future<bufferlist> read(size_t bytes);
seastar::future<bufferptr> read_exactly(size_t bytes);
seastar::future<> write(bufferlist);
seastar::future<> flush();
seastar::future<> write_flush(bufferlist);
  // preemptively disable further reads or writes; can only be shut down once.
void shutdown();
/// Socket can only be closed once.
seastar::future<> close();
static seastar::future<SocketRef>
connect(const entity_addr_t& peer_addr);
/*
* test interfaces
*/
// shutdown for tests
void force_shutdown() {
assert(seastar::this_shard_id() == sid);
socket.shutdown_input();
socket.shutdown_output();
}
// shutdown input_stream only, for tests
void force_shutdown_in() {
assert(seastar::this_shard_id() == sid);
socket.shutdown_input();
}
// shutdown output_stream only, for tests
void force_shutdown_out() {
assert(seastar::this_shard_id() == sid);
socket.shutdown_output();
}
private:
const seastar::shard_id sid;
seastar::connected_socket socket;
seastar::input_stream<char> in;
seastar::output_stream<char> out;
bool socket_is_shutdown;
side_t side;
uint16_t ephemeral_port;
#ifndef NDEBUG
bool closed = false;
#endif
/// buffer state for read()
struct {
bufferlist buffer;
size_t remaining;
} r;
#ifdef UNIT_TESTS_BUILT
public:
void set_trap(bp_type_t type, bp_action_t action, socket_blocker* blocker_);
private:
seastar::future<> try_trap_pre(bp_action_t& trap);
seastar::future<> try_trap_post(bp_action_t& trap);
bp_action_t next_trap_read = bp_action_t::CONTINUE;
bp_action_t next_trap_write = bp_action_t::CONTINUE;
socket_blocker* blocker = nullptr;
#endif
friend class ShardedServerSocket;
};
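/*
 * A minimal connector-side usage sketch (illustrative only; it assumes a
 * valid msgr2 entity_addr_t, uses an arbitrary read size, and omits error
 * handling):
 *
 *   Socket::connect(peer_addr).then([] (SocketRef sock) {
 *     return seastar::do_with(std::move(sock), [] (SocketRef &sock) {
 *       bufferlist bl;
 *       bl.append("payload");
 *       return sock->write_flush(std::move(bl)).then([&sock] {
 *         return sock->read(128);
 *       }).then([&sock] (bufferlist reply) {
 *         return sock->close();
 *       });
 *     });
 *   });
 */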
using listen_ertr = crimson::errorator<
crimson::ct_error::address_in_use, // The address is already bound
crimson::ct_error::address_not_available // https://techoverflow.net/2021/08/06/how-i-fixed-python-oserror-errno-99-cannot-assign-requested-address/
>;
class ShardedServerSocket
: public seastar::peering_sharded_service<ShardedServerSocket> {
struct construct_tag {};
public:
ShardedServerSocket(
seastar::shard_id sid,
bool dispatch_only_on_primary_sid,
construct_tag);
~ShardedServerSocket();
ShardedServerSocket(ShardedServerSocket&&) = delete;
ShardedServerSocket(const ShardedServerSocket&) = delete;
ShardedServerSocket& operator=(ShardedServerSocket&&) = delete;
ShardedServerSocket& operator=(const ShardedServerSocket&) = delete;
bool is_fixed_shard_dispatching() const {
return dispatch_only_on_primary_sid;
}
listen_ertr::future<> listen(entity_addr_t addr);
using accept_func_t =
std::function<seastar::future<>(SocketRef, entity_addr_t)>;
seastar::future<> accept(accept_func_t &&_fn_accept);
seastar::future<> shutdown_destroy();
static seastar::future<ShardedServerSocket*> create(
bool dispatch_only_on_this_shard);
private:
const seastar::shard_id primary_sid;
/// XXX: Remove once all infrastructure uses multi-core messenger
const bool dispatch_only_on_primary_sid;
entity_addr_t listen_addr;
std::optional<seastar::server_socket> listener;
seastar::gate shutdown_gate;
accept_func_t fn_accept;
using sharded_service_t = seastar::sharded<ShardedServerSocket>;
std::unique_ptr<sharded_service_t> service;
};
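/*
 * A minimal server-side usage sketch (illustrative only; the accept callback
 * body and error handling are left out, and listen_addr is assumed to be a
 * bindable msgr2 address):
 *
 *   ShardedServerSocket::create(true
 *   ).then([listen_addr] (ShardedServerSocket *server) {
 *     return server->listen(listen_addr
 *     ).safe_then([server] {
 *       return server->accept([] (SocketRef sock, entity_addr_t peer) {
 *         // hand the accepted socket over to a connection...
 *         return seastar::now();
 *       });
 *     }, listen_ertr::all_same_way([] (const std::error_code &e) {
 *       // address_in_use / address_not_available
 *     }));
 *   });
 *
 * The server must be torn down with shutdown_destroy() before destruction.
 */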
} // namespace crimson::net
| 5,090 | 24.20297 | 150 | h |
null | ceph-main/src/crimson/net/SocketConnection.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "SocketConnection.h"
#include "ProtocolV2.h"
#include "SocketMessenger.h"
#ifdef UNIT_TESTS_BUILT
#include "Interceptor.h"
#endif
using std::ostream;
using crimson::common::local_conf;
namespace crimson::net {
SocketConnection::SocketConnection(SocketMessenger& messenger,
ChainedDispatchers& dispatchers)
: msgr_sid{messenger.get_shard_id()}, messenger(messenger)
{
auto ret = create_handlers(dispatchers, *this);
io_handler = std::move(ret.io_handler);
protocol = std::move(ret.protocol);
#ifdef UNIT_TESTS_BUILT
if (messenger.interceptor) {
interceptor = messenger.interceptor;
interceptor->register_conn(this->get_local_shared_foreign_from_this());
}
#endif
}
SocketConnection::~SocketConnection() {}
bool SocketConnection::is_connected() const
{
return io_handler->is_connected();
}
#ifdef UNIT_TESTS_BUILT
bool SocketConnection::is_closed() const
{
assert(seastar::this_shard_id() == msgr_sid);
return protocol->is_closed();
}
bool SocketConnection::is_closed_clean() const
{
assert(seastar::this_shard_id() == msgr_sid);
return protocol->is_closed_clean();
}
#endif
bool SocketConnection::peer_wins() const
{
assert(seastar::this_shard_id() == msgr_sid);
return (messenger.get_myaddr() > peer_addr || policy.server);
}
seastar::future<> SocketConnection::send(MessageURef _msg)
{
// may be invoked from any core
MessageFRef msg = seastar::make_foreign(std::move(_msg));
return io_handler->send(std::move(msg));
}
seastar::future<> SocketConnection::send_keepalive()
{
// may be invoked from any core
return io_handler->send_keepalive();
}
SocketConnection::clock_t::time_point
SocketConnection::get_last_keepalive() const
{
return io_handler->get_last_keepalive();
}
SocketConnection::clock_t::time_point
SocketConnection::get_last_keepalive_ack() const
{
return io_handler->get_last_keepalive_ack();
}
void SocketConnection::set_last_keepalive_ack(clock_t::time_point when)
{
io_handler->set_last_keepalive_ack(when);
}
void SocketConnection::mark_down()
{
io_handler->mark_down();
}
void
SocketConnection::start_connect(const entity_addr_t& _peer_addr,
const entity_name_t& _peer_name)
{
assert(seastar::this_shard_id() == msgr_sid);
protocol->start_connect(_peer_addr, _peer_name);
}
void
SocketConnection::start_accept(SocketFRef&& sock,
const entity_addr_t& _peer_addr)
{
assert(seastar::this_shard_id() == msgr_sid);
protocol->start_accept(std::move(sock), _peer_addr);
}
seastar::future<>
SocketConnection::close_clean_yielded()
{
assert(seastar::this_shard_id() == msgr_sid);
return protocol->close_clean_yielded();
}
seastar::socket_address SocketConnection::get_local_address() const {
assert(seastar::this_shard_id() == msgr_sid);
return socket->get_local_address();
}
ConnectionRef
SocketConnection::get_local_shared_foreign_from_this()
{
assert(seastar::this_shard_id() == msgr_sid);
return make_local_shared_foreign(
seastar::make_foreign(shared_from_this()));
}
SocketMessenger &
SocketConnection::get_messenger() const
{
assert(seastar::this_shard_id() == msgr_sid);
return messenger;
}
seastar::shard_id
SocketConnection::get_messenger_shard_id() const
{
return msgr_sid;
}
void SocketConnection::set_peer_type(entity_type_t peer_type) {
assert(seastar::this_shard_id() == msgr_sid);
// it is not allowed to assign an unknown value when the current
// value is known
assert(!(peer_type == 0 &&
peer_name.type() != 0));
// it is not allowed to assign a different known value when the
// current value is also known.
assert(!(peer_type != 0 &&
peer_name.type() != 0 &&
peer_type != peer_name.type()));
peer_name._type = peer_type;
}
void SocketConnection::set_peer_id(int64_t peer_id) {
assert(seastar::this_shard_id() == msgr_sid);
// it is not allowed to assign an unknown value when the current
// value is known
assert(!(peer_id == entity_name_t::NEW &&
peer_name.num() != entity_name_t::NEW));
// it is not allowed to assign a different known value when the
// current value is also known.
assert(!(peer_id != entity_name_t::NEW &&
peer_name.num() != entity_name_t::NEW &&
peer_id != peer_name.num()));
peer_name._num = peer_id;
}
void SocketConnection::set_features(uint64_t f) {
assert(seastar::this_shard_id() == msgr_sid);
features = f;
}
void SocketConnection::set_socket(Socket *s) {
assert(seastar::this_shard_id() == msgr_sid);
socket = s;
}
void SocketConnection::print(ostream& out) const {
out << (void*)this << " ";
messenger.print(out);
if (seastar::this_shard_id() != msgr_sid) {
out << " >> " << get_peer_name() << " " << peer_addr;
} else if (!socket) {
out << " >> " << get_peer_name() << " " << peer_addr;
} else if (socket->get_side() == Socket::side_t::acceptor) {
out << " >> " << get_peer_name() << " " << peer_addr
<< "@" << socket->get_ephemeral_port();
} else { // socket->get_side() == Socket::side_t::connector
out << "@" << socket->get_ephemeral_port()
<< " >> " << get_peer_name() << " " << peer_addr;
}
}
} // namespace crimson::net
| 5,666 | 25.985714 | 75 | cc |
null | ceph-main/src/crimson/net/SocketConnection.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <seastar/core/sharded.hh>
#include "msg/Policy.h"
#include "crimson/common/throttle.h"
#include "crimson/net/Connection.h"
#include "crimson/net/Socket.h"
namespace crimson::net {
class ProtocolV2;
class SocketMessenger;
class SocketConnection;
using SocketConnectionRef = seastar::shared_ptr<SocketConnection>;
#ifdef UNIT_TESTS_BUILT
class Interceptor;
#endif
/**
* ConnectionHandler
*
* The interface class to implement Connection, called by SocketConnection.
*
* The operations must be done in get_shard_id().
*/
class ConnectionHandler {
public:
using clock_t = seastar::lowres_system_clock;
virtual ~ConnectionHandler() = default;
ConnectionHandler(const ConnectionHandler &) = delete;
ConnectionHandler(ConnectionHandler &&) = delete;
ConnectionHandler &operator=(const ConnectionHandler &) = delete;
ConnectionHandler &operator=(ConnectionHandler &&) = delete;
virtual seastar::shard_id get_shard_id() const = 0;
virtual bool is_connected() const = 0;
virtual seastar::future<> send(MessageFRef) = 0;
virtual seastar::future<> send_keepalive() = 0;
virtual clock_t::time_point get_last_keepalive() const = 0;
virtual clock_t::time_point get_last_keepalive_ack() const = 0;
virtual void set_last_keepalive_ack(clock_t::time_point) = 0;
virtual void mark_down() = 0;
protected:
ConnectionHandler() = default;
};
class SocketConnection : public Connection {
/*
* Connection interfaces, public to users
* Working in ConnectionHandler::get_shard_id()
*/
public:
SocketConnection(SocketMessenger& messenger,
ChainedDispatchers& dispatchers);
~SocketConnection() override;
const seastar::shard_id get_shard_id() const override {
return io_handler->get_shard_id();
}
const entity_name_t &get_peer_name() const override {
return peer_name;
}
const entity_addr_t &get_peer_addr() const override {
return peer_addr;
}
const entity_addr_t &get_peer_socket_addr() const override {
return target_addr;
}
uint64_t get_features() const override {
return features;
}
bool is_connected() const override;
seastar::future<> send(MessageURef msg) override;
seastar::future<> send_keepalive() override;
clock_t::time_point get_last_keepalive() const override;
clock_t::time_point get_last_keepalive_ack() const override;
void set_last_keepalive_ack(clock_t::time_point when) override;
void mark_down() override;
bool has_user_private() const override {
return user_private != nullptr;
}
user_private_t &get_user_private() override {
assert(has_user_private());
return *user_private;
}
void set_user_private(std::unique_ptr<user_private_t> new_user_private) override {
assert(!has_user_private());
user_private = std::move(new_user_private);
}
void print(std::ostream& out) const override;
/*
* Public to SocketMessenger
* Working in SocketMessenger::get_shard_id();
*/
public:
/// start a handshake from the client's perspective,
  /// only call when the SocketConnection is first constructed
void start_connect(const entity_addr_t& peer_addr,
const entity_name_t& peer_name);
/// start a handshake from the server's perspective,
  /// only call when the SocketConnection is first constructed
void start_accept(SocketFRef&& socket,
const entity_addr_t& peer_addr);
seastar::future<> close_clean_yielded();
seastar::socket_address get_local_address() const;
seastar::shard_id get_messenger_shard_id() const;
SocketMessenger &get_messenger() const;
ConnectionRef get_local_shared_foreign_from_this();
private:
void set_peer_type(entity_type_t peer_type);
void set_peer_id(int64_t peer_id);
void set_peer_name(entity_name_t name) {
set_peer_type(name.type());
set_peer_id(name.num());
}
void set_features(uint64_t f);
void set_socket(Socket *s);
#ifdef UNIT_TESTS_BUILT
bool is_closed_clean() const override;
bool is_closed() const override;
  // peer wins if myaddr > peeraddr, or if the policy is server
bool peer_wins() const override;
Interceptor *interceptor = nullptr;
#else
  // peer wins if myaddr > peeraddr, or if the policy is server
bool peer_wins() const;
#endif
private:
const seastar::shard_id msgr_sid;
/*
   * Core owner is the messenger core; may be accessed from the I/O core.
*/
SocketMessenger& messenger;
std::unique_ptr<ProtocolV2> protocol;
Socket *socket = nullptr;
entity_name_t peer_name = {0, entity_name_t::NEW};
entity_addr_t peer_addr;
// which of the peer_addrs we're connecting to (as client)
// or should reconnect to (as peer)
entity_addr_t target_addr;
uint64_t features = 0;
ceph::net::Policy<crimson::common::Throttle> policy;
uint64_t peer_global_id = 0;
/*
* Core owner is I/O core (mutable).
*/
std::unique_ptr<ConnectionHandler> io_handler;
/*
* Core owner is up to the connection user.
*/
std::unique_ptr<user_private_t> user_private;
friend class IOHandler;
friend class ProtocolV2;
friend class FrameAssemblerV2;
};
} // namespace crimson::net
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::net::SocketConnection> : fmt::ostream_formatter {};
#endif
| 5,624 | 23.141631 | 94 | h |
null | ceph-main/src/crimson/net/SocketMessenger.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "SocketMessenger.h"
#include <seastar/core/sleep.hh>
#include <tuple>
#include <boost/functional/hash.hpp>
#include <fmt/os.h>
#include "auth/Auth.h"
#include "Errors.h"
#include "Socket.h"
namespace {
seastar::logger& logger() {
return crimson::get_logger(ceph_subsys_ms);
}
}
namespace crimson::net {
SocketMessenger::SocketMessenger(const entity_name_t& myname,
const std::string& logic_name,
uint32_t nonce,
bool dispatch_only_on_this_shard)
: sid{seastar::this_shard_id()},
logic_name{logic_name},
nonce{nonce},
dispatch_only_on_sid{dispatch_only_on_this_shard},
my_name{myname}
{}
SocketMessenger::~SocketMessenger()
{
logger().debug("~SocketMessenger: {}", logic_name);
ceph_assert_always(seastar::this_shard_id() == sid);
ceph_assert(!listener);
}
bool SocketMessenger::set_addr_unknowns(const entity_addrvec_t &addrs)
{
assert(seastar::this_shard_id() == sid);
bool ret = false;
entity_addrvec_t newaddrs = my_addrs;
for (auto& a : newaddrs.v) {
if (a.is_blank_ip()) {
int type = a.get_type();
int port = a.get_port();
uint32_t nonce = a.get_nonce();
for (auto& b : addrs.v) {
if (a.get_family() == b.get_family()) {
logger().debug(" assuming my addr {} matches provided addr {}", a, b);
a = b;
a.set_nonce(nonce);
a.set_type(type);
a.set_port(port);
ret = true;
break;
}
}
}
}
my_addrs = newaddrs;
return ret;
}
void SocketMessenger::set_myaddrs(const entity_addrvec_t& addrs)
{
assert(seastar::this_shard_id() == sid);
my_addrs = addrs;
for (auto& addr : my_addrs.v) {
addr.nonce = nonce;
}
}
crimson::net::listen_ertr::future<>
SocketMessenger::do_listen(const entity_addrvec_t& addrs)
{
ceph_assert(addrs.front().get_family() == AF_INET);
set_myaddrs(addrs);
return seastar::futurize_invoke([this] {
if (!listener) {
return ShardedServerSocket::create(dispatch_only_on_sid
).then([this] (auto _listener) {
listener = _listener;
});
} else {
return seastar::now();
}
}).then([this] () -> listen_ertr::future<> {
const entity_addr_t listen_addr = get_myaddr();
logger().debug("{} do_listen: try listen {}...", *this, listen_addr);
if (!listener) {
logger().warn("{} do_listen: listener doesn't exist", *this);
return listen_ertr::now();
}
return listener->listen(listen_addr);
});
}
SocketMessenger::bind_ertr::future<>
SocketMessenger::try_bind(const entity_addrvec_t& addrs,
uint32_t min_port, uint32_t max_port)
{
  // the classical OSD iterates over the addrvec and tries to listen on each
  // addr. crimson doesn't need to follow suit, as there is a consensus that
  // only the msgr v2 protocol needs to be supported.
assert(addrs.size() == 1);
auto addr = addrs.msgr2_addr();
if (addr.get_port() != 0) {
return do_listen(addrs).safe_then([this] {
logger().info("{} try_bind: done", *this);
});
}
ceph_assert(min_port <= max_port);
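  // Scan ports in [min_port, max_port]: repeat_until_value() stops once the
  // returned optional is engaged -- success is signalled with a
  // default-constructed (zero) error_code, and the last bind error is
  // propagated once max_port has been tried.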
return seastar::do_with(uint32_t(min_port),
[this, max_port, addr] (auto& port) {
return seastar::repeat_until_value([this, max_port, addr, &port] {
auto to_bind = addr;
to_bind.set_port(port);
return do_listen(entity_addrvec_t{to_bind}
).safe_then([this] () -> seastar::future<std::optional<std::error_code>> {
logger().info("{} try_bind: done", *this);
return seastar::make_ready_future<std::optional<std::error_code>>(
std::make_optional<std::error_code>(std::error_code{/* success! */}));
}, listen_ertr::all_same_way([this, max_port, &port]
(const std::error_code& e) mutable
-> seastar::future<std::optional<std::error_code>> {
logger().trace("{} try_bind: {} got error {}", *this, port, e);
if (port == max_port) {
return seastar::make_ready_future<std::optional<std::error_code>>(
std::make_optional<std::error_code>(e));
}
++port;
return seastar::make_ready_future<std::optional<std::error_code>>(
std::optional<std::error_code>{std::nullopt});
}));
}).then([] (const std::error_code e) -> bind_ertr::future<> {
if (!e) {
return bind_ertr::now(); // success!
} else if (e == std::errc::address_in_use) {
return crimson::ct_error::address_in_use::make();
} else if (e == std::errc::address_not_available) {
return crimson::ct_error::address_not_available::make();
}
ceph_abort();
});
});
}
SocketMessenger::bind_ertr::future<>
SocketMessenger::bind(const entity_addrvec_t& addrs)
{
assert(seastar::this_shard_id() == sid);
using crimson::common::local_conf;
return seastar::do_with(int64_t{local_conf()->ms_bind_retry_count},
[this, addrs] (auto& count) {
return seastar::repeat_until_value([this, addrs, &count] {
assert(count >= 0);
return try_bind(addrs,
local_conf()->ms_bind_port_min,
local_conf()->ms_bind_port_max)
.safe_then([this] {
logger().info("{} try_bind: done", *this);
return seastar::make_ready_future<std::optional<std::error_code>>(
std::make_optional<std::error_code>(std::error_code{/* success! */}));
}, bind_ertr::all_same_way([this, &count] (const std::error_code error) {
if (count-- > 0) {
logger().info("{} was unable to bind. Trying again in {} seconds",
*this, local_conf()->ms_bind_retry_delay);
return seastar::sleep(
std::chrono::seconds(local_conf()->ms_bind_retry_delay)
).then([] {
// one more time, please
return seastar::make_ready_future<std::optional<std::error_code>>(
std::optional<std::error_code>{std::nullopt});
});
} else {
logger().info("{} was unable to bind after {} attempts: {}",
*this, local_conf()->ms_bind_retry_count, error);
return seastar::make_ready_future<std::optional<std::error_code>>(
std::make_optional<std::error_code>(error));
}
}));
}).then([] (const std::error_code error) -> bind_ertr::future<> {
if (!error) {
return bind_ertr::now(); // success!
} else if (error == std::errc::address_in_use) {
return crimson::ct_error::address_in_use::make();
} else if (error == std::errc::address_not_available) {
return crimson::ct_error::address_not_available::make();
}
ceph_abort();
});
});
}
seastar::future<> SocketMessenger::accept(
SocketFRef &&socket, const entity_addr_t &peer_addr)
{
assert(seastar::this_shard_id() == sid);
SocketConnectionRef conn =
seastar::make_shared<SocketConnection>(*this, dispatchers);
conn->start_accept(std::move(socket), peer_addr);
return seastar::now();
}
seastar::future<> SocketMessenger::start(
const dispatchers_t& _dispatchers) {
assert(seastar::this_shard_id() == sid);
dispatchers.assign(_dispatchers);
if (listener) {
// make sure we have already bound to a valid address
ceph_assert(get_myaddr().is_msgr2());
ceph_assert(get_myaddr().get_port() > 0);
return listener->accept([this](SocketRef _socket, entity_addr_t peer_addr) {
assert(get_myaddr().is_msgr2());
SocketFRef socket = seastar::make_foreign(std::move(_socket));
if (listener->is_fixed_shard_dispatching()) {
return accept(std::move(socket), peer_addr);
} else {
return seastar::smp::submit_to(sid,
[this, peer_addr, socket = std::move(socket)]() mutable {
return accept(std::move(socket), peer_addr);
});
}
});
}
return seastar::now();
}
crimson::net::ConnectionRef
SocketMessenger::connect(const entity_addr_t& peer_addr, const entity_name_t& peer_name)
{
assert(seastar::this_shard_id() == sid);
// make sure we connect to a valid peer_addr
if (!peer_addr.is_msgr2()) {
ceph_abort_msg("ProtocolV1 is no longer supported");
}
ceph_assert(peer_addr.get_port() > 0);
if (auto found = lookup_conn(peer_addr); found) {
logger().debug("{} connect to existing", *found);
return found->get_local_shared_foreign_from_this();
}
SocketConnectionRef conn =
seastar::make_shared<SocketConnection>(*this, dispatchers);
conn->start_connect(peer_addr, peer_name);
return conn->get_local_shared_foreign_from_this();
}
seastar::future<> SocketMessenger::shutdown()
{
assert(seastar::this_shard_id() == sid);
return seastar::futurize_invoke([this] {
assert(dispatchers.empty());
if (listener) {
auto d_listener = listener;
listener = nullptr;
return d_listener->shutdown_destroy();
} else {
return seastar::now();
}
// close all connections
}).then([this] {
return seastar::parallel_for_each(accepting_conns, [] (auto conn) {
return conn->close_clean_yielded();
});
}).then([this] {
ceph_assert(accepting_conns.empty());
return seastar::parallel_for_each(connections, [] (auto conn) {
return conn.second->close_clean_yielded();
});
}).then([this] {
return seastar::parallel_for_each(closing_conns, [] (auto conn) {
return conn->close_clean_yielded();
});
}).then([this] {
ceph_assert(connections.empty());
shutdown_promise.set_value();
});
}
static entity_addr_t choose_addr(
const entity_addr_t &peer_addr_for_me,
const SocketConnection& conn)
{
using crimson::common::local_conf;
// XXX: a syscall is here
if (const auto local_addr = conn.get_local_address();
local_conf()->ms_learn_addr_from_peer) {
logger().info("{} peer {} says I am {} (socket says {})",
conn, conn.get_peer_socket_addr(), peer_addr_for_me,
local_addr);
return peer_addr_for_me;
} else {
const auto local_addr_for_me = conn.get_local_address();
logger().info("{} socket to {} says I am {} (peer says {})",
conn, conn.get_peer_socket_addr(),
local_addr, peer_addr_for_me);
entity_addr_t addr;
addr.set_sockaddr(&local_addr_for_me.as_posix_sockaddr());
return addr;
}
}
void SocketMessenger::learned_addr(
const entity_addr_t &peer_addr_for_me,
const SocketConnection& conn)
{
assert(seastar::this_shard_id() == sid);
if (!need_addr) {
if ((!get_myaddr().is_any() &&
get_myaddr().get_type() != peer_addr_for_me.get_type()) ||
get_myaddr().get_family() != peer_addr_for_me.get_family() ||
!get_myaddr().is_same_host(peer_addr_for_me)) {
logger().warn("{} peer_addr_for_me {} type/family/IP doesn't match myaddr {}",
conn, peer_addr_for_me, get_myaddr());
throw std::system_error(
make_error_code(crimson::net::error::bad_peer_address));
}
return;
}
if (get_myaddr().get_type() == entity_addr_t::TYPE_NONE) {
// Not bound
auto addr = choose_addr(peer_addr_for_me, conn);
addr.set_type(entity_addr_t::TYPE_ANY);
addr.set_port(0);
need_addr = false;
set_myaddrs(entity_addrvec_t{addr});
logger().info("{} learned myaddr={} (unbound)", conn, get_myaddr());
} else {
// Already bound
if (!get_myaddr().is_any() &&
get_myaddr().get_type() != peer_addr_for_me.get_type()) {
logger().warn("{} peer_addr_for_me {} type doesn't match myaddr {}",
conn, peer_addr_for_me, get_myaddr());
throw std::system_error(
make_error_code(crimson::net::error::bad_peer_address));
}
if (get_myaddr().get_family() != peer_addr_for_me.get_family()) {
logger().warn("{} peer_addr_for_me {} family doesn't match myaddr {}",
conn, peer_addr_for_me, get_myaddr());
throw std::system_error(
make_error_code(crimson::net::error::bad_peer_address));
}
if (get_myaddr().is_blank_ip()) {
auto addr = choose_addr(peer_addr_for_me, conn);
addr.set_type(get_myaddr().get_type());
addr.set_port(get_myaddr().get_port());
need_addr = false;
set_myaddrs(entity_addrvec_t{addr});
logger().info("{} learned myaddr={} (blank IP)", conn, get_myaddr());
} else if (!get_myaddr().is_same_host(peer_addr_for_me)) {
logger().warn("{} peer_addr_for_me {} IP doesn't match myaddr {}",
conn, peer_addr_for_me, get_myaddr());
throw std::system_error(
make_error_code(crimson::net::error::bad_peer_address));
} else {
need_addr = false;
}
}
}
SocketPolicy SocketMessenger::get_policy(entity_type_t peer_type) const
{
assert(seastar::this_shard_id() == sid);
return policy_set.get(peer_type);
}
SocketPolicy SocketMessenger::get_default_policy() const
{
assert(seastar::this_shard_id() == sid);
return policy_set.get_default();
}
void SocketMessenger::set_default_policy(const SocketPolicy& p)
{
assert(seastar::this_shard_id() == sid);
policy_set.set_default(p);
}
void SocketMessenger::set_policy(entity_type_t peer_type,
const SocketPolicy& p)
{
assert(seastar::this_shard_id() == sid);
policy_set.set(peer_type, p);
}
void SocketMessenger::set_policy_throttler(entity_type_t peer_type,
Throttle* throttle)
{
assert(seastar::this_shard_id() == sid);
// only byte throttler is used in OSD
policy_set.set_throttlers(peer_type, throttle, nullptr);
}
crimson::net::SocketConnectionRef SocketMessenger::lookup_conn(const entity_addr_t& addr)
{
assert(seastar::this_shard_id() == sid);
if (auto found = connections.find(addr);
found != connections.end()) {
return found->second;
} else {
return nullptr;
}
}
void SocketMessenger::accept_conn(SocketConnectionRef conn)
{
assert(seastar::this_shard_id() == sid);
accepting_conns.insert(conn);
}
void SocketMessenger::unaccept_conn(SocketConnectionRef conn)
{
assert(seastar::this_shard_id() == sid);
accepting_conns.erase(conn);
}
void SocketMessenger::register_conn(SocketConnectionRef conn)
{
assert(seastar::this_shard_id() == sid);
auto [i, added] = connections.emplace(conn->get_peer_addr(), conn);
std::ignore = i;
ceph_assert(added);
}
void SocketMessenger::unregister_conn(SocketConnectionRef conn)
{
assert(seastar::this_shard_id() == sid);
ceph_assert(conn);
auto found = connections.find(conn->get_peer_addr());
ceph_assert(found != connections.end());
ceph_assert(found->second == conn);
connections.erase(found);
}
void SocketMessenger::closing_conn(SocketConnectionRef conn)
{
assert(seastar::this_shard_id() == sid);
closing_conns.push_back(conn);
}
void SocketMessenger::closed_conn(SocketConnectionRef conn)
{
assert(seastar::this_shard_id() == sid);
for (auto it = closing_conns.begin();
it != closing_conns.end();) {
if (*it == conn) {
it = closing_conns.erase(it);
} else {
it++;
}
}
}
uint32_t SocketMessenger::get_global_seq(uint32_t old)
{
assert(seastar::this_shard_id() == sid);
if (old > global_seq) {
global_seq = old;
}
return ++global_seq;
}
} // namespace crimson::net
| 15,763 | 31.436214 | 89 | cc |
null | ceph-main/src/crimson/net/SocketMessenger.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <map>
#include <set>
#include <vector>
#include <seastar/core/gate.hh>
#include <seastar/core/reactor.hh>
#include <seastar/core/sharded.hh>
#include <seastar/core/shared_future.hh>
#include "crimson/net/chained_dispatchers.h"
#include "Messenger.h"
#include "Socket.h"
#include "SocketConnection.h"
namespace crimson::net {
class ShardedServerSocket;
class SocketMessenger final : public Messenger {
// Messenger public interfaces
public:
SocketMessenger(const entity_name_t& myname,
const std::string& logic_name,
uint32_t nonce,
bool dispatch_only_on_this_shard);
~SocketMessenger() override;
const entity_name_t &get_myname() const override {
return my_name;
}
const entity_addrvec_t &get_myaddrs() const override {
return my_addrs;
}
void set_myaddrs(const entity_addrvec_t& addr) override;
bool set_addr_unknowns(const entity_addrvec_t &addr) override;
void set_auth_client(crimson::auth::AuthClient *ac) override {
assert(seastar::this_shard_id() == sid);
auth_client = ac;
}
void set_auth_server(crimson::auth::AuthServer *as) override {
assert(seastar::this_shard_id() == sid);
auth_server = as;
}
bind_ertr::future<> bind(const entity_addrvec_t& addr) override;
seastar::future<> start(const dispatchers_t& dispatchers) override;
ConnectionRef connect(const entity_addr_t& peer_addr,
const entity_name_t& peer_name) override;
bool owns_connection(Connection &conn) const override {
assert(seastar::this_shard_id() == sid);
return this == &static_cast<SocketConnection&>(conn).get_messenger();
}
// can only wait once
seastar::future<> wait() override {
assert(seastar::this_shard_id() == sid);
return shutdown_promise.get_future();
}
void stop() override {
assert(seastar::this_shard_id() == sid);
dispatchers.clear();
}
bool is_started() const override {
assert(seastar::this_shard_id() == sid);
return !dispatchers.empty();
}
seastar::future<> shutdown() override;
void print(std::ostream& out) const override {
out << get_myname()
<< "(" << logic_name
<< ") " << get_myaddr();
}
SocketPolicy get_policy(entity_type_t peer_type) const override;
SocketPolicy get_default_policy() const override;
void set_default_policy(const SocketPolicy& p) override;
void set_policy(entity_type_t peer_type, const SocketPolicy& p) override;
void set_policy_throttler(entity_type_t peer_type, Throttle* throttle) override;
// SocketMessenger public interfaces
public:
crimson::auth::AuthClient* get_auth_client() const {
assert(seastar::this_shard_id() == sid);
return auth_client;
}
crimson::auth::AuthServer* get_auth_server() const {
assert(seastar::this_shard_id() == sid);
return auth_server;
}
uint32_t get_global_seq(uint32_t old=0);
void learned_addr(const entity_addr_t &peer_addr_for_me,
const SocketConnection& conn);
SocketConnectionRef lookup_conn(const entity_addr_t& addr);
void accept_conn(SocketConnectionRef);
void unaccept_conn(SocketConnectionRef);
void register_conn(SocketConnectionRef);
void unregister_conn(SocketConnectionRef);
void closing_conn(SocketConnectionRef);
void closed_conn(SocketConnectionRef);
seastar::shard_id get_shard_id() const {
return sid;
}
#ifdef UNIT_TESTS_BUILT
void set_interceptor(Interceptor *i) override {
interceptor = i;
}
Interceptor *interceptor = nullptr;
#endif
private:
seastar::future<> accept(SocketFRef &&, const entity_addr_t &);
listen_ertr::future<> do_listen(const entity_addrvec_t& addr);
/// try to bind to the first unused port of given address
bind_ertr::future<> try_bind(const entity_addrvec_t& addr,
uint32_t min_port, uint32_t max_port);
const seastar::shard_id sid;
// Distinguish messengers with meaningful names for debugging
const std::string logic_name;
const uint32_t nonce;
const bool dispatch_only_on_sid;
entity_name_t my_name;
entity_addrvec_t my_addrs;
crimson::auth::AuthClient* auth_client = nullptr;
crimson::auth::AuthServer* auth_server = nullptr;
ShardedServerSocket *listener = nullptr;
ChainedDispatchers dispatchers;
std::map<entity_addr_t, SocketConnectionRef> connections;
std::set<SocketConnectionRef> accepting_conns;
std::vector<SocketConnectionRef> closing_conns;
ceph::net::PolicySet<Throttle> policy_set;
  // specifies that we haven't learned our addr; set to false once we learn it.
bool need_addr = true;
uint32_t global_seq = 0;
bool started = false;
seastar::promise<> shutdown_promise;
};
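/*
 * A rough lifecycle sketch (illustrative only; real callers are the OSD,
 * monitor/mgr clients, etc., and every call below must run on the messenger
 * shard):
 *
 *   msgr.bind(my_addrs)          // bind_ertr; only needed for listening sides
 *   msgr.start({&dispatcher});   // register dispatchers, start accepting
 *   auto conn = msgr.connect(peer_addr, peer_name);
 *   conn->send(std::move(msg));
 *   ...
 *   msgr.stop();                 // drop the dispatchers
 *   msgr.shutdown();             // close listener and all connections
 */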
} // namespace crimson::net
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::net::SocketMessenger> : fmt::ostream_formatter {};
#endif
| 5,289 | 26.409326 | 93 | h |
null | ceph-main/src/crimson/net/chained_dispatchers.cc | #include "crimson/common/log.h"
#include "crimson/net/chained_dispatchers.h"
#include "crimson/net/Connection.h"
#include "crimson/net/Dispatcher.h"
#include "msg/Message.h"
namespace {
seastar::logger& logger() {
return crimson::get_logger(ceph_subsys_ms);
}
}
namespace crimson::net {
seastar::future<>
ChainedDispatchers::ms_dispatch(crimson::net::ConnectionRef conn,
MessageRef m) {
try {
for (auto& dispatcher : dispatchers) {
auto dispatched = dispatcher->ms_dispatch(conn, m);
if (dispatched.has_value()) {
return std::move(*dispatched
).handle_exception([conn] (std::exception_ptr eptr) {
logger().error("{} got unexpected exception in ms_dispatch() throttling {}",
*conn, eptr);
ceph_abort();
});
}
}
} catch (...) {
logger().error("{} got unexpected exception in ms_dispatch() {}",
*conn, std::current_exception());
ceph_abort();
}
if (!dispatchers.empty()) {
logger().error("ms_dispatch unhandled message {}", *m);
}
return seastar::now();
}
void
ChainedDispatchers::ms_handle_accept(
crimson::net::ConnectionRef conn,
seastar::shard_id new_shard,
bool is_replace) {
try {
for (auto& dispatcher : dispatchers) {
dispatcher->ms_handle_accept(conn, new_shard, is_replace);
}
} catch (...) {
logger().error("{} got unexpected exception in ms_handle_accept() {}",
*conn, std::current_exception());
ceph_abort();
}
}
void
ChainedDispatchers::ms_handle_connect(
crimson::net::ConnectionRef conn,
seastar::shard_id new_shard) {
try {
for(auto& dispatcher : dispatchers) {
dispatcher->ms_handle_connect(conn, new_shard);
}
} catch (...) {
logger().error("{} got unexpected exception in ms_handle_connect() {}",
*conn, std::current_exception());
ceph_abort();
}
}
void
ChainedDispatchers::ms_handle_reset(crimson::net::ConnectionRef conn, bool is_replace) {
try {
for (auto& dispatcher : dispatchers) {
dispatcher->ms_handle_reset(conn, is_replace);
}
} catch (...) {
logger().error("{} got unexpected exception in ms_handle_reset() {}",
*conn, std::current_exception());
ceph_abort();
}
}
void
ChainedDispatchers::ms_handle_remote_reset(crimson::net::ConnectionRef conn) {
try {
for (auto& dispatcher : dispatchers) {
dispatcher->ms_handle_remote_reset(conn);
}
} catch (...) {
logger().error("{} got unexpected exception in ms_handle_remote_reset() {}",
*conn, std::current_exception());
ceph_abort();
}
}
}
| 2,706 | 26.343434 | 88 | cc |
null | ceph-main/src/crimson/net/chained_dispatchers.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/smp.hh>
#include "Fwd.h"
#include "crimson/common/log.h"
namespace crimson::net {
class Dispatcher;
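// ChainedDispatchers fans messenger events out to the registered dispatchers.
// For ms_dispatch(), the dispatchers are consulted in order and the first one
// returning an engaged future handles (and possibly throttles) the message;
// the other ms_handle_*() events are delivered to every dispatcher. An
// unexpected exception thrown by a dispatcher is treated as fatal.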
class ChainedDispatchers {
public:
void assign(const dispatchers_t& _dispatchers) {
assert(empty());
assert(!_dispatchers.empty());
dispatchers = _dispatchers;
}
void clear() {
dispatchers.clear();
}
bool empty() const {
return dispatchers.empty();
}
seastar::future<> ms_dispatch(crimson::net::ConnectionRef, MessageRef);
void ms_handle_accept(crimson::net::ConnectionRef conn, seastar::shard_id, bool is_replace);
void ms_handle_connect(crimson::net::ConnectionRef conn, seastar::shard_id);
void ms_handle_reset(crimson::net::ConnectionRef conn, bool is_replace);
void ms_handle_remote_reset(crimson::net::ConnectionRef conn);
private:
dispatchers_t dispatchers;
};
}
| 960 | 23.641026 | 94 | h |
null | ceph-main/src/crimson/net/io_handler.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "io_handler.h"
#include "auth/Auth.h"
#include "crimson/common/formatter.h"
#include "crimson/common/log.h"
#include "crimson/net/Errors.h"
#include "crimson/net/chained_dispatchers.h"
#include "crimson/net/SocketMessenger.h"
#include "msg/Message.h"
#include "msg/msg_fmt.h"
using namespace ceph::msgr::v2;
using crimson::common::local_conf;
namespace {
seastar::logger& logger() {
return crimson::get_logger(ceph_subsys_ms);
}
[[noreturn]] void abort_in_fault() {
throw std::system_error(make_error_code(crimson::net::error::negotiation_failure));
}
[[noreturn]] void abort_protocol() {
throw std::system_error(make_error_code(crimson::net::error::protocol_aborted));
}
std::size_t get_msg_size(const FrameAssembler &rx_frame_asm)
{
ceph_assert(rx_frame_asm.get_num_segments() > 0);
size_t sum = 0;
// we don't include SegmentIndex::Msg::HEADER.
for (size_t idx = 1; idx < rx_frame_asm.get_num_segments(); idx++) {
sum += rx_frame_asm.get_segment_logical_len(idx);
}
return sum;
}
} // namespace anonymous
namespace crimson::net {
IOHandler::IOHandler(ChainedDispatchers &dispatchers,
SocketConnection &conn)
: shard_states(shard_states_t::create(
seastar::this_shard_id(), io_state_t::none)),
dispatchers(dispatchers),
conn(conn),
conn_ref(conn.get_local_shared_foreign_from_this())
{}
IOHandler::~IOHandler()
{
// close_io() must be finished
ceph_assert_always(maybe_prv_shard_states == nullptr);
  // should be true in the owning shard
// ceph_assert_always(shard_states->assert_closed_and_exit());
assert(!conn_ref);
}
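/*
 * Gather everything currently pending for the wire into one bufferlist: an
 * optional keepalive frame, an optional keepalive-ack frame, a bare ack frame
 * when there is no message to piggyback the ack on, and finally the encoded
 * pending messages. Unless the policy is lossy, the swept messages are kept
 * in out_sent_msgs so they can be requeued after a session reset.
 */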
ceph::bufferlist IOHandler::sweep_out_pending_msgs_to_sent(
bool require_keepalive,
std::optional<utime_t> maybe_keepalive_ack,
bool require_ack)
{
std::size_t num_msgs = out_pending_msgs.size();
ceph::bufferlist bl;
if (unlikely(require_keepalive)) {
auto keepalive_frame = KeepAliveFrame::Encode();
bl.append(frame_assembler->get_buffer(keepalive_frame));
}
if (unlikely(maybe_keepalive_ack.has_value())) {
auto keepalive_ack_frame = KeepAliveFrameAck::Encode(*maybe_keepalive_ack);
bl.append(frame_assembler->get_buffer(keepalive_ack_frame));
}
if (require_ack && num_msgs == 0u) {
auto ack_frame = AckFrame::Encode(in_seq);
bl.append(frame_assembler->get_buffer(ack_frame));
}
std::for_each(
out_pending_msgs.begin(),
out_pending_msgs.begin()+num_msgs,
[this, &bl](const MessageFRef& msg) {
      // fill in the source entity name before encoding
msg->get_header().src = conn.messenger.get_myname();
msg->encode(conn.features, 0);
ceph_assert(!msg->get_seq() && "message already has seq");
msg->set_seq(++out_seq);
ceph_msg_header &header = msg->get_header();
ceph_msg_footer &footer = msg->get_footer();
ceph_msg_header2 header2{header.seq, header.tid,
header.type, header.priority,
header.version,
ceph_le32(0), header.data_off,
ceph_le64(in_seq),
footer.flags, header.compat_version,
header.reserved};
auto message = MessageFrame::Encode(header2,
msg->get_payload(), msg->get_middle(), msg->get_data());
logger().debug("{} --> #{} === {} ({})",
conn, msg->get_seq(), *msg, msg->get_type());
bl.append(frame_assembler->get_buffer(message));
});
if (!conn.policy.lossy) {
out_sent_msgs.insert(
out_sent_msgs.end(),
std::make_move_iterator(out_pending_msgs.begin()),
std::make_move_iterator(out_pending_msgs.end()));
}
out_pending_msgs.clear();
return bl;
}
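/*
 * send()/send_keepalive() may be invoked from any core. When the caller is
 * not on the io shard, the call is forwarded with smp::submit_to(); because
 * the owning shard can change while a submission is in flight, the
 * *_redirected() variants re-check the shard id and forward again if needed.
 */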
seastar::future<> IOHandler::send(MessageFRef msg)
{
// sid may be changed on-the-fly during the submission
if (seastar::this_shard_id() == get_shard_id()) {
return do_send(std::move(msg));
} else {
logger().trace("{} send() is directed to {} -- {}",
conn, get_shard_id(), *msg);
return seastar::smp::submit_to(
get_shard_id(), [this, msg=std::move(msg)]() mutable {
return send_redirected(std::move(msg));
});
}
}
seastar::future<> IOHandler::send_redirected(MessageFRef msg)
{
// sid may be changed on-the-fly during the submission
if (seastar::this_shard_id() == get_shard_id()) {
return do_send(std::move(msg));
} else {
logger().debug("{} send() is redirected to {} -- {}",
conn, get_shard_id(), *msg);
return seastar::smp::submit_to(
get_shard_id(), [this, msg=std::move(msg)]() mutable {
return send_redirected(std::move(msg));
});
}
}
seastar::future<> IOHandler::do_send(MessageFRef msg)
{
assert(seastar::this_shard_id() == get_shard_id());
logger().trace("{} do_send() got message -- {}", conn, *msg);
if (get_io_state() != io_state_t::drop) {
out_pending_msgs.push_back(std::move(msg));
notify_out_dispatch();
}
return seastar::now();
}
seastar::future<> IOHandler::send_keepalive()
{
// sid may be changed on-the-fly during the submission
if (seastar::this_shard_id() == get_shard_id()) {
return do_send_keepalive();
} else {
logger().trace("{} send_keepalive() is directed to {}", conn, get_shard_id());
return seastar::smp::submit_to(
get_shard_id(), [this] {
return send_keepalive_redirected();
});
}
}
seastar::future<> IOHandler::send_keepalive_redirected()
{
// sid may be changed on-the-fly during the submission
if (seastar::this_shard_id() == get_shard_id()) {
return do_send_keepalive();
} else {
logger().debug("{} send_keepalive() is redirected to {}", conn, get_shard_id());
return seastar::smp::submit_to(
get_shard_id(), [this] {
return send_keepalive_redirected();
});
}
}
seastar::future<> IOHandler::do_send_keepalive()
{
assert(seastar::this_shard_id() == get_shard_id());
  logger().trace("{} do_send_keepalive(): need_keepalive={}", conn, need_keepalive);
if (!need_keepalive) {
need_keepalive = true;
notify_out_dispatch();
}
return seastar::now();
}
void IOHandler::mark_down()
{
ceph_assert_always(seastar::this_shard_id() == get_shard_id());
ceph_assert_always(get_io_state() != io_state_t::none);
need_dispatch_reset = false;
if (get_io_state() == io_state_t::drop) {
return;
}
auto cc_seq = crosscore.prepare_submit();
logger().info("{} mark_down() at {}, send {} notify_mark_down()",
conn, io_stat_printer{*this}, cc_seq);
do_set_io_state(io_state_t::drop);
shard_states->dispatch_in_background(
"notify_mark_down", conn, [this, cc_seq] {
return seastar::smp::submit_to(
conn.get_messenger_shard_id(), [this, cc_seq] {
return handshake_listener->notify_mark_down(cc_seq);
});
});
}
void IOHandler::print_io_stat(std::ostream &out) const
{
assert(seastar::this_shard_id() == get_shard_id());
out << "io_stat("
<< "io_state=" << fmt::format("{}", get_io_state())
<< ", in_seq=" << in_seq
<< ", out_seq=" << out_seq
<< ", out_pending_msgs_size=" << out_pending_msgs.size()
<< ", out_sent_msgs_size=" << out_sent_msgs.size()
<< ", need_ack=" << (ack_left > 0)
<< ", need_keepalive=" << need_keepalive
<< ", need_keepalive_ack=" << bool(next_keepalive_ack)
<< ")";
}
void IOHandler::assign_frame_assembler(FrameAssemblerV2Ref fa)
{
assert(fa != nullptr);
ceph_assert_always(frame_assembler == nullptr);
frame_assembler = std::move(fa);
ceph_assert_always(
frame_assembler->get_shard_id() == get_shard_id());
// should have been set through dispatch_accept/connect()
ceph_assert_always(
frame_assembler->get_socket_shard_id() == get_shard_id());
ceph_assert_always(frame_assembler->is_socket_valid());
}
void IOHandler::do_set_io_state(
io_state_t new_state,
std::optional<crosscore_t::seq_t> cc_seq,
FrameAssemblerV2Ref fa,
bool set_notify_out)
{
ceph_assert_always(seastar::this_shard_id() == get_shard_id());
auto prv_state = get_io_state();
logger().debug("{} got {}do_set_io_state(): prv_state={}, new_state={}, "
"fa={}, set_notify_out={}, at {}",
conn,
cc_seq.has_value() ? fmt::format("{} ", *cc_seq) : "",
prv_state, new_state,
fa ? "present" : "N/A", set_notify_out,
io_stat_printer{*this});
ceph_assert_always(!(
(new_state == io_state_t::none && prv_state != io_state_t::none) ||
(new_state == io_state_t::open && prv_state == io_state_t::open)
));
if (prv_state == io_state_t::drop) {
// only possible due to a racing mark_down() from user
if (new_state == io_state_t::open) {
assign_frame_assembler(std::move(fa));
frame_assembler->shutdown_socket<false>(nullptr);
} else {
assert(fa == nullptr);
}
return;
}
bool dispatch_in = false;
if (new_state == io_state_t::open) {
// to open
ceph_assert_always(protocol_is_connected == true);
assign_frame_assembler(std::move(fa));
dispatch_in = true;
#ifdef UNIT_TESTS_BUILT
if (conn.interceptor) {
// FIXME: doesn't support cross-core
conn.interceptor->register_conn_ready(
conn.get_local_shared_foreign_from_this());
}
#endif
} else if (prv_state == io_state_t::open) {
// from open
ceph_assert_always(protocol_is_connected == true);
protocol_is_connected = false;
assert(fa == nullptr);
ceph_assert_always(frame_assembler->is_socket_valid());
frame_assembler->shutdown_socket<false>(nullptr);
} else {
assert(fa == nullptr);
}
if (new_state == io_state_t::delay) {
need_notify_out = set_notify_out;
if (need_notify_out) {
maybe_notify_out_dispatch();
}
} else {
assert(set_notify_out == false);
need_notify_out = false;
}
// FIXME: simplify and drop the prv_state == new_state case
if (prv_state != new_state) {
shard_states->set_io_state(new_state);
}
/*
* not atomic below
*/
if (dispatch_in) {
do_in_dispatch();
}
}
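/*
 * A note on the cc_seq pattern used by the cross-core entry points below
 * (set_io_state, wait_io_exit_dispatching, reset_session, ...):
 *
 * The protocol core submits these calls to the io shard via smp::submit_to(),
 * which does not guarantee ordering across separate submissions. Each call
 * therefore carries a crosscore sequence number; proceed_or_wait() defers a
 * call that arrives ahead of its turn until the preceding sequence number has
 * been consumed, so the calls take effect in the order they were issued.
 */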
seastar::future<> IOHandler::set_io_state(
crosscore_t::seq_t cc_seq,
io_state_t new_state,
FrameAssemblerV2Ref fa,
bool set_notify_out)
{
assert(seastar::this_shard_id() == get_shard_id());
if (!crosscore.proceed_or_wait(cc_seq)) {
logger().debug("{} got {} set_io_state(), wait at {}",
conn, cc_seq, crosscore.get_in_seq());
return crosscore.wait(cc_seq
).then([this, cc_seq, new_state,
fa=std::move(fa), set_notify_out]() mutable {
return set_io_state(cc_seq, new_state, std::move(fa), set_notify_out);
});
}
do_set_io_state(new_state, cc_seq, std::move(fa), set_notify_out);
return seastar::now();
}
seastar::future<IOHandler::exit_dispatching_ret>
IOHandler::wait_io_exit_dispatching(
crosscore_t::seq_t cc_seq)
{
assert(seastar::this_shard_id() == get_shard_id());
if (!crosscore.proceed_or_wait(cc_seq)) {
logger().debug("{} got {} wait_io_exit_dispatching(), wait at {}",
conn, cc_seq, crosscore.get_in_seq());
return crosscore.wait(cc_seq
).then([this, cc_seq] {
return wait_io_exit_dispatching(cc_seq);
});
}
logger().debug("{} got {} wait_io_exit_dispatching()",
conn, cc_seq);
ceph_assert_always(get_io_state() != io_state_t::open);
ceph_assert_always(frame_assembler != nullptr);
ceph_assert_always(!frame_assembler->is_socket_valid());
return seastar::futurize_invoke([this] {
// cannot be running in parallel with to_new_sid()
if (maybe_dropped_sid.has_value()) {
ceph_assert_always(get_io_state() == io_state_t::drop);
assert(shard_states->assert_closed_and_exit());
auto prv_sid = *maybe_dropped_sid;
return seastar::smp::submit_to(prv_sid, [this] {
logger().debug("{} got wait_io_exit_dispatching from prv_sid", conn);
assert(maybe_prv_shard_states != nullptr);
return maybe_prv_shard_states->wait_io_exit_dispatching();
});
} else {
return shard_states->wait_io_exit_dispatching();
}
}).then([this] {
logger().debug("{} finish wait_io_exit_dispatching at {}",
conn, io_stat_printer{*this});
ceph_assert_always(frame_assembler != nullptr);
ceph_assert_always(!frame_assembler->is_socket_valid());
frame_assembler->set_shard_id(conn.get_messenger_shard_id());
return exit_dispatching_ret{
std::move(frame_assembler),
get_states()};
});
}
seastar::future<> IOHandler::reset_session(
crosscore_t::seq_t cc_seq,
bool full)
{
assert(seastar::this_shard_id() == get_shard_id());
if (!crosscore.proceed_or_wait(cc_seq)) {
logger().debug("{} got {} reset_session(), wait at {}",
conn, cc_seq, crosscore.get_in_seq());
return crosscore.wait(cc_seq
).then([this, cc_seq, full] {
return reset_session(cc_seq, full);
});
}
logger().debug("{} got {} reset_session({})",
conn, cc_seq, full);
assert(get_io_state() != io_state_t::open);
reset_in();
if (full) {
reset_out();
dispatch_remote_reset();
}
return seastar::now();
}
seastar::future<> IOHandler::reset_peer_state(
crosscore_t::seq_t cc_seq)
{
assert(seastar::this_shard_id() == get_shard_id());
if (!crosscore.proceed_or_wait(cc_seq)) {
logger().debug("{} got {} reset_peer_state(), wait at {}",
conn, cc_seq, crosscore.get_in_seq());
return crosscore.wait(cc_seq
).then([this, cc_seq] {
return reset_peer_state(cc_seq);
});
}
logger().debug("{} got {} reset_peer_state()",
conn, cc_seq);
assert(get_io_state() != io_state_t::open);
reset_in();
do_requeue_out_sent_up_to(0);
discard_out_sent();
return seastar::now();
}
seastar::future<> IOHandler::requeue_out_sent(
crosscore_t::seq_t cc_seq)
{
assert(seastar::this_shard_id() == get_shard_id());
if (!crosscore.proceed_or_wait(cc_seq)) {
logger().debug("{} got {} requeue_out_sent(), wait at {}",
conn, cc_seq, crosscore.get_in_seq());
return crosscore.wait(cc_seq
).then([this, cc_seq] {
return requeue_out_sent(cc_seq);
});
}
logger().debug("{} got {} requeue_out_sent()",
conn, cc_seq);
do_requeue_out_sent();
return seastar::now();
}
void IOHandler::do_requeue_out_sent()
{
assert(get_io_state() != io_state_t::open);
if (out_sent_msgs.empty()) {
return;
}
out_seq -= out_sent_msgs.size();
logger().debug("{} requeue {} items, revert out_seq to {}",
conn, out_sent_msgs.size(), out_seq);
for (MessageFRef& msg : out_sent_msgs) {
msg->clear_payload();
msg->set_seq(0);
}
out_pending_msgs.insert(
out_pending_msgs.begin(),
std::make_move_iterator(out_sent_msgs.begin()),
std::make_move_iterator(out_sent_msgs.end()));
out_sent_msgs.clear();
maybe_notify_out_dispatch();
}
seastar::future<> IOHandler::requeue_out_sent_up_to(
crosscore_t::seq_t cc_seq,
seq_num_t msg_seq)
{
assert(seastar::this_shard_id() == get_shard_id());
if (!crosscore.proceed_or_wait(cc_seq)) {
logger().debug("{} got {} requeue_out_sent_up_to(), wait at {}",
conn, cc_seq, crosscore.get_in_seq());
return crosscore.wait(cc_seq
).then([this, cc_seq, msg_seq] {
return requeue_out_sent_up_to(cc_seq, msg_seq);
});
}
logger().debug("{} got {} requeue_out_sent_up_to({})",
conn, cc_seq, msg_seq);
do_requeue_out_sent_up_to(msg_seq);
return seastar::now();
}
void IOHandler::do_requeue_out_sent_up_to(seq_num_t seq)
{
assert(get_io_state() != io_state_t::open);
if (out_sent_msgs.empty() && out_pending_msgs.empty()) {
logger().debug("{} nothing to requeue, reset out_seq from {} to seq {}",
conn, out_seq, seq);
out_seq = seq;
return;
}
logger().debug("{} discarding sent msgs by seq {} (sent_len={}, out_seq={})",
conn, seq, out_sent_msgs.size(), out_seq);
while (!out_sent_msgs.empty()) {
auto cur_seq = out_sent_msgs.front()->get_seq();
if (cur_seq == 0 || cur_seq > seq) {
break;
} else {
out_sent_msgs.pop_front();
}
}
do_requeue_out_sent();
}
void IOHandler::reset_in()
{
assert(get_io_state() != io_state_t::open);
in_seq = 0;
}
void IOHandler::reset_out()
{
assert(get_io_state() != io_state_t::open);
discard_out_sent();
out_pending_msgs.clear();
need_keepalive = false;
next_keepalive_ack = std::nullopt;
ack_left = 0;
}
void IOHandler::discard_out_sent()
{
assert(get_io_state() != io_state_t::open);
out_seq = 0;
out_sent_msgs.clear();
}
seastar::future<>
IOHandler::dispatch_accept(
crosscore_t::seq_t cc_seq,
seastar::shard_id new_sid,
ConnectionFRef conn_fref,
bool is_replace)
{
ceph_assert_always(seastar::this_shard_id() == get_shard_id());
if (!crosscore.proceed_or_wait(cc_seq)) {
logger().debug("{} got {} dispatch_accept(), wait at {}",
conn, cc_seq, crosscore.get_in_seq());
return crosscore.wait(cc_seq
).then([this, cc_seq, new_sid, is_replace,
conn_fref=std::move(conn_fref)]() mutable {
return dispatch_accept(cc_seq, new_sid, std::move(conn_fref), is_replace);
});
}
logger().debug("{} got {} dispatch_accept(new_sid={}, replace={}) at {}",
conn, cc_seq, new_sid, is_replace, io_stat_printer{*this});
if (get_io_state() == io_state_t::drop) {
assert(!protocol_is_connected);
// it is possible that both io_handler and protocolv2 are
// trying to close each other from different cores simultaneously.
return to_new_sid(new_sid, std::move(conn_fref));
}
  // protocol_is_connected can stay true across this call if the replacement
  // is happening to an already connected connection.
protocol_is_connected = true;
ceph_assert_always(conn_ref);
auto _conn_ref = conn_ref;
auto fut = to_new_sid(new_sid, std::move(conn_fref));
dispatchers.ms_handle_accept(_conn_ref, new_sid, is_replace);
// user can make changes
return fut;
}
seastar::future<>
IOHandler::dispatch_connect(
crosscore_t::seq_t cc_seq,
seastar::shard_id new_sid,
ConnectionFRef conn_fref)
{
ceph_assert_always(seastar::this_shard_id() == get_shard_id());
if (!crosscore.proceed_or_wait(cc_seq)) {
logger().debug("{} got {} dispatch_connect(), wait at {}",
conn, cc_seq, crosscore.get_in_seq());
return crosscore.wait(cc_seq
).then([this, cc_seq, new_sid,
conn_fref=std::move(conn_fref)]() mutable {
return dispatch_connect(cc_seq, new_sid, std::move(conn_fref));
});
}
logger().debug("{} got {} dispatch_connect({}) at {}",
conn, cc_seq, new_sid, io_stat_printer{*this});
if (get_io_state() == io_state_t::drop) {
assert(!protocol_is_connected);
// it is possible that both io_handler and protocolv2 are
// trying to close each other from different cores simultaneously.
return to_new_sid(new_sid, std::move(conn_fref));
}
ceph_assert_always(protocol_is_connected == false);
protocol_is_connected = true;
ceph_assert_always(conn_ref);
auto _conn_ref = conn_ref;
auto fut = to_new_sid(new_sid, std::move(conn_fref));
dispatchers.ms_handle_connect(_conn_ref, new_sid);
// user can make changes
return fut;
}
seastar::future<>
IOHandler::cleanup_prv_shard(seastar::shard_id prv_sid)
{
assert(seastar::this_shard_id() == get_shard_id());
return seastar::smp::submit_to(prv_sid, [this] {
logger().debug("{} got cleanup_prv_shard()", conn);
assert(maybe_prv_shard_states != nullptr);
auto ref_prv_states = std::move(maybe_prv_shard_states);
auto &prv_states = *ref_prv_states;
return prv_states.close(
).then([ref_prv_states=std::move(ref_prv_states)] {
ceph_assert_always(ref_prv_states->assert_closed_and_exit());
});
}).then([this] {
ceph_assert_always(maybe_prv_shard_states == nullptr);
});
}
seastar::future<>
IOHandler::to_new_sid(
seastar::shard_id new_sid,
ConnectionFRef conn_fref)
{
  /*
   * Note:
   * - It must be called before the user is aware of the new core (through dispatching);
   * - The messenger must wait on the returned future before further operations to prevent racing;
   * - In general, the continuation submitted below should be the first one from the prv sid
   *   to the new sid;
   */
assert(seastar::this_shard_id() == get_shard_id());
bool is_dropped = false;
if (get_io_state() == io_state_t::drop) {
is_dropped = true;
}
ceph_assert_always(get_io_state() != io_state_t::open);
// apply the switching atomically
ceph_assert_always(conn_ref);
conn_ref.reset();
auto prv_sid = get_shard_id();
ceph_assert_always(maybe_prv_shard_states == nullptr);
maybe_prv_shard_states = std::move(shard_states);
shard_states = shard_states_t::create_from_previous(
*maybe_prv_shard_states, new_sid);
assert(new_sid == get_shard_id());
return seastar::smp::submit_to(new_sid,
[this, is_dropped, prv_sid, conn_fref=std::move(conn_fref)]() mutable {
logger().debug("{} see new_sid in io_handler(new_sid) from {}, is_dropped={}",
conn, prv_sid, is_dropped);
ceph_assert_always(seastar::this_shard_id() == get_shard_id());
ceph_assert_always(get_io_state() != io_state_t::open);
ceph_assert_always(!maybe_dropped_sid.has_value());
ceph_assert_always(!conn_ref);
conn_ref = make_local_shared_foreign(std::move(conn_fref));
if (is_dropped) {
      // the follow-up cleanup will be done in the prv_sid
ceph_assert_always(shard_states->assert_closed_and_exit());
maybe_dropped_sid = prv_sid;
} else {
// may be at io_state_t::drop
      // clean up the previous shard
shard_states->dispatch_in_background(
"cleanup_prv_sid", conn, [this, prv_sid] {
return cleanup_prv_shard(prv_sid);
});
maybe_notify_out_dispatch();
}
});
}
seastar::future<> IOHandler::set_accepted_sid(
crosscore_t::seq_t cc_seq,
seastar::shard_id sid,
ConnectionFRef conn_fref)
{
assert(seastar::this_shard_id() == get_shard_id());
assert(get_io_state() == io_state_t::none);
ceph_assert_always(conn_ref);
conn_ref.reset();
assert(maybe_prv_shard_states == nullptr);
shard_states.reset();
shard_states = shard_states_t::create(sid, io_state_t::none);
return seastar::smp::submit_to(sid,
[this, cc_seq, conn_fref=std::move(conn_fref)]() mutable {
// must be the first to proceed
ceph_assert_always(crosscore.proceed_or_wait(cc_seq));
logger().debug("{} set accepted sid", conn);
ceph_assert_always(seastar::this_shard_id() == get_shard_id());
ceph_assert_always(get_io_state() == io_state_t::none);
assert(maybe_prv_shard_states == nullptr);
ceph_assert_always(!conn_ref);
conn_ref = make_local_shared_foreign(std::move(conn_fref));
});
}
void IOHandler::dispatch_reset(bool is_replace)
{
ceph_assert_always(get_io_state() == io_state_t::drop);
if (!need_dispatch_reset) {
return;
}
need_dispatch_reset = false;
ceph_assert_always(conn_ref);
dispatchers.ms_handle_reset(conn_ref, is_replace);
// user can make changes
}
void IOHandler::dispatch_remote_reset()
{
if (get_io_state() == io_state_t::drop) {
return;
}
ceph_assert_always(conn_ref);
dispatchers.ms_handle_remote_reset(conn_ref);
// user can make changes
}
void IOHandler::ack_out_sent(seq_num_t seq)
{
if (conn.policy.lossy) { // lossy connections don't keep sent messages
return;
}
while (!out_sent_msgs.empty() &&
out_sent_msgs.front()->get_seq() <= seq) {
logger().trace("{} got ack seq {} >= {}, pop {}",
conn, seq, out_sent_msgs.front()->get_seq(),
*out_sent_msgs.front());
out_sent_msgs.pop_front();
}
}
seastar::future<>
IOHandler::do_out_dispatch(shard_states_t &ctx)
{
return seastar::repeat([this, &ctx] {
switch (ctx.get_io_state()) {
case io_state_t::open: {
if (unlikely(!is_out_queued())) {
// try exit open dispatching
return frame_assembler->flush<false>(
).then([this, &ctx] {
if (ctx.get_io_state() != io_state_t::open || is_out_queued()) {
return seastar::make_ready_future<stop_t>(stop_t::no);
}
// still nothing pending to send after flush,
// open dispatching can ONLY stop now
ctx.exit_out_dispatching("exit-open", conn);
return seastar::make_ready_future<stop_t>(stop_t::yes);
});
}
auto to_ack = ack_left;
assert(to_ack == 0 || in_seq > 0);
return frame_assembler->write<false>(
sweep_out_pending_msgs_to_sent(
need_keepalive, next_keepalive_ack, to_ack > 0)
).then([this, prv_keepalive_ack=next_keepalive_ack, to_ack, &ctx] {
if (ctx.get_io_state() != io_state_t::open) {
return frame_assembler->flush<false>(
).then([] {
return seastar::make_ready_future<stop_t>(stop_t::no);
});
}
need_keepalive = false;
if (next_keepalive_ack == prv_keepalive_ack) {
next_keepalive_ack = std::nullopt;
}
assert(ack_left >= to_ack);
ack_left -= to_ack;
// FIXME: may leak a flush if state is changed after return and before
// the next repeat body.
return seastar::make_ready_future<stop_t>(stop_t::no);
});
}
case io_state_t::delay:
// delay out dispatching until open
ctx.notify_out_dispatching_stopped("delay...", conn);
return ctx.wait_state_change(
).then([] { return stop_t::no; });
case io_state_t::drop:
ctx.exit_out_dispatching("dropped", conn);
return seastar::make_ready_future<stop_t>(stop_t::yes);
case io_state_t::switched:
ctx.exit_out_dispatching("switched", conn);
return seastar::make_ready_future<stop_t>(stop_t::yes);
default:
ceph_abort("impossible");
}
}).handle_exception_type([this, &ctx](const std::system_error& e) {
auto io_state = ctx.get_io_state();
if (e.code() != std::errc::broken_pipe &&
e.code() != std::errc::connection_reset &&
e.code() != error::negotiation_failure) {
logger().error("{} do_out_dispatch(): unexpected error at {} -- {}",
conn, io_state, e.what());
ceph_abort();
}
if (io_state == io_state_t::open) {
auto cc_seq = crosscore.prepare_submit();
logger().info("{} do_out_dispatch(): fault at {}, {}, going to delay -- {}, "
"send {} notify_out_fault()",
conn, io_state, io_stat_printer{*this}, e.what(), cc_seq);
std::exception_ptr eptr;
try {
throw e;
} catch(...) {
eptr = std::current_exception();
}
do_set_io_state(io_state_t::delay);
shard_states->dispatch_in_background(
"notify_out_fault(out)", conn, [this, cc_seq, eptr] {
auto states = get_states();
return seastar::smp::submit_to(
conn.get_messenger_shard_id(), [this, cc_seq, eptr, states] {
return handshake_listener->notify_out_fault(
cc_seq, "do_out_dispatch", eptr, states);
});
});
} else {
if (io_state != io_state_t::switched) {
logger().info("{} do_out_dispatch(): fault at {}, {} -- {}",
conn, io_state, io_stat_printer{*this}, e.what());
} else {
logger().info("{} do_out_dispatch(): fault at {} -- {}",
conn, io_state, e.what());
}
}
return do_out_dispatch(ctx);
});
}
void IOHandler::maybe_notify_out_dispatch()
{
ceph_assert_always(seastar::this_shard_id() == get_shard_id());
if (is_out_queued()) {
notify_out_dispatch();
}
}
void IOHandler::notify_out_dispatch()
{
ceph_assert_always(seastar::this_shard_id() == get_shard_id());
assert(is_out_queued());
if (need_notify_out) {
auto cc_seq = crosscore.prepare_submit();
logger().debug("{} send {} notify_out()",
conn, cc_seq);
shard_states->dispatch_in_background(
"notify_out", conn, [this, cc_seq] {
return seastar::smp::submit_to(
conn.get_messenger_shard_id(), [this, cc_seq] {
return handshake_listener->notify_out(cc_seq);
});
});
}
if (shard_states->try_enter_out_dispatching()) {
shard_states->dispatch_in_background(
"do_out_dispatch", conn, [this] {
return do_out_dispatch(*shard_states);
});
}
}
seastar::future<>
IOHandler::read_message(
shard_states_t &ctx,
utime_t throttle_stamp,
std::size_t msg_size)
{
return frame_assembler->read_frame_payload<false>(
).then([this, throttle_stamp, msg_size, &ctx](auto payload) {
if (unlikely(ctx.get_io_state() != io_state_t::open)) {
logger().debug("{} triggered {} during read_message()",
conn, ctx.get_io_state());
abort_protocol();
}
utime_t recv_stamp{seastar::lowres_system_clock::now()};
    // we need to get the size before std::move()-ing the segment data
auto msg_frame = MessageFrame::Decode(*payload);
// XXX: paranoid copy just to avoid oops
ceph_msg_header2 current_header = msg_frame.header();
logger().trace("{} got {} + {} + {} byte message,"
" envelope type={} src={} off={} seq={}",
conn,
msg_frame.front_len(),
msg_frame.middle_len(),
msg_frame.data_len(),
current_header.type,
conn.get_peer_name(),
current_header.data_off,
current_header.seq);
ceph_msg_header header{current_header.seq,
current_header.tid,
current_header.type,
current_header.priority,
current_header.version,
ceph_le32(msg_frame.front_len()),
ceph_le32(msg_frame.middle_len()),
ceph_le32(msg_frame.data_len()),
current_header.data_off,
conn.get_peer_name(),
current_header.compat_version,
current_header.reserved,
ceph_le32(0)};
ceph_msg_footer footer{ceph_le32(0), ceph_le32(0),
ceph_le32(0), ceph_le64(0), current_header.flags};
Message *message = decode_message(nullptr, 0, header, footer,
msg_frame.front(), msg_frame.middle(), msg_frame.data(), nullptr);
if (!message) {
logger().warn("{} decode message failed", conn);
abort_in_fault();
}
// store reservation size in message, so we don't get confused
// by messages entering the dispatch queue through other paths.
message->set_dispatch_throttle_size(msg_size);
message->set_throttle_stamp(throttle_stamp);
message->set_recv_stamp(recv_stamp);
message->set_recv_complete_stamp(utime_t{seastar::lowres_system_clock::now()});
// check received seq#. if it is old, drop the message.
// note that incoming messages may skip ahead. this is convenient for the
// client side queueing because messages can't be renumbered, but the (kernel)
// client will occasionally pull a message out of the sent queue to send
// elsewhere. in that case it doesn't matter if we "got" it or not.
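    // For example: with in_seq == 5, an incoming seq 3 is discarded as old,
    // seq 6 is the expected next message, and seq 7 is still accepted but
    // logged as a possibly skipped message.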
uint64_t cur_seq = in_seq;
if (message->get_seq() <= cur_seq) {
logger().error("{} got old message {} <= {} {}, discarding",
conn, message->get_seq(), cur_seq, *message);
if (HAVE_FEATURE(conn.features, RECONNECT_SEQ) &&
local_conf()->ms_die_on_old_message) {
ceph_assert(0 == "old msgs despite reconnect_seq feature");
}
return seastar::now();
} else if (message->get_seq() > cur_seq + 1) {
logger().error("{} missed message? skipped from seq {} to {}",
conn, cur_seq, message->get_seq());
if (local_conf()->ms_die_on_skipped_message) {
ceph_assert(0 == "skipped incoming seq");
}
}
// note last received message.
in_seq = message->get_seq();
if (conn.policy.lossy) {
logger().debug("{} <== #{} === {} ({})",
conn,
message->get_seq(),
*message,
message->get_type());
} else {
logger().debug("{} <== #{},{} === {} ({})",
conn,
message->get_seq(),
current_header.ack_seq,
*message,
message->get_type());
}
// notify ack
if (!conn.policy.lossy) {
++ack_left;
notify_out_dispatch();
}
ack_out_sent(current_header.ack_seq);
// TODO: change MessageRef with seastar::shared_ptr
auto msg_ref = MessageRef{message, false};
assert(ctx.get_io_state() == io_state_t::open);
assert(get_io_state() == io_state_t::open);
ceph_assert_always(conn_ref);
// throttle the reading process by the returned future
return dispatchers.ms_dispatch(conn_ref, std::move(msg_ref));
// user can make changes
});
}
void IOHandler::do_in_dispatch()
{
shard_states->enter_in_dispatching();
shard_states->dispatch_in_background(
"do_in_dispatch", conn, [this, &ctx=*shard_states] {
return seastar::keep_doing([this, &ctx] {
return frame_assembler->read_main_preamble<false>(
).then([this, &ctx](auto ret) {
switch (ret.tag) {
case Tag::MESSAGE: {
size_t msg_size = get_msg_size(*ret.rx_frame_asm);
return seastar::futurize_invoke([this] {
// throttle_message() logic
if (!conn.policy.throttler_messages) {
return seastar::now();
}
// TODO: message throttler
ceph_abort("TODO");
return seastar::now();
}).then([this, msg_size] {
// throttle_bytes() logic
if (!conn.policy.throttler_bytes) {
return seastar::now();
}
if (!msg_size) {
return seastar::now();
}
logger().trace("{} wants {} bytes from policy throttler {}/{}",
conn, msg_size,
conn.policy.throttler_bytes->get_current(),
conn.policy.throttler_bytes->get_max());
return conn.policy.throttler_bytes->get(msg_size);
}).then([this, msg_size, &ctx] {
// TODO: throttle_dispatch_queue() logic
utime_t throttle_stamp{seastar::lowres_system_clock::now()};
return read_message(ctx, throttle_stamp, msg_size);
});
}
case Tag::ACK:
return frame_assembler->read_frame_payload<false>(
).then([this](auto payload) {
// handle_message_ack() logic
auto ack = AckFrame::Decode(payload->back());
logger().debug("{} GOT AckFrame: seq={}", conn, ack.seq());
ack_out_sent(ack.seq());
});
case Tag::KEEPALIVE2:
return frame_assembler->read_frame_payload<false>(
).then([this](auto payload) {
// handle_keepalive2() logic
auto keepalive_frame = KeepAliveFrame::Decode(payload->back());
logger().debug("{} GOT KeepAliveFrame: timestamp={}",
conn, keepalive_frame.timestamp());
// notify keepalive ack
next_keepalive_ack = keepalive_frame.timestamp();
if (seastar::this_shard_id() == get_shard_id()) {
notify_out_dispatch();
}
last_keepalive = seastar::lowres_system_clock::now();
});
case Tag::KEEPALIVE2_ACK:
return frame_assembler->read_frame_payload<false>(
).then([this](auto payload) {
// handle_keepalive2_ack() logic
auto keepalive_ack_frame = KeepAliveFrameAck::Decode(payload->back());
auto _last_keepalive_ack =
seastar::lowres_system_clock::time_point{keepalive_ack_frame.timestamp()};
set_last_keepalive_ack(_last_keepalive_ack);
logger().debug("{} GOT KeepAliveFrameAck: timestamp={}",
conn, _last_keepalive_ack);
});
default: {
logger().warn("{} do_in_dispatch() received unexpected tag: {}",
conn, static_cast<uint32_t>(ret.tag));
abort_in_fault();
}
}
});
}).handle_exception([this, &ctx](std::exception_ptr eptr) {
const char *e_what;
try {
std::rethrow_exception(eptr);
} catch (std::exception &e) {
e_what = e.what();
}
auto io_state = ctx.get_io_state();
if (io_state == io_state_t::open) {
auto cc_seq = crosscore.prepare_submit();
logger().info("{} do_in_dispatch(): fault at {}, {}, going to delay -- {}, "
"send {} notify_out_fault()",
conn, io_state, io_stat_printer{*this}, e_what, cc_seq);
do_set_io_state(io_state_t::delay);
shard_states->dispatch_in_background(
"notify_out_fault(in)", conn, [this, cc_seq, eptr] {
auto states = get_states();
return seastar::smp::submit_to(
conn.get_messenger_shard_id(), [this, cc_seq, eptr, states] {
return handshake_listener->notify_out_fault(
cc_seq, "do_in_dispatch", eptr, states);
});
});
} else {
if (io_state != io_state_t::switched) {
logger().info("{} do_in_dispatch(): fault at {}, {} -- {}",
conn, io_state, io_stat_printer{*this}, e_what);
} else {
logger().info("{} do_in_dispatch(): fault at {} -- {}",
conn, io_state, e_what);
}
}
}).finally([&ctx] {
ctx.exit_in_dispatching();
});
});
}
seastar::future<>
IOHandler::close_io(
crosscore_t::seq_t cc_seq,
bool is_dispatch_reset,
bool is_replace)
{
ceph_assert_always(seastar::this_shard_id() == get_shard_id());
if (!crosscore.proceed_or_wait(cc_seq)) {
logger().debug("{} got {} close_io(), wait at {}",
conn, cc_seq, crosscore.get_in_seq());
return crosscore.wait(cc_seq
).then([this, cc_seq, is_dispatch_reset, is_replace] {
return close_io(cc_seq, is_dispatch_reset, is_replace);
});
}
logger().debug("{} got {} close_io(reset={}, replace={})",
conn, cc_seq, is_dispatch_reset, is_replace);
ceph_assert_always(get_io_state() == io_state_t::drop);
if (is_dispatch_reset) {
dispatch_reset(is_replace);
}
ceph_assert_always(conn_ref);
conn_ref.reset();
// cannot be running in parallel with to_new_sid()
if (maybe_dropped_sid.has_value()) {
assert(shard_states->assert_closed_and_exit());
auto prv_sid = *maybe_dropped_sid;
return cleanup_prv_shard(prv_sid);
} else {
return shard_states->close(
).then([this] {
assert(shard_states->assert_closed_and_exit());
});
}
}
/*
* IOHandler::shard_states_t
*/
void
IOHandler::shard_states_t::notify_out_dispatching_stopped(
const char *what, SocketConnection &conn)
{
assert(seastar::this_shard_id() == sid);
if (unlikely(out_exit_dispatching.has_value())) {
out_exit_dispatching->set_value();
out_exit_dispatching = std::nullopt;
logger().info("{} do_out_dispatch: stop({}) at {}, set out_exit_dispatching",
conn, what, io_state);
} else {
if (unlikely(io_state != io_state_t::open)) {
logger().info("{} do_out_dispatch: stop({}) at {}, no out_exit_dispatching",
conn, what, io_state);
}
}
}
seastar::future<>
IOHandler::shard_states_t::wait_io_exit_dispatching()
{
assert(seastar::this_shard_id() == sid);
assert(io_state != io_state_t::open);
assert(!gate.is_closed());
return seastar::when_all(
[this] {
if (out_exit_dispatching) {
return out_exit_dispatching->get_future();
} else {
return seastar::now();
}
}(),
[this] {
if (in_exit_dispatching) {
return in_exit_dispatching->get_future();
} else {
return seastar::now();
}
}()
).discard_result();
}
IOHandler::shard_states_ref_t
IOHandler::shard_states_t::create_from_previous(
shard_states_t &prv_states,
seastar::shard_id new_sid)
{
auto io_state = prv_states.io_state;
assert(io_state != io_state_t::open);
auto ret = shard_states_t::create(new_sid, io_state);
if (io_state == io_state_t::drop) {
    // the new gate should never be used
auto fut = ret->gate.close();
ceph_assert_always(fut.available());
}
prv_states.set_io_state(io_state_t::switched);
return ret;
}
} // namespace crimson::net
| 41,451 | 32.082203 | 92 | cc |
null | ceph-main/src/crimson/net/io_handler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/shared_future.hh>
#include <seastar/util/later.hh>
#include "crimson/common/gated.h"
#include "Fwd.h"
#include "SocketConnection.h"
#include "FrameAssemblerV2.h"
namespace crimson::net {
/**
* crosscore_t
*
* To preserve the event order across cores.
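 *
 * Rough usage sketch (illustrative only, mirroring how the IOHandler methods
 * in io_handler.cc use it; not an additional API):
 *
 *   sender core:   auto cc_seq = crosscore.prepare_submit();
 *                  seastar::smp::submit_to(other_sid, [cc_seq] { ... });
 *   receiver core: if (!crosscore.proceed_or_wait(cc_seq)) {
 *                    return crosscore.wait(cc_seq).then(... retry with cc_seq ...);
 *                  }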
*/
class crosscore_t {
public:
using seq_t = uint64_t;
crosscore_t() = default;
~crosscore_t() = default;
seq_t get_in_seq() const {
return in_seq;
}
seq_t prepare_submit() {
++out_seq;
return out_seq;
}
bool proceed_or_wait(seq_t seq) {
if (seq == in_seq + 1) {
++in_seq;
if (unlikely(in_pr_wait.has_value())) {
in_pr_wait->set_value();
in_pr_wait = std::nullopt;
}
return true;
} else {
return false;
}
}
seastar::future<> wait(seq_t seq) {
assert(seq != in_seq + 1);
if (!in_pr_wait.has_value()) {
in_pr_wait = seastar::shared_promise<>();
}
return in_pr_wait->get_shared_future();
}
private:
seq_t out_seq = 0;
seq_t in_seq = 0;
std::optional<seastar::shared_promise<>> in_pr_wait;
};
/**
* io_handler_state
*
 * Used to populate the IO states from IOHandler to ProtocolV2
 * asynchronously.
*/
struct io_handler_state {
seq_num_t in_seq;
bool is_out_queued;
bool has_out_sent;
bool is_out_queued_or_sent() const {
return is_out_queued || has_out_sent;
}
/*
   * should be consistent with the corresponding interfaces in IOHandler
*/
void reset_session(bool full) {
in_seq = 0;
if (full) {
is_out_queued = false;
has_out_sent = false;
}
}
void reset_peer_state() {
in_seq = 0;
is_out_queued = is_out_queued_or_sent();
has_out_sent = false;
}
void requeue_out_sent_up_to() {
// noop since the information is insufficient
}
void requeue_out_sent() {
if (has_out_sent) {
has_out_sent = false;
is_out_queued = true;
}
}
};
/**
* HandshakeListener
*
* The interface class for IOHandler to notify the ProtocolV2.
*
* The notifications may be cross-core and must be sent to
* SocketConnection::get_messenger_shard_id()
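 *
 * (Informal note: see IOHandler::notify_out_dispatch() and do_out_dispatch()
 * in io_handler.cc for the call shape -- each notification is wrapped in
 * seastar::smp::submit_to(conn.get_messenger_shard_id(), ...) together with
 * a sequence number from crosscore.prepare_submit().)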
*/
class HandshakeListener {
public:
virtual ~HandshakeListener() = default;
HandshakeListener(const HandshakeListener&) = delete;
HandshakeListener(HandshakeListener &&) = delete;
HandshakeListener &operator=(const HandshakeListener &) = delete;
HandshakeListener &operator=(HandshakeListener &&) = delete;
virtual seastar::future<> notify_out(
crosscore_t::seq_t cc_seq) = 0;
virtual seastar::future<> notify_out_fault(
crosscore_t::seq_t cc_seq,
const char *where,
std::exception_ptr,
io_handler_state) = 0;
virtual seastar::future<> notify_mark_down(
crosscore_t::seq_t cc_seq) = 0;
protected:
HandshakeListener() = default;
};
/**
* IOHandler
*
 * Implements the message read and write paths after the handshake, and is also
 * responsible for dispatching events. It is supposed to run on the same
 * core as the underlying socket and the FrameAssemblerV2 class.
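 *
 * Informal summary of the sections below: the ConnectionHandler part serves
 * the local user of the connection (send() redirects to the IO core when
 * called elsewhere), while the remaining public methods are invoked by the
 * ProtocolV2 handshake, possibly cross-core and asynchronously, with
 * crosscore_t preserving their order.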
*/
class IOHandler final : public ConnectionHandler {
public:
IOHandler(ChainedDispatchers &,
SocketConnection &);
~IOHandler() final;
IOHandler(const IOHandler &) = delete;
IOHandler(IOHandler &&) = delete;
IOHandler &operator=(const IOHandler &) = delete;
IOHandler &operator=(IOHandler &&) = delete;
/*
* as ConnectionHandler
*/
public:
seastar::shard_id get_shard_id() const final {
return shard_states->get_shard_id();
}
bool is_connected() const final {
ceph_assert_always(seastar::this_shard_id() == get_shard_id());
return protocol_is_connected;
}
seastar::future<> send(MessageFRef msg) final;
seastar::future<> send_keepalive() final;
clock_t::time_point get_last_keepalive() const final {
ceph_assert_always(seastar::this_shard_id() == get_shard_id());
return last_keepalive;
}
clock_t::time_point get_last_keepalive_ack() const final {
ceph_assert_always(seastar::this_shard_id() == get_shard_id());
return last_keepalive_ack;
}
void set_last_keepalive_ack(clock_t::time_point when) final {
ceph_assert_always(seastar::this_shard_id() == get_shard_id());
last_keepalive_ack = when;
}
void mark_down() final;
/*
* as IOHandler to be called by ProtocolV2 handshake
*
* The calls may be cross-core and asynchronous
*/
public:
/*
* should not be called cross-core
*/
void set_handshake_listener(HandshakeListener &hl) {
assert(seastar::this_shard_id() == get_shard_id());
ceph_assert_always(handshake_listener == nullptr);
handshake_listener = &hl;
}
io_handler_state get_states() const {
// might be called from prv_sid during wait_io_exit_dispatching()
return {in_seq, is_out_queued(), has_out_sent()};
}
struct io_stat_printer {
const IOHandler &io_handler;
};
void print_io_stat(std::ostream &out) const;
seastar::future<> set_accepted_sid(
crosscore_t::seq_t cc_seq,
seastar::shard_id sid,
ConnectionFRef conn_fref);
/*
* may be called cross-core
*/
seastar::future<> close_io(
crosscore_t::seq_t cc_seq,
bool is_dispatch_reset,
bool is_replace);
/**
* io_state_t
*
* The io_state is changed with the protocol state, to control the
* io behavior accordingly.
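   *
   * Informal note, inferred from the call sites in io_handler.cc rather than
   * guaranteed here: open and delay alternate around faults (see
   * do_out_dispatch()/do_in_dispatch()), drop is the terminal state asserted
   * by close_io(), and switched marks the states left behind on the previous
   * core after to_new_sid().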
*/
enum class io_state_t : uint8_t {
none, // no IO is possible as the connection is not available to the user yet.
delay, // IO is delayed until open.
open, // Dispatch In and Out concurrently.
drop, // Drop IO as the connection is closed.
switched // IO is switched to a different core
// (is moved to maybe_prv_shard_states)
};
friend class fmt::formatter<io_state_t>;
seastar::future<> set_io_state(
crosscore_t::seq_t cc_seq,
io_state_t new_state,
FrameAssemblerV2Ref fa,
bool set_notify_out);
struct exit_dispatching_ret {
FrameAssemblerV2Ref frame_assembler;
io_handler_state io_states;
};
seastar::future<exit_dispatching_ret>
wait_io_exit_dispatching(
crosscore_t::seq_t cc_seq);
seastar::future<> reset_session(
crosscore_t::seq_t cc_seq,
bool full);
seastar::future<> reset_peer_state(
crosscore_t::seq_t cc_seq);
seastar::future<> requeue_out_sent_up_to(
crosscore_t::seq_t cc_seq,
seq_num_t msg_seq);
seastar::future<> requeue_out_sent(
crosscore_t::seq_t cc_seq);
seastar::future<> dispatch_accept(
crosscore_t::seq_t cc_seq,
seastar::shard_id new_sid,
ConnectionFRef,
bool is_replace);
seastar::future<> dispatch_connect(
crosscore_t::seq_t cc_seq,
seastar::shard_id new_sid,
ConnectionFRef);
private:
class shard_states_t;
using shard_states_ref_t = std::unique_ptr<shard_states_t>;
class shard_states_t {
public:
shard_states_t(seastar::shard_id _sid, io_state_t state)
: sid{_sid}, io_state{state} {}
seastar::shard_id get_shard_id() const {
return sid;
}
io_state_t get_io_state() const {
assert(seastar::this_shard_id() == sid);
return io_state;
}
void set_io_state(io_state_t new_state) {
assert(seastar::this_shard_id() == sid);
assert(io_state != new_state);
pr_io_state_changed.set_value();
pr_io_state_changed = seastar::promise<>();
if (io_state == io_state_t::open) {
// from open
if (out_dispatching) {
ceph_assert_always(!out_exit_dispatching.has_value());
out_exit_dispatching = seastar::promise<>();
}
}
io_state = new_state;
}
seastar::future<> wait_state_change() {
assert(seastar::this_shard_id() == sid);
return pr_io_state_changed.get_future();
}
template <typename Func>
void dispatch_in_background(
const char *what, SocketConnection &who, Func &&func) {
assert(seastar::this_shard_id() == sid);
ceph_assert_always(!gate.is_closed());
gate.dispatch_in_background(what, who, std::move(func));
}
void enter_in_dispatching() {
assert(seastar::this_shard_id() == sid);
assert(io_state == io_state_t::open);
ceph_assert_always(!in_exit_dispatching.has_value());
in_exit_dispatching = seastar::promise<>();
}
void exit_in_dispatching() {
assert(seastar::this_shard_id() == sid);
assert(io_state != io_state_t::open);
ceph_assert_always(in_exit_dispatching.has_value());
in_exit_dispatching->set_value();
in_exit_dispatching = std::nullopt;
}
bool try_enter_out_dispatching() {
assert(seastar::this_shard_id() == sid);
if (out_dispatching) {
// already dispatching out
return false;
}
switch (io_state) {
case io_state_t::open:
[[fallthrough]];
case io_state_t::delay:
out_dispatching = true;
return true;
case io_state_t::drop:
[[fallthrough]];
case io_state_t::switched:
// do not dispatch out
return false;
default:
ceph_abort("impossible");
}
}
void notify_out_dispatching_stopped(
const char *what, SocketConnection &conn);
void exit_out_dispatching(
const char *what, SocketConnection &conn) {
assert(seastar::this_shard_id() == sid);
ceph_assert_always(out_dispatching);
out_dispatching = false;
notify_out_dispatching_stopped(what, conn);
}
seastar::future<> wait_io_exit_dispatching();
seastar::future<> close() {
assert(seastar::this_shard_id() == sid);
assert(!gate.is_closed());
return gate.close();
}
bool assert_closed_and_exit() const {
assert(seastar::this_shard_id() == sid);
if (gate.is_closed()) {
ceph_assert_always(io_state == io_state_t::drop ||
io_state == io_state_t::switched);
ceph_assert_always(!out_dispatching);
ceph_assert_always(!out_exit_dispatching);
ceph_assert_always(!in_exit_dispatching);
return true;
} else {
return false;
}
}
static shard_states_ref_t create(
seastar::shard_id sid, io_state_t state) {
return std::make_unique<shard_states_t>(sid, state);
}
static shard_states_ref_t create_from_previous(
shard_states_t &prv_states, seastar::shard_id new_sid);
private:
const seastar::shard_id sid;
io_state_t io_state;
crimson::common::Gated gate;
seastar::promise<> pr_io_state_changed;
bool out_dispatching = false;
std::optional<seastar::promise<>> out_exit_dispatching;
std::optional<seastar::promise<>> in_exit_dispatching;
};
void do_set_io_state(
io_state_t new_state,
std::optional<crosscore_t::seq_t> cc_seq = std::nullopt,
FrameAssemblerV2Ref fa = nullptr,
bool set_notify_out = false);
io_state_t get_io_state() const {
return shard_states->get_io_state();
}
void do_requeue_out_sent();
void do_requeue_out_sent_up_to(seq_num_t seq);
void assign_frame_assembler(FrameAssemblerV2Ref);
seastar::future<> send_redirected(MessageFRef msg);
seastar::future<> do_send(MessageFRef msg);
seastar::future<> send_keepalive_redirected();
seastar::future<> do_send_keepalive();
seastar::future<> to_new_sid(
seastar::shard_id new_sid, ConnectionFRef);
void dispatch_reset(bool is_replace);
void dispatch_remote_reset();
bool is_out_queued() const {
return (!out_pending_msgs.empty() ||
ack_left > 0 ||
need_keepalive ||
next_keepalive_ack.has_value());
}
bool has_out_sent() const {
return !out_sent_msgs.empty();
}
void reset_in();
void reset_out();
void discard_out_sent();
seastar::future<> do_out_dispatch(shard_states_t &ctx);
ceph::bufferlist sweep_out_pending_msgs_to_sent(
bool require_keepalive,
std::optional<utime_t> maybe_keepalive_ack,
bool require_ack);
void maybe_notify_out_dispatch();
void notify_out_dispatch();
void ack_out_sent(seq_num_t seq);
seastar::future<> read_message(
shard_states_t &ctx,
utime_t throttle_stamp,
std::size_t msg_size);
void do_in_dispatch();
seastar::future<> cleanup_prv_shard(seastar::shard_id prv_sid);
private:
shard_states_ref_t shard_states;
crosscore_t crosscore;
  // set when the drop happened in the previous sid
std::optional<seastar::shard_id> maybe_dropped_sid;
// the remaining states in the previous sid for cleanup, see to_new_sid()
shard_states_ref_t maybe_prv_shard_states;
ChainedDispatchers &dispatchers;
SocketConnection &conn;
// core local reference for dispatching, valid until reset/close
ConnectionRef conn_ref;
HandshakeListener *handshake_listener = nullptr;
FrameAssemblerV2Ref frame_assembler;
bool protocol_is_connected = false;
bool need_dispatch_reset = true;
/*
* out states for writing
*/
/// the seq num of the last transmitted message
seq_num_t out_seq = 0;
// messages to be resent after connection gets reset
std::deque<MessageFRef> out_pending_msgs;
// messages sent, but not yet acked by peer
std::deque<MessageFRef> out_sent_msgs;
bool need_keepalive = false;
std::optional<utime_t> next_keepalive_ack = std::nullopt;
uint64_t ack_left = 0;
bool need_notify_out = false;
/*
* in states for reading
*/
/// the seq num of the last received message
seq_num_t in_seq = 0;
clock_t::time_point last_keepalive;
clock_t::time_point last_keepalive_ack;
};
inline std::ostream& operator<<(
std::ostream& out, IOHandler::io_stat_printer stat) {
stat.io_handler.print_io_stat(out);
return out;
}
} // namespace crimson::net
template <>
struct fmt::formatter<crimson::net::io_handler_state> {
constexpr auto parse(format_parse_context& ctx) {
return ctx.begin();
}
template <typename FormatContext>
auto format(crimson::net::io_handler_state state, FormatContext& ctx) {
return fmt::format_to(
ctx.out(),
"io(in_seq={}, is_out_queued={}, has_out_sent={})",
state.in_seq,
state.is_out_queued,
state.has_out_sent);
}
};
template <>
struct fmt::formatter<crimson::net::IOHandler::io_state_t>
: fmt::formatter<std::string_view> {
template <typename FormatContext>
auto format(crimson::net::IOHandler::io_state_t state, FormatContext& ctx) {
using enum crimson::net::IOHandler::io_state_t;
std::string_view name;
switch (state) {
case none:
name = "none";
break;
case delay:
name = "delay";
break;
case open:
name = "open";
break;
case drop:
name = "drop";
break;
case switched:
name = "switched";
break;
}
return formatter<string_view>::format(name, ctx);
}
};
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::net::IOHandler::io_stat_printer> : fmt::ostream_formatter {};
#endif
| 15,166 | 23.863934 | 104 | h |
null | ceph-main/src/crimson/os/futurized_collection.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/future.hh>
#include "osd/osd_types.h"
namespace crimson::os {
class FuturizedStore;
class FuturizedCollection
: public boost::intrusive_ref_counter<FuturizedCollection,
boost::thread_safe_counter>
{
public:
FuturizedCollection(const coll_t& cid)
: cid{cid} {}
virtual ~FuturizedCollection() {}
virtual seastar::future<> flush() {
return seastar::make_ready_future<>();
}
virtual seastar::future<bool> flush_commit() {
return seastar::make_ready_future<bool>(true);
}
const coll_t& get_cid() const {
return cid;
}
private:
const coll_t cid;
};
using CollectionRef = boost::intrusive_ptr<FuturizedCollection>;
}
| 915 | 23.105263 | 70 | h |
null | ceph-main/src/crimson/os/futurized_store.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "futurized_store.h"
#include "cyanstore/cyan_store.h"
#ifdef WITH_BLUESTORE
#include "alienstore/alien_store.h"
#endif
#include "seastore/seastore.h"
namespace crimson::os {
std::unique_ptr<FuturizedStore>
FuturizedStore::create(const std::string& type,
const std::string& data,
const ConfigValues& values)
{
if (type == "cyanstore") {
using crimson::os::CyanStore;
return std::make_unique<CyanStore>(data);
} else if (type == "seastore") {
return crimson::os::seastore::make_seastore(
data);
} else {
using crimson::os::AlienStore;
#ifdef WITH_BLUESTORE
// use AlienStore as a fallback. It adapts e.g. BlueStore.
return std::make_unique<AlienStore>(type, data, values);
#else
ceph_abort_msgf("unsupported objectstore type: %s", type.c_str());
return {};
#endif
}
}
}
| 971 | 25.27027 | 70 | cc |
null | ceph-main/src/crimson/os/futurized_store.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <string>
#include <map>
#include <optional>
#include <vector>
#include <seastar/core/future.hh>
#include "os/Transaction.h"
#include "crimson/common/smp_helpers.h"
#include "crimson/osd/exceptions.h"
#include "include/buffer_fwd.h"
#include "include/uuid.h"
#include "osd/osd_types.h"
namespace ceph::os {
class Transaction;
}
namespace crimson::os {
class FuturizedCollection;
class FuturizedStore {
public:
class Shard {
public:
Shard() = default;
virtual ~Shard() = default;
// no copying
explicit Shard(const Shard& o) = delete;
const Shard& operator=(const Shard& o) = delete;
using CollectionRef = boost::intrusive_ptr<FuturizedCollection>;
using read_errorator = crimson::errorator<crimson::ct_error::enoent,
crimson::ct_error::input_output_error>;
virtual read_errorator::future<ceph::bufferlist> read(
CollectionRef c,
const ghobject_t& oid,
uint64_t offset,
size_t len,
uint32_t op_flags = 0) = 0;
virtual read_errorator::future<ceph::bufferlist> readv(
CollectionRef c,
const ghobject_t& oid,
interval_set<uint64_t>& m,
uint32_t op_flags = 0) = 0;
using get_attr_errorator = crimson::errorator<
crimson::ct_error::enoent,
crimson::ct_error::enodata>;
virtual get_attr_errorator::future<ceph::bufferlist> get_attr(
CollectionRef c,
const ghobject_t& oid,
std::string_view name) const = 0;
using get_attrs_ertr = crimson::errorator<
crimson::ct_error::enoent>;
using attrs_t = std::map<std::string, ceph::bufferlist, std::less<>>;
virtual get_attrs_ertr::future<attrs_t> get_attrs(
CollectionRef c,
const ghobject_t& oid) = 0;
virtual seastar::future<struct stat> stat(
CollectionRef c,
const ghobject_t& oid) = 0;
using omap_values_t = std::map<std::string, ceph::bufferlist, std::less<>>;
using omap_keys_t = std::set<std::string>;
virtual read_errorator::future<omap_values_t> omap_get_values(
CollectionRef c,
const ghobject_t& oid,
const omap_keys_t& keys) = 0;
virtual read_errorator::future<std::tuple<bool, omap_values_t>> omap_get_values(
CollectionRef c, ///< [in] collection
const ghobject_t &oid, ///< [in] oid
const std::optional<std::string> &start ///< [in] start, empty for begin
) = 0; ///< @return <done, values> values.empty() only if done
virtual get_attr_errorator::future<bufferlist> omap_get_header(
CollectionRef c,
const ghobject_t& oid) = 0;
virtual seastar::future<std::tuple<std::vector<ghobject_t>, ghobject_t>> list_objects(
CollectionRef c,
const ghobject_t& start,
const ghobject_t& end,
uint64_t limit) const = 0;
virtual seastar::future<CollectionRef> create_new_collection(const coll_t& cid) = 0;
virtual seastar::future<CollectionRef> open_collection(const coll_t& cid) = 0;
protected:
virtual seastar::future<> do_transaction_no_callbacks(
CollectionRef ch,
ceph::os::Transaction&& txn) = 0;
public:
seastar::future<> do_transaction(
CollectionRef ch,
ceph::os::Transaction&& txn) {
std::unique_ptr<Context> on_commit(
ceph::os::Transaction::collect_all_contexts(txn));
return do_transaction_no_callbacks(
std::move(ch), std::move(txn)
).then([on_commit=std::move(on_commit)]() mutable {
auto c = on_commit.release();
if (c) c->complete(0);
return seastar::now();
});
}
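    /*
     * Illustrative usage (a sketch, not part of the original interface docs;
     * "store", "ch" and "oid" are hypothetical):
     *
     *   ceph::os::Transaction txn;
     *   txn.touch(ch->get_cid(), oid);
     *   return store.get_sharded_store().do_transaction(ch, std::move(txn));
     */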
/**
* flush
*
* Flushes outstanding transactions on ch, returned future resolves
* after any previously submitted transactions on ch have committed.
*
* @param ch [in] collection on which to flush
*/
virtual seastar::future<> flush(CollectionRef ch) {
return do_transaction(ch, ceph::os::Transaction{});
}
// error injection
virtual seastar::future<> inject_data_error(const ghobject_t& o) {
return seastar::now();
}
virtual seastar::future<> inject_mdata_error(const ghobject_t& o) {
return seastar::now();
}
virtual read_errorator::future<std::map<uint64_t, uint64_t>> fiemap(
CollectionRef ch,
const ghobject_t& oid,
uint64_t off,
uint64_t len) = 0;
virtual unsigned get_max_attr_name_length() const = 0;
};
public:
static std::unique_ptr<FuturizedStore> create(const std::string& type,
const std::string& data,
const ConfigValues& values);
FuturizedStore()
: primary_core(seastar::this_shard_id())
{}
virtual ~FuturizedStore() = default;
// no copying
explicit FuturizedStore(const FuturizedStore& o) = delete;
const FuturizedStore& operator=(const FuturizedStore& o) = delete;
virtual seastar::future<> start() = 0;
virtual seastar::future<> stop() = 0;
using mount_ertr = crimson::errorator<crimson::stateful_ec>;
virtual mount_ertr::future<> mount() = 0;
virtual seastar::future<> umount() = 0;
using mkfs_ertr = crimson::errorator<crimson::stateful_ec>;
virtual mkfs_ertr::future<> mkfs(uuid_d new_osd_fsid) = 0;
virtual seastar::future<store_statfs_t> stat() const = 0;
virtual uuid_d get_fsid() const = 0;
virtual seastar::future<> write_meta(const std::string& key,
const std::string& value) = 0;
  // called on the target shard to get that shard's FuturizedStore::Shard;
virtual Shard& get_sharded_store() = 0;
virtual seastar::future<std::tuple<int, std::string>> read_meta(
const std::string& key) = 0;
using coll_core_t = std::pair<coll_t, core_id_t>;
virtual seastar::future<std::vector<coll_core_t>> list_collections() = 0;
protected:
const core_id_t primary_core;
};
}
| 5,948 | 29.352041 | 90 | h |
null | ceph-main/src/crimson/os/alienstore/alien_collection.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "os/ObjectStore.h"
#include "crimson/os/futurized_collection.h"
#include "crimson/os/futurized_store.h"
#include "alien_store.h"
namespace crimson::os {
class AlienCollection final : public FuturizedCollection {
public:
AlienCollection(ObjectStore::CollectionHandle ch)
: FuturizedCollection(ch->cid),
collection(ch) {}
~AlienCollection() {}
template <typename Func, typename Result = std::invoke_result_t<Func>>
seastar::futurize_t<Result> with_lock(Func&& func) {
// newer versions of Seastar provide two variants of `with_lock`
// - generic, friendly towards throwing move constructors of Func,
// - specialized for `noexcept`.
// unfortunately, the former has a limitation: the return value
// of `Func` must be compatible with `current_exception_as_future()`
// which boils down to returning `seastar::future<void>`.
static_assert(std::is_nothrow_move_constructible_v<Func>);
return seastar::with_lock(mutex, std::forward<Func>(func));
}
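  // Informal note: the caller in this tree is
  // AlienStore::do_transaction_no_callbacks() (alien_store.cc), which captures
  // `ch` by move so the functor stays nothrow-move-constructible as required
  // by the static_assert above.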
private:
ObjectStore::CollectionHandle collection;
seastar::shared_mutex mutex;
friend AlienStore;
};
}
| 1,233 | 29.85 | 72 | h |
null | ceph-main/src/crimson/os/alienstore/alien_log.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "alien_log.h"
#include "log/SubsystemMap.h"
#include <seastar/core/alien.hh>
#include "crimson/common/log.h"
namespace ceph::logging {
CnLog::CnLog(const SubsystemMap *s, seastar::alien::instance& inst, unsigned shard)
:Log(s)
,inst(inst)
,shard(shard) {
}
CnLog::~CnLog() {
}
void CnLog::_flush(EntryVector& q, bool crash) {
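  // Informal note: this runs on an alien (non-seastar) thread; each entry is
  // forwarded to the crimson logger on the owning shard and .wait() blocks
  // this thread until the reactor has consumed the batch.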
seastar::alien::submit_to(inst, shard, [&q] {
for (auto& it : q) {
crimson::get_logger(it.m_subsys).log(
crimson::to_log_level(it.m_prio),
"{}",
it.strv());
}
return seastar::make_ready_future<>();
}).wait();
q.clear();
return;
}
} //namespace ceph::logging
| 752 | 21.147059 | 83 | cc |
null | ceph-main/src/crimson/os/alienstore/alien_log.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef ALIEN_LOG_H
#define ALIEN_LOG_H
#include "log/Log.h"
namespace ceph {
namespace logging {
class SubsystemMap;
}
}
namespace seastar::alien {
class instance;
}
namespace ceph::logging
{
class CnLog : public ceph::logging::Log
{
seastar::alien::instance& inst;
unsigned shard;
void _flush(EntryVector& q, bool crash) override;
public:
CnLog(const SubsystemMap *s, seastar::alien::instance& inst, unsigned shard);
~CnLog() override;
};
}
#endif
| 565 | 16.6875 | 79 | h |
null | ceph-main/src/crimson/os/alienstore/alien_store.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "alien_collection.h"
#include "alien_store.h"
#include "alien_log.h"
#include <algorithm>
#include <iterator>
#include <map>
#include <string_view>
#include <boost/algorithm/string/trim.hpp>
#include <boost/iterator/counting_iterator.hpp>
#include <fmt/format.h>
#include <fmt/ostream.h>
#include <seastar/core/alien.hh>
#include <seastar/core/future-util.hh>
#include <seastar/core/reactor.hh>
#include <seastar/core/resource.hh>
#include "common/ceph_context.h"
#include "global/global_context.h"
#include "include/Context.h"
#include "os/ObjectStore.h"
#include "os/Transaction.h"
#include "crimson/common/config_proxy.h"
#include "crimson/common/log.h"
#include "crimson/os/futurized_store.h"
using std::map;
using std::set;
using std::string;
namespace {
seastar::logger& logger()
{
return crimson::get_logger(ceph_subsys_alienstore);
}
class OnCommit final: public Context
{
const int cpuid;
seastar::alien::instance &alien;
seastar::promise<> &alien_done;
public:
OnCommit(
int id,
seastar::promise<> &done,
seastar::alien::instance &alien,
ceph::os::Transaction& txn)
: cpuid(id),
alien(alien),
alien_done(done) {
}
void finish(int) final {
return seastar::alien::submit_to(alien, cpuid, [this] {
alien_done.set_value();
return seastar::make_ready_future<>();
}).wait();
}
};
}
namespace crimson::os {
using crimson::common::get_conf;
AlienStore::AlienStore(const std::string& type,
const std::string& path,
const ConfigValues& values)
: type(type),
path{path},
values(values)
{
}
AlienStore::~AlienStore()
{
}
seastar::future<> AlienStore::start()
{
cct = std::make_unique<CephContext>(
CEPH_ENTITY_TYPE_OSD,
CephContext::create_options { CODE_ENVIRONMENT_UTILITY, 0,
[](const ceph::logging::SubsystemMap* subsys_map) {
return new ceph::logging::CnLog(subsys_map, seastar::engine().alien(), seastar::this_shard_id());
}
}
);
g_ceph_context = cct.get();
cct->_conf.set_config_values(values);
cct->_log->start();
store = ObjectStore::create(cct.get(), type, path);
if (!store) {
ceph_abort_msgf("unsupported objectstore type: %s", type.c_str());
}
auto cpu_cores = seastar::resource::parse_cpuset(
get_conf<std::string>("crimson_alien_thread_cpu_cores"));
// cores except the first "N_CORES_FOR_SEASTAR" ones will
// be used for alien threads scheduling:
// [0, N_CORES_FOR_SEASTAR) are reserved for seastar reactors
// [N_CORES_FOR_SEASTAR, ..] are assigned to alien threads.
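  // For example (hypothetical values): with "crimson_alien_thread_cpu_cores"
  // set to "4-7" the alien ObjectStore threads are pinned to CPUs 4-7; when
  // the option is unset, the fallback below assigns every CPU from
  // N_CORES_FOR_SEASTAR up to nproc - 1.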
if (!cpu_cores.has_value()) {
seastar::resource::cpuset cpuset;
std::copy(boost::counting_iterator<unsigned>(N_CORES_FOR_SEASTAR),
boost::counting_iterator<unsigned>(sysconf(_SC_NPROCESSORS_ONLN)),
std::inserter(cpuset, cpuset.end()));
if (cpuset.empty()) {
logger().error("{}: unable to get nproc: {}", __func__, errno);
} else {
cpu_cores = cpuset;
}
}
const auto num_threads =
get_conf<uint64_t>("crimson_alien_op_num_threads");
tp = std::make_unique<crimson::os::ThreadPool>(num_threads, 128, cpu_cores);
return tp->start();
}
seastar::future<> AlienStore::stop()
{
if (!tp) {
// not really started yet
return seastar::now();
}
return tp->submit([this] {
for (auto [cid, ch]: coll_map) {
static_cast<AlienCollection*>(ch.get())->collection.reset();
}
store.reset();
cct.reset();
g_ceph_context = nullptr;
}).then([this] {
return tp->stop();
});
}
AlienStore::mount_ertr::future<> AlienStore::mount()
{
logger().debug("{}", __func__);
assert(tp);
return tp->submit([this] {
return store->mount();
}).then([] (const int r) -> mount_ertr::future<> {
if (r != 0) {
return crimson::stateful_ec{
std::error_code(-r, std::generic_category()) };
} else {
return mount_ertr::now();
}
});
}
seastar::future<> AlienStore::umount()
{
logger().info("{}", __func__);
if (!tp) {
// not really started yet
return seastar::now();
}
return op_gate.close().then([this] {
return tp->submit([this] {
return store->umount();
});
}).then([] (int r) {
assert(r == 0);
return seastar::now();
});
}
AlienStore::mkfs_ertr::future<> AlienStore::mkfs(uuid_d osd_fsid)
{
logger().debug("{}", __func__);
store->set_fsid(osd_fsid);
assert(tp);
return tp->submit([this] {
return store->mkfs();
}).then([] (int r) -> mkfs_ertr::future<> {
if (r != 0) {
return crimson::stateful_ec{
std::error_code(-r, std::generic_category()) };
} else {
return mkfs_ertr::now();
}
});
}
seastar::future<std::tuple<std::vector<ghobject_t>, ghobject_t>>
AlienStore::list_objects(CollectionRef ch,
const ghobject_t& start,
const ghobject_t& end,
uint64_t limit) const
{
logger().debug("{}", __func__);
assert(tp);
return do_with_op_gate(std::vector<ghobject_t>(), ghobject_t(),
[=, this] (auto &objects, auto &next) {
objects.reserve(limit);
return tp->submit(ch->get_cid().hash_to_shard(tp->size()),
[=, this, &objects, &next] {
auto c = static_cast<AlienCollection*>(ch.get());
return store->collection_list(c->collection, start, end,
store->get_ideal_list_max(),
&objects, &next);
}).then([&objects, &next] (int r) {
assert(r == 0);
return seastar::make_ready_future<
std::tuple<std::vector<ghobject_t>, ghobject_t>>(
std::move(objects), std::move(next));
});
});
}
seastar::future<CollectionRef> AlienStore::create_new_collection(const coll_t& cid)
{
logger().debug("{}", __func__);
assert(tp);
return tp->submit([this, cid] {
return store->create_new_collection(cid);
}).then([this, cid] (ObjectStore::CollectionHandle c) {
CollectionRef ch;
auto cp = coll_map.find(c->cid);
if (cp == coll_map.end()) {
ch = new AlienCollection(c);
coll_map[c->cid] = ch;
} else {
ch = cp->second;
auto ach = static_cast<AlienCollection*>(ch.get());
if (ach->collection != c) {
ach->collection = c;
}
}
return seastar::make_ready_future<CollectionRef>(ch);
});
}
seastar::future<CollectionRef> AlienStore::open_collection(const coll_t& cid)
{
logger().debug("{}", __func__);
assert(tp);
return tp->submit([this, cid] {
return store->open_collection(cid);
}).then([this] (ObjectStore::CollectionHandle c) {
if (!c) {
return seastar::make_ready_future<CollectionRef>();
}
CollectionRef ch;
auto cp = coll_map.find(c->cid);
if (cp == coll_map.end()){
ch = new AlienCollection(c);
coll_map[c->cid] = ch;
} else {
ch = cp->second;
auto ach = static_cast<AlienCollection*>(ch.get());
if (ach->collection != c){
ach->collection = c;
}
}
return seastar::make_ready_future<CollectionRef>(ch);
});
}
seastar::future<std::vector<coll_core_t>> AlienStore::list_collections()
{
logger().debug("{}", __func__);
assert(tp);
return do_with_op_gate(std::vector<coll_t>{}, [this] (auto &ls) {
return tp->submit([this, &ls] {
return store->list_collections(ls);
}).then([&ls] (int r) -> seastar::future<std::vector<coll_core_t>> {
assert(r == 0);
std::vector<coll_core_t> ret;
ret.resize(ls.size());
std::transform(
ls.begin(), ls.end(), ret.begin(),
[](auto p) { return std::make_pair(p, NULL_CORE); });
return seastar::make_ready_future<std::vector<coll_core_t>>(std::move(ret));
});
});
}
AlienStore::read_errorator::future<ceph::bufferlist>
AlienStore::read(CollectionRef ch,
const ghobject_t& oid,
uint64_t offset,
size_t len,
uint32_t op_flags)
{
logger().debug("{}", __func__);
assert(tp);
return do_with_op_gate(ceph::bufferlist{}, [=, this] (auto &bl) {
return tp->submit(ch->get_cid().hash_to_shard(tp->size()), [=, this, &bl] {
auto c = static_cast<AlienCollection*>(ch.get());
return store->read(c->collection, oid, offset, len, bl, op_flags);
}).then([&bl] (int r) -> read_errorator::future<ceph::bufferlist> {
if (r == -ENOENT) {
return crimson::ct_error::enoent::make();
} else if (r == -EIO) {
return crimson::ct_error::input_output_error::make();
} else {
return read_errorator::make_ready_future<ceph::bufferlist>(
std::move(bl));
}
});
});
}
AlienStore::read_errorator::future<ceph::bufferlist>
AlienStore::readv(CollectionRef ch,
const ghobject_t& oid,
interval_set<uint64_t>& m,
uint32_t op_flags)
{
logger().debug("{}", __func__);
assert(tp);
return do_with_op_gate(ceph::bufferlist{},
[this, ch, oid, &m, op_flags](auto& bl) {
return tp->submit(ch->get_cid().hash_to_shard(tp->size()),
[this, ch, oid, &m, op_flags, &bl] {
auto c = static_cast<AlienCollection*>(ch.get());
return store->readv(c->collection, oid, m, bl, op_flags);
}).then([&bl](int r) -> read_errorator::future<ceph::bufferlist> {
if (r == -ENOENT) {
return crimson::ct_error::enoent::make();
} else if (r == -EIO) {
return crimson::ct_error::input_output_error::make();
} else {
return read_errorator::make_ready_future<ceph::bufferlist>(
std::move(bl));
}
});
});
}
AlienStore::get_attr_errorator::future<ceph::bufferlist>
AlienStore::get_attr(CollectionRef ch,
const ghobject_t& oid,
std::string_view name) const
{
logger().debug("{}", __func__);
assert(tp);
return do_with_op_gate(ceph::bufferlist{}, std::string{name},
[=, this] (auto &value, const auto& name) {
return tp->submit(ch->get_cid().hash_to_shard(tp->size()), [=, this, &value, &name] {
// XXX: `name` isn't a `std::string_view` anymore! it had to be converted
// to `std::string` for the sake of extending life-time not only of
// a _ptr-to-data_ but _data_ as well. Otherwise we would run into a use-
// after-free issue.
auto c = static_cast<AlienCollection*>(ch.get());
return store->getattr(c->collection, oid, name.c_str(), value);
}).then([oid, &value](int r) -> get_attr_errorator::future<ceph::bufferlist> {
if (r == -ENOENT) {
return crimson::ct_error::enoent::make();
} else if (r == -ENODATA) {
return crimson::ct_error::enodata::make();
} else {
return get_attr_errorator::make_ready_future<ceph::bufferlist>(
std::move(value));
}
});
});
}
AlienStore::get_attrs_ertr::future<AlienStore::attrs_t>
AlienStore::get_attrs(CollectionRef ch,
const ghobject_t& oid)
{
logger().debug("{}", __func__);
assert(tp);
return do_with_op_gate(attrs_t{}, [=, this] (auto &aset) {
return tp->submit(ch->get_cid().hash_to_shard(tp->size()), [=, this, &aset] {
auto c = static_cast<AlienCollection*>(ch.get());
const auto r = store->getattrs(c->collection, oid, aset);
return r;
}).then([&aset] (int r) -> get_attrs_ertr::future<attrs_t> {
if (r == -ENOENT) {
return crimson::ct_error::enoent::make();
} else {
return get_attrs_ertr::make_ready_future<attrs_t>(std::move(aset));
}
});
});
}
auto AlienStore::omap_get_values(CollectionRef ch,
const ghobject_t& oid,
const set<string>& keys)
-> read_errorator::future<omap_values_t>
{
logger().debug("{}", __func__);
assert(tp);
return do_with_op_gate(omap_values_t{}, [=, this] (auto &values) {
return tp->submit(ch->get_cid().hash_to_shard(tp->size()), [=, this, &values] {
auto c = static_cast<AlienCollection*>(ch.get());
return store->omap_get_values(c->collection, oid, keys,
reinterpret_cast<map<string, bufferlist>*>(&values));
}).then([&values] (int r) -> read_errorator::future<omap_values_t> {
if (r == -ENOENT) {
return crimson::ct_error::enoent::make();
} else {
assert(r == 0);
return read_errorator::make_ready_future<omap_values_t>(
std::move(values));
}
});
});
}
auto AlienStore::omap_get_values(CollectionRef ch,
const ghobject_t &oid,
const std::optional<string> &start)
-> read_errorator::future<std::tuple<bool, omap_values_t>>
{
logger().debug("{} with_start", __func__);
assert(tp);
return do_with_op_gate(omap_values_t{}, [=, this] (auto &values) {
return tp->submit(ch->get_cid().hash_to_shard(tp->size()), [=, this, &values] {
auto c = static_cast<AlienCollection*>(ch.get());
return store->omap_get_values(c->collection, oid, start,
reinterpret_cast<map<string, bufferlist>*>(&values));
}).then([&values] (int r)
-> read_errorator::future<std::tuple<bool, omap_values_t>> {
if (r == -ENOENT) {
return crimson::ct_error::enoent::make();
} else if (r < 0){
logger().error("omap_get_values(start): {}", r);
return crimson::ct_error::input_output_error::make();
} else {
return read_errorator::make_ready_future<std::tuple<bool, omap_values_t>>(
true, std::move(values));
}
});
});
}
seastar::future<> AlienStore::do_transaction_no_callbacks(
CollectionRef ch,
ceph::os::Transaction&& txn)
{
logger().debug("{}", __func__);
auto id = seastar::this_shard_id();
auto done = seastar::promise<>();
return do_with_op_gate(
std::move(txn),
std::move(done),
[this, ch, id] (auto &txn, auto &done) {
AlienCollection* alien_coll = static_cast<AlienCollection*>(ch.get());
    // moving the `ch` is crucial for buildability on newer Seastar versions.
return alien_coll->with_lock([this, ch=std::move(ch), id, &txn, &done] {
assert(tp);
return tp->submit(ch->get_cid().hash_to_shard(tp->size()),
[this, ch, id, &txn, &done, &alien=seastar::engine().alien()] {
txn.register_on_commit(new OnCommit(id, done, alien, txn));
auto c = static_cast<AlienCollection*>(ch.get());
return store->queue_transaction(c->collection, std::move(txn));
});
}).then([&done] (int r) {
assert(r == 0);
return done.get_future();
});
});
}
seastar::future<> AlienStore::inject_data_error(const ghobject_t& o)
{
logger().debug("{}", __func__);
assert(tp);
return seastar::with_gate(op_gate, [=, this] {
return tp->submit([o, this] {
return store->inject_data_error(o);
});
});
}
seastar::future<> AlienStore::inject_mdata_error(const ghobject_t& o)
{
logger().debug("{}", __func__);
assert(tp);
return seastar::with_gate(op_gate, [=, this] {
return tp->submit([=, this] {
return store->inject_mdata_error(o);
});
});
}
seastar::future<> AlienStore::write_meta(const std::string& key,
const std::string& value)
{
logger().debug("{}", __func__);
assert(tp);
return seastar::with_gate(op_gate, [=, this] {
return tp->submit([=, this] {
return store->write_meta(key, value);
}).then([] (int r) {
assert(r == 0);
return seastar::make_ready_future<>();
});
});
}
seastar::future<std::tuple<int, std::string>>
AlienStore::read_meta(const std::string& key)
{
logger().debug("{}", __func__);
assert(tp);
return seastar::with_gate(op_gate, [this, key] {
return tp->submit([this, key] {
std::string value;
int r = store->read_meta(key, &value);
if (r > 0) {
value.resize(r);
boost::algorithm::trim_right_if(value,
[] (unsigned char c) {return isspace(c);});
} else {
value.clear();
}
return std::make_pair(r, value);
}).then([] (auto entry) {
return seastar::make_ready_future<std::tuple<int, std::string>>(
std::move(entry));
});
});
}
uuid_d AlienStore::get_fsid() const
{
logger().debug("{}", __func__);
return store->get_fsid();
}
seastar::future<store_statfs_t> AlienStore::stat() const
{
logger().info("{}", __func__);
assert(tp);
return do_with_op_gate(store_statfs_t{}, [this] (store_statfs_t &st) {
return tp->submit([this, &st] {
return store->statfs(&st, nullptr);
}).then([&st] (int r) {
assert(r == 0);
return seastar::make_ready_future<store_statfs_t>(std::move(st));
});
});
}
unsigned AlienStore::get_max_attr_name_length() const
{
logger().info("{}", __func__);
  // arbitrary limit; matches the 256-byte cap used by CyanStore/MemStore
  return 256;
}
seastar::future<struct stat> AlienStore::stat(
CollectionRef ch,
const ghobject_t& oid)
{
assert(tp);
return do_with_op_gate((struct stat){}, [this, ch, oid](auto& st) {
return tp->submit(ch->get_cid().hash_to_shard(tp->size()), [this, ch, oid, &st] {
auto c = static_cast<AlienCollection*>(ch.get());
store->stat(c->collection, oid, &st);
return st;
});
});
}
auto AlienStore::omap_get_header(CollectionRef ch,
const ghobject_t& oid)
-> get_attr_errorator::future<ceph::bufferlist>
{
assert(tp);
return do_with_op_gate(ceph::bufferlist(), [=, this](auto& bl) {
return tp->submit(ch->get_cid().hash_to_shard(tp->size()), [=, this, &bl] {
auto c = static_cast<AlienCollection*>(ch.get());
return store->omap_get_header(c->collection, oid, &bl);
}).then([&bl](int r) -> get_attr_errorator::future<ceph::bufferlist> {
if (r == -ENOENT) {
return crimson::ct_error::enoent::make();
} else if (r < 0) {
logger().error("omap_get_header: {}", r);
ceph_assert(0 == "impossible");
} else {
return get_attr_errorator::make_ready_future<ceph::bufferlist>(
std::move(bl));
}
});
});
}
AlienStore::read_errorator::future<std::map<uint64_t, uint64_t>> AlienStore::fiemap(
CollectionRef ch,
const ghobject_t& oid,
uint64_t off,
uint64_t len)
{
assert(tp);
return do_with_op_gate(std::map<uint64_t, uint64_t>(), [=, this](auto& destmap) {
return tp->submit(ch->get_cid().hash_to_shard(tp->size()), [=, this, &destmap] {
auto c = static_cast<AlienCollection*>(ch.get());
return store->fiemap(c->collection, oid, off, len, destmap);
}).then([&destmap](int r)
-> read_errorator::future<std::map<uint64_t, uint64_t>> {
if (r == -ENOENT) {
return crimson::ct_error::enoent::make();
} else {
return read_errorator::make_ready_future<std::map<uint64_t, uint64_t>>(
std::move(destmap));
}
});
});
}
}
| 18,895 | 29.428341 | 98 | cc |
null | ceph-main/src/crimson/os/alienstore/alien_store.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <seastar/core/future.hh>
#include <seastar/core/shared_mutex.hh>
#include "common/ceph_context.h"
#include "os/ObjectStore.h"
#include "osd/osd_types.h"
#include "crimson/os/alienstore/thread_pool.h"
#include "crimson/os/futurized_collection.h"
#include "crimson/os/futurized_store.h"
namespace ceph::os {
class Transaction;
}
namespace crimson::os {
using coll_core_t = FuturizedStore::coll_core_t;
class AlienStore final : public FuturizedStore,
public FuturizedStore::Shard {
public:
AlienStore(const std::string& type,
const std::string& path,
const ConfigValues& values);
~AlienStore() final;
seastar::future<> start() final;
seastar::future<> stop() final;
mount_ertr::future<> mount() final;
seastar::future<> umount() final;
mkfs_ertr::future<> mkfs(uuid_d new_osd_fsid) final;
read_errorator::future<ceph::bufferlist> read(CollectionRef c,
const ghobject_t& oid,
uint64_t offset,
size_t len,
uint32_t op_flags = 0) final;
read_errorator::future<ceph::bufferlist> readv(CollectionRef c,
const ghobject_t& oid,
interval_set<uint64_t>& m,
uint32_t op_flags = 0) final;
get_attr_errorator::future<ceph::bufferlist> get_attr(CollectionRef c,
const ghobject_t& oid,
std::string_view name) const final;
get_attrs_ertr::future<attrs_t> get_attrs(CollectionRef c,
const ghobject_t& oid) final;
read_errorator::future<omap_values_t> omap_get_values(
CollectionRef c,
const ghobject_t& oid,
const omap_keys_t& keys) final;
/// Retrieves paged set of values > start (if present)
read_errorator::future<std::tuple<bool, omap_values_t>> omap_get_values(
CollectionRef c, ///< [in] collection
const ghobject_t &oid, ///< [in] oid
const std::optional<std::string> &start ///< [in] start, empty for begin
) final; ///< @return <done, values> values.empty() iff done
seastar::future<std::tuple<std::vector<ghobject_t>, ghobject_t>> list_objects(
CollectionRef c,
const ghobject_t& start,
const ghobject_t& end,
uint64_t limit) const final;
seastar::future<CollectionRef> create_new_collection(const coll_t& cid) final;
seastar::future<CollectionRef> open_collection(const coll_t& cid) final;
seastar::future<std::vector<coll_core_t>> list_collections() final;
seastar::future<> do_transaction_no_callbacks(
CollectionRef c,
ceph::os::Transaction&& txn) final;
// error injection
seastar::future<> inject_data_error(const ghobject_t& o) final;
seastar::future<> inject_mdata_error(const ghobject_t& o) final;
seastar::future<> write_meta(const std::string& key,
const std::string& value) final;
seastar::future<std::tuple<int, std::string>> read_meta(
const std::string& key) final;
uuid_d get_fsid() const final;
seastar::future<store_statfs_t> stat() const final;
unsigned get_max_attr_name_length() const final;
seastar::future<struct stat> stat(
CollectionRef,
const ghobject_t&) final;
get_attr_errorator::future<ceph::bufferlist> omap_get_header(
CollectionRef,
const ghobject_t&) final;
read_errorator::future<std::map<uint64_t, uint64_t>> fiemap(
CollectionRef,
const ghobject_t&,
uint64_t off,
uint64_t len) final;
FuturizedStore::Shard& get_sharded_store() final {
return *this;
}
private:
template <class... Args>
auto do_with_op_gate(Args&&... args) const {
return seastar::with_gate(op_gate,
// perfect forwarding in lambda's closure isn't available in C++17
// using tuple as workaround; see: https://stackoverflow.com/a/49902823
[args = std::make_tuple(std::forward<Args>(args)...)] () mutable {
return std::apply([] (auto&&... args) {
return seastar::do_with(std::forward<decltype(args)>(args)...);
}, std::move(args));
});
}
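  // For illustration only: once C++20 pack init-captures are available, the
  // tuple detour above could be written roughly as
  //   [...args = std::forward<Args>(args)] () mutable {
  //     return seastar::do_with(std::move(args)...);
  //   }
  // the tuple + std::apply form is kept for C++17 compatibility.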
// number of cores that are PREVENTED from being scheduled
// to run alien store threads.
static constexpr int N_CORES_FOR_SEASTAR = 3;
mutable std::unique_ptr<crimson::os::ThreadPool> tp;
const std::string type;
const std::string path;
const ConfigValues values;
uint64_t used_bytes = 0;
std::unique_ptr<ObjectStore> store;
std::unique_ptr<CephContext> cct;
mutable seastar::gate op_gate;
std::unordered_map<coll_t, CollectionRef> coll_map;
};
}
| 4,743 | 34.402985 | 80 | h |
null | ceph-main/src/crimson/os/alienstore/semaphore.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <semaphore.h>
#include <ctime>
#include <cerrno>
#include <exception>
#include <chrono>
namespace crimson {
// an implementation of std::counting_semaphore<> in C++17 using the POSIX
// semaphore.
//
// LeastMaxValue is ignored, as we don't have different backends optimized
// for different LeastMaxValues
template<unsigned LeastMaxValue = 64>
class counting_semaphore {
using clock_t = std::chrono::system_clock;
public:
explicit counting_semaphore(unsigned count) noexcept {
sem_init(&sem, 0, count);
}
counting_semaphore(const counting_semaphore&) = delete;
counting_semaphore& operator=(const counting_semaphore&) = delete;
~counting_semaphore() {
sem_destroy(&sem);
}
void acquire() noexcept {
for (;;) {
int err = sem_wait(&sem);
if (err != 0) {
if (errno == EINTR) {
continue;
} else {
std::terminate();
}
} else {
break;
}
}
}
void release(unsigned update = 1) {
for (; update != 0; --update) {
int err = sem_post(&sem);
if (err != 0) {
std::terminate();
}
}
}
template<typename Clock, typename Duration>
bool try_acquire_until(const std::chrono::time_point<Clock, Duration>& abs_time) noexcept {
auto s = std::chrono::time_point_cast<std::chrono::seconds>(abs_time);
auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(abs_time - s);
struct timespec ts = {
static_cast<std::time_t>(s.time_since_epoch().count()),
static_cast<long>(ns.count())
};
for (;;) {
if (int err = sem_timedwait(&sem, &ts); err) {
if (errno == EINTR) {
continue;
} else if (errno == ETIMEDOUT || errno == EINVAL) {
return false;
} else {
std::terminate();
}
} else {
break;
}
}
return true;
}
template<typename Rep, typename Period>
bool try_acquire_for(const std::chrono::duration<Rep, Period>& rel_time) {
return try_acquire_until(clock_t::now() + rel_time);
}
private:
sem_t sem;
};
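// Usage sketch (illustrative only; `slots` is a placeholder name), mirroring
// how ShardedWorkQueue in thread_pool.h drives a counting semaphore -- start
// at zero, release() once per queued item, and bound the consumer's wait:
//   crimson::counting_semaphore<128> slots{0};
//   slots.release(); // producer: one item queued
//   if (slots.try_acquire_for(std::chrono::milliseconds(10))) {
//     // consumer: an item became available within the deadline
//   }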
}
| 2,219 | 23.395604 | 93 | h |
null | ceph-main/src/crimson/os/alienstore/thread_pool.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#include "thread_pool.h"
#include <chrono>
#include <pthread.h>
#include "include/ceph_assert.h"
#include "crimson/common/config_proxy.h"
using crimson::common::local_conf;
namespace crimson::os {
ThreadPool::ThreadPool(size_t n_threads,
size_t queue_sz,
const std::optional<seastar::resource::cpuset>& cpus)
: n_threads(n_threads),
queue_size{round_up_to(queue_sz, seastar::smp::count)},
pending_queues(n_threads)
{
auto queue_max_wait = std::chrono::seconds(local_conf()->threadpool_empty_queue_max_wait);
for (size_t i = 0; i < n_threads; i++) {
threads.emplace_back([this, cpus, queue_max_wait, i] {
if (cpus.has_value()) {
pin(*cpus);
}
block_sighup();
(void) pthread_setname_np(pthread_self(), "alien-store-tp");
loop(queue_max_wait, i);
});
}
}
ThreadPool::~ThreadPool()
{
for (auto& thread : threads) {
thread.join();
}
}
void ThreadPool::pin(const seastar::resource::cpuset& cpus)
{
cpu_set_t cs;
CPU_ZERO(&cs);
for (auto cpu : cpus) {
CPU_SET(cpu, &cs);
}
[[maybe_unused]] auto r = pthread_setaffinity_np(pthread_self(),
sizeof(cs), &cs);
ceph_assert(r == 0);
}
void ThreadPool::block_sighup()
{
sigset_t sigs;
sigemptyset(&sigs);
  // alien threads must ignore SIGHUP. It's necessary as in
  // `crimson/osd/main.cc` we set a handler using Seastar's signal
  // handling infrastructure, which assumes the `_backend` of
  // `seastar::engine()` is not null. Grep `reactor.cc` for
  // `sigaction` or just visit `reactor::signals::handle_signal()`.
sigaddset(&sigs, SIGHUP);
pthread_sigmask(SIG_BLOCK, &sigs, nullptr);
}
void ThreadPool::loop(std::chrono::milliseconds queue_max_wait, size_t shard)
{
auto& pending = pending_queues[shard];
for (;;) {
WorkItem* work_item = nullptr;
work_item = pending.pop_front(queue_max_wait);
if (work_item) {
work_item->process();
} else if (is_stopping()) {
break;
}
}
}
seastar::future<> ThreadPool::start()
{
auto slots_per_shard = queue_size / seastar::smp::count;
return submit_queue.start(slots_per_shard);
}
seastar::future<> ThreadPool::stop()
{
return submit_queue.stop().then([this] {
stopping = true;
for (auto& q : pending_queues) {
q.stop();
}
});
}
} // namespace crimson::os
| 2,520 | 24.464646 | 92 | cc |
null | ceph-main/src/crimson/os/alienstore/thread_pool.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <atomic>
#include <condition_variable>
#include <tuple>
#include <type_traits>
#include <boost/lockfree/queue.hpp>
#include <boost/optional.hpp>
#include <seastar/core/future.hh>
#include <seastar/core/gate.hh>
#include <seastar/core/reactor.hh>
#include <seastar/core/resource.hh>
#include <seastar/core/semaphore.hh>
#include <seastar/core/sharded.hh>
#if __cplusplus > 201703L
#include <semaphore>
namespace crimson {
using std::counting_semaphore;
}
#else
#include "semaphore.h"
#endif
namespace crimson::os {
struct WorkItem {
virtual ~WorkItem() {}
virtual void process() = 0;
};
template<typename Func>
struct Task final : WorkItem {
using T = std::invoke_result_t<Func>;
using future_stored_type_t =
std::conditional_t<std::is_void_v<T>,
seastar::internal::future_stored_type_t<>,
seastar::internal::future_stored_type_t<T>>;
using futurator_t = seastar::futurize<T>;
public:
explicit Task(Func&& f)
: func(std::move(f))
{}
void process() override {
try {
if constexpr (std::is_void_v<T>) {
func();
state.set();
} else {
state.set(func());
}
} catch (...) {
state.set_exception(std::current_exception());
}
on_done.write_side().signal(1);
}
typename futurator_t::type get_future() {
return on_done.wait().then([this](size_t) {
if (state.failed()) {
return futurator_t::make_exception_future(state.get_exception());
} else {
return futurator_t::from_tuple(state.get_value());
}
});
}
private:
Func func;
seastar::future_state<future_stored_type_t> state;
seastar::readable_eventfd on_done;
};
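// How a Task's result crosses threads (summary of the code above): process()
// runs on an alien worker thread, stores the value or exception in `state`
// and then signals the eventfd; get_future() runs on the reactor and only
// touches `state` after the eventfd wait completes, making the eventfd the
// synchronization point between the two threads.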
struct SubmitQueue {
seastar::semaphore free_slots;
seastar::gate pending_tasks;
explicit SubmitQueue(size_t num_free_slots)
: free_slots(num_free_slots)
{}
seastar::future<> stop() {
return pending_tasks.close();
}
};
struct ShardedWorkQueue {
public:
WorkItem* pop_front(std::chrono::milliseconds& queue_max_wait) {
if (sem.try_acquire_for(queue_max_wait)) {
if (!is_stopping()) {
WorkItem* work_item = nullptr;
[[maybe_unused]] bool popped = pending.pop(work_item);
assert(popped);
return work_item;
}
}
return nullptr;
}
void stop() {
stopping = true;
sem.release();
}
void push_back(WorkItem* work_item) {
[[maybe_unused]] bool pushed = pending.push(work_item);
assert(pushed);
sem.release();
}
private:
bool is_stopping() const {
return stopping;
}
std::atomic<bool> stopping = false;
static constexpr unsigned QUEUE_SIZE = 128;
crimson::counting_semaphore<QUEUE_SIZE> sem{0};
boost::lockfree::queue<WorkItem*> pending{QUEUE_SIZE};
};
/// an engine for scheduling non-seastar tasks from seastar fibers
class ThreadPool {
public:
  /**
   * @param n_threads the number of threads in this thread pool
   * @param queue_sz the depth of the pending queue. Before a task is
   *                 scheduled, it waits in this queue. The value is rounded
   *                 up to a multiple of the reactor core count.
   * @param cpus the CPU cores to which the worker threads are pinned;
   *             no pinning is performed if unset.
   * @note each @c Task owns a seastar::readable_eventfd, which holds an fd,
   * so the size of the queue should be kept under a reasonable limit.
   * @note a usage sketch follows this class definition.
   */
ThreadPool(size_t n_threads, size_t queue_sz, const std::optional<seastar::resource::cpuset>& cpus);
~ThreadPool();
seastar::future<> start();
seastar::future<> stop();
size_t size() {
return n_threads;
}
template<typename Func, typename...Args>
auto submit(int shard, Func&& func, Args&&... args) {
auto packaged = [func=std::move(func),
args=std::forward_as_tuple(args...)] {
return std::apply(std::move(func), std::move(args));
};
return seastar::with_gate(submit_queue.local().pending_tasks,
[packaged=std::move(packaged), shard, this] {
return local_free_slots().wait()
.then([packaged=std::move(packaged), shard, this] {
auto task = new Task{std::move(packaged)};
auto fut = task->get_future();
pending_queues[shard].push_back(task);
return fut.finally([task, this] {
local_free_slots().signal();
delete task;
});
});
});
}
template<typename Func>
auto submit(Func&& func) {
return submit(::rand() % n_threads, std::forward<Func>(func));
}
private:
void loop(std::chrono::milliseconds queue_max_wait, size_t shard);
bool is_stopping() const {
return stopping.load(std::memory_order_relaxed);
}
static void pin(const seastar::resource::cpuset& cpus);
static void block_sighup();
seastar::semaphore& local_free_slots() {
return submit_queue.local().free_slots;
}
ThreadPool(const ThreadPool&) = delete;
ThreadPool& operator=(const ThreadPool&) = delete;
private:
size_t n_threads;
std::atomic<bool> stopping = false;
std::vector<std::thread> threads;
seastar::sharded<SubmitQueue> submit_queue;
const size_t queue_size;
std::vector<ShardedWorkQueue> pending_queues;
};
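// Usage sketch (illustrative only; all names are placeholders): a seastar
// fiber offloads a blocking ObjectStore call and resumes on the reactor with
// its return value, mirroring how AlienStore drives the pool:
//   tp->submit(shard_idx, [store, coll, oid, &st] {
//     return store->stat(coll, oid, &st); // blocking, runs on an alien thread
//   }).then([] (int r) {
//     // back on the seastar reactor
//   });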
} // namespace crimson::os
| 5,335 | 27.843243 | 102 | h |
null | ceph-main/src/crimson/os/cyanstore/cyan_collection.cc | #include "cyan_collection.h"
#include "cyan_object.h"
using std::make_pair;
namespace crimson::os
{
Collection::Collection(const coll_t& c)
: FuturizedCollection{c}
{}
Collection::~Collection() = default;
Collection::ObjectRef Collection::create_object() const
{
return new crimson::os::Object;
}
Collection::ObjectRef Collection::get_object(ghobject_t oid)
{
auto o = object_hash.find(oid);
if (o == object_hash.end())
return ObjectRef();
return o->second;
}
Collection::ObjectRef Collection::get_or_create_object(ghobject_t oid)
{
auto result = object_hash.emplace(oid, ObjectRef{});
if (result.second)
object_map[oid] = result.first->second = create_object();
return result.first->second;
}
uint64_t Collection::used_bytes() const
{
uint64_t result = 0;
for (auto& obj : object_map) {
result += obj.second->get_size();
}
return result;
}
void Collection::encode(bufferlist& bl) const
{
ENCODE_START(1, 1, bl);
encode(xattr, bl);
encode(use_page_set, bl);
uint32_t s = object_map.size();
encode(s, bl);
for (auto& [oid, obj] : object_map) {
encode(oid, bl);
obj->encode(bl);
}
ENCODE_FINISH(bl);
}
void Collection::decode(bufferlist::const_iterator& p)
{
DECODE_START(1, p);
decode(xattr, p);
decode(use_page_set, p);
uint32_t s;
decode(s, p);
while (s--) {
ghobject_t k;
decode(k, p);
auto o = create_object();
o->decode(p);
object_map.insert(make_pair(k, o));
object_hash.insert(make_pair(k, o));
}
DECODE_FINISH(p);
}
}
| 1,537 | 18.468354 | 70 | cc |
null | ceph-main/src/crimson/os/cyanstore/cyan_collection.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <string>
#include <unordered_map>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include "include/buffer.h"
#include "osd/osd_types.h"
#include "crimson/os/futurized_collection.h"
namespace crimson::os {
class Object;
/**
* a collection also orders transactions
*
* Any transactions queued under a given collection will be applied in
* sequence. Transactions queued under different collections may run
* in parallel.
*
* ObjectStore users may get collection handles with open_collection() (or,
* for bootstrapping a new collection, create_new_collection()).
*/
struct Collection final : public FuturizedCollection {
using ObjectRef = boost::intrusive_ptr<Object>;
int bits = 0;
// always use bufferlist object for testing
bool use_page_set = false;
std::unordered_map<ghobject_t, ObjectRef> object_hash; ///< for lookup
std::map<ghobject_t, ObjectRef> object_map; ///< for iteration
std::map<std::string,bufferptr> xattr;
bool exists = true;
Collection(const coll_t& c);
~Collection() final;
ObjectRef create_object() const;
ObjectRef get_object(ghobject_t oid);
ObjectRef get_or_create_object(ghobject_t oid);
uint64_t used_bytes() const;
void encode(bufferlist& bl) const;
void decode(bufferlist::const_iterator& p);
};
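// Typical use (summary; see CyanStore::Shard in cyan_store.cc): the store
// shard keeps per-coll_t Collection handles in coll_map/new_coll_map and,
// once a handle is resolved, reaches objects through get_object() on
// read-only paths and get_or_create_object() on mutating ones.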
}
| 1,446 | 26.826923 | 75 | h |
null | ceph-main/src/crimson/os/cyanstore/cyan_object.cc | #include "cyan_object.h"
#include "include/encoding.h"
namespace crimson::os {
size_t Object::get_size() const {
return data.length();
}
ceph::bufferlist Object::read(uint64_t offset, uint64_t len)
{
bufferlist ret;
ret.substr_of(data, offset, len);
return ret;
}
int Object::write(uint64_t offset, const bufferlist &src)
{
unsigned len = src.length();
// before
bufferlist newdata;
if (get_size() >= offset) {
newdata.substr_of(data, 0, offset);
} else {
if (get_size()) {
newdata.substr_of(data, 0, get_size());
}
newdata.append_zero(offset - get_size());
}
newdata.append(src);
// after
if (get_size() > offset + len) {
bufferlist tail;
tail.substr_of(data, offset + len, get_size() - (offset + len));
newdata.append(tail);
}
data = std::move(newdata);
return 0;
}
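// Worked example for write() above (illustrative): with data == "AAAA"
// (size 4), write(6, "BB") keeps the old bytes, zero-fills the 2-byte gap up
// to the offset and appends the new data, leaving "AAAA\0\0BB" (size 8).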
int Object::clone(Object *src, uint64_t srcoff, uint64_t len,
uint64_t dstoff)
{
bufferlist bl;
if (srcoff == dstoff && len == src->get_size()) {
data = src->data;
return 0;
}
bl.substr_of(src->data, srcoff, len);
return write(dstoff, bl);
}
int Object::truncate(uint64_t size)
{
if (get_size() > size) {
bufferlist bl;
bl.substr_of(data, 0, size);
data = std::move(bl);
} else if (get_size() == size) {
// do nothing
} else {
data.append_zero(size - get_size());
}
return 0;
}
void Object::encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(data, bl);
encode(xattr, bl);
encode(omap_header, bl);
encode(omap, bl);
ENCODE_FINISH(bl);
}
void Object::decode(bufferlist::const_iterator& p) {
DECODE_START(1, p);
decode(data, p);
decode(xattr, p);
decode(omap_header, p);
decode(omap, p);
DECODE_FINISH(p);
}
}
| 1,755 | 18.511111 | 68 | cc |
null | ceph-main/src/crimson/os/cyanstore/cyan_object.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <cstddef>
#include <map>
#include <string>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include "include/buffer.h"
namespace crimson::os {
struct Object : public boost::intrusive_ref_counter<
Object,
boost::thread_unsafe_counter>
{
using bufferlist = ceph::bufferlist;
bufferlist data;
// use transparent comparator for better performance, see
// https://en.cppreference.com/w/cpp/utility/functional/less_void
std::map<std::string,bufferlist,std::less<>> xattr;
bufferlist omap_header;
std::map<std::string,bufferlist> omap;
typedef boost::intrusive_ptr<Object> Ref;
Object() = default;
// interface for object data
size_t get_size() const;
ceph::bufferlist read(uint64_t offset, uint64_t len);
int write(uint64_t offset, const bufferlist &bl);
int clone(Object *src, uint64_t srcoff, uint64_t len,
uint64_t dstoff);
int truncate(uint64_t offset);
void encode(bufferlist& bl) const;
void decode(bufferlist::const_iterator& p);
};
using ObjectRef = boost::intrusive_ptr<Object>;
}
| 1,198 | 25.065217 | 70 | h |
null | ceph-main/src/crimson/os/cyanstore/cyan_store.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "cyan_store.h"
#include <boost/algorithm/string/trim.hpp>
#include <fmt/format.h>
#include <fmt/ostream.h>
#include "common/safe_io.h"
#include "os/Transaction.h"
#include "crimson/common/buffer_io.h"
#include "crimson/common/config_proxy.h"
#include "cyan_collection.h"
#include "cyan_object.h"
namespace {
seastar::logger& logger() {
return crimson::get_logger(ceph_subsys_cyanstore);
}
}
using std::string;
using crimson::common::local_conf;
namespace crimson::os {
using ObjectRef = boost::intrusive_ptr<Object>;
CyanStore::CyanStore(const std::string& path)
: path{path}
{}
CyanStore::~CyanStore() = default;
template <const char* MsgV>
struct singleton_ec : std::error_code {
singleton_ec()
: error_code(42, this_error_category{}) {
};
private:
struct this_error_category : std::error_category {
const char* name() const noexcept final {
// XXX: we could concatenate with MsgV at compile-time but the burden
// isn't worth the benefit.
return "singleton_ec";
}
std::string message([[maybe_unused]] const int ev) const final {
assert(ev == 42);
return MsgV;
}
};
};
seastar::future<store_statfs_t> CyanStore::stat() const
{
ceph_assert(seastar::this_shard_id() == primary_core);
logger().debug("{}", __func__);
return shard_stores.map_reduce0(
[](const CyanStore::Shard &local_store) {
return local_store.get_used_bytes();
},
(uint64_t)0,
std::plus<uint64_t>()
).then([](uint64_t used_bytes) {
store_statfs_t st;
st.total = crimson::common::local_conf().get_val<Option::size_t>("memstore_device_bytes");
st.available = st.total - used_bytes;
return seastar::make_ready_future<store_statfs_t>(std::move(st));
});
}
CyanStore::mkfs_ertr::future<> CyanStore::mkfs(uuid_d new_osd_fsid)
{
ceph_assert(seastar::this_shard_id() == primary_core);
static const char read_meta_errmsg[]{"read_meta"};
static const char parse_fsid_errmsg[]{"failed to parse fsid"};
static const char match_ofsid_errmsg[]{"unmatched osd_fsid"};
return read_meta("fsid").then([=, this](auto&& ret) -> mkfs_ertr::future<> {
auto& [r, fsid_str] = ret;
if (r == -ENOENT) {
if (new_osd_fsid.is_zero()) {
osd_fsid.generate_random();
} else {
osd_fsid = new_osd_fsid;
}
return write_meta("fsid", fmt::format("{}", osd_fsid));
} else if (r < 0) {
return crimson::stateful_ec{ singleton_ec<read_meta_errmsg>() };
} else {
logger().info("mkfs already has fsid {}", fsid_str);
if (!osd_fsid.parse(fsid_str.c_str())) {
return crimson::stateful_ec{ singleton_ec<parse_fsid_errmsg>() };
} else if (osd_fsid != new_osd_fsid) {
logger().error("on-disk fsid {} != provided {}", osd_fsid, new_osd_fsid);
return crimson::stateful_ec{ singleton_ec<match_ofsid_errmsg>() };
} else {
return mkfs_ertr::now();
}
}
}).safe_then([this]{
return write_meta("type", "memstore");
}).safe_then([this] {
return shard_stores.invoke_on_all(
[](auto &local_store) {
return local_store.mkfs();
});
});
}
seastar::future<> CyanStore::Shard::mkfs()
{
std::string fn =
path + "/collections" + std::to_string(seastar::this_shard_id());
ceph::bufferlist bl;
std::set<coll_t> collections;
ceph::encode(collections, bl);
return crimson::write_file(std::move(bl), fn);
}
using coll_core_t = FuturizedStore::coll_core_t;
seastar::future<std::vector<coll_core_t>>
CyanStore::list_collections()
{
ceph_assert(seastar::this_shard_id() == primary_core);
return seastar::do_with(std::vector<coll_core_t>{}, [this](auto &collections) {
return shard_stores.map([](auto &local_store) {
return local_store.list_collections();
}).then([&collections](std::vector<std::vector<coll_core_t>> results) {
for (auto& colls : results) {
collections.insert(collections.end(), colls.begin(), colls.end());
}
return seastar::make_ready_future<std::vector<coll_core_t>>(
std::move(collections));
});
});
}
CyanStore::mount_ertr::future<> CyanStore::Shard::mount()
{
static const char read_file_errmsg[]{"read_file"};
ceph::bufferlist bl;
std::string fn =
path + "/collections" + std::to_string(seastar::this_shard_id());
std::string err;
if (int r = bl.read_file(fn.c_str(), &err); r < 0) {
return crimson::stateful_ec{ singleton_ec<read_file_errmsg>() };
}
std::set<coll_t> collections;
auto p = bl.cbegin();
ceph::decode(collections, p);
for (auto& coll : collections) {
std::string fn = fmt::format("{}/{}{}", path, coll,
std::to_string(seastar::this_shard_id()));
ceph::bufferlist cbl;
if (int r = cbl.read_file(fn.c_str(), &err); r < 0) {
return crimson::stateful_ec{ singleton_ec<read_file_errmsg>() };
}
boost::intrusive_ptr<Collection> c{new Collection{coll}};
auto p = cbl.cbegin();
c->decode(p);
coll_map[coll] = c;
used_bytes += c->used_bytes();
}
return mount_ertr::now();
}
seastar::future<> CyanStore::Shard::umount()
{
return seastar::do_with(std::set<coll_t>{}, [this](auto& collections) {
return seastar::do_for_each(coll_map, [&collections, this](auto& coll) {
auto& [col, ch] = coll;
collections.insert(col);
ceph::bufferlist bl;
ceph_assert(ch);
ch->encode(bl);
std::string fn = fmt::format("{}/{}{}", path, col,
std::to_string(seastar::this_shard_id()));
return crimson::write_file(std::move(bl), fn);
}).then([&collections, this] {
ceph::bufferlist bl;
ceph::encode(collections, bl);
std::string fn = fmt::format("{}/collections{}",
path, std::to_string(seastar::this_shard_id()));
return crimson::write_file(std::move(bl), fn);
});
});
}
seastar::future<std::tuple<std::vector<ghobject_t>, ghobject_t>>
CyanStore::Shard::list_objects(
CollectionRef ch,
const ghobject_t& start,
const ghobject_t& end,
uint64_t limit) const
{
auto c = static_cast<Collection*>(ch.get());
logger().debug("{} {} {} {} {}",
__func__, c->get_cid(), start, end, limit);
std::vector<ghobject_t> objects;
objects.reserve(limit);
ghobject_t next = ghobject_t::get_max();
for (const auto& [oid, obj] :
boost::make_iterator_range(c->object_map.lower_bound(start),
c->object_map.end())) {
std::ignore = obj;
if (oid >= end || objects.size() >= limit) {
next = oid;
break;
}
objects.push_back(oid);
}
return seastar::make_ready_future<std::tuple<std::vector<ghobject_t>, ghobject_t>>(
std::make_tuple(std::move(objects), next));
}
seastar::future<CollectionRef>
CyanStore::Shard::create_new_collection(const coll_t& cid)
{
auto c = new Collection{cid};
new_coll_map[cid] = c;
return seastar::make_ready_future<CollectionRef>(c);
}
seastar::future<CollectionRef>
CyanStore::Shard::open_collection(const coll_t& cid)
{
return seastar::make_ready_future<CollectionRef>(_get_collection(cid));
}
seastar::future<std::vector<coll_core_t>>
CyanStore::Shard::list_collections()
{
std::vector<coll_core_t> collections;
for (auto& coll : coll_map) {
collections.push_back(std::make_pair(coll.first, seastar::this_shard_id()));
}
return seastar::make_ready_future<std::vector<coll_core_t>>(std::move(collections));
}
CyanStore::Shard::read_errorator::future<ceph::bufferlist>
CyanStore::Shard::read(
CollectionRef ch,
const ghobject_t& oid,
uint64_t offset,
size_t len,
uint32_t op_flags)
{
auto c = static_cast<Collection*>(ch.get());
logger().debug("{} {} {} {}~{}",
__func__, c->get_cid(), oid, offset, len);
if (!c->exists) {
return crimson::ct_error::enoent::make();
}
ObjectRef o = c->get_object(oid);
if (!o) {
return crimson::ct_error::enoent::make();
}
if (offset >= o->get_size())
return read_errorator::make_ready_future<ceph::bufferlist>();
size_t l = len;
if (l == 0 && offset == 0) // note: len == 0 means read the entire object
l = o->get_size();
else if (offset + l > o->get_size())
l = o->get_size() - offset;
return read_errorator::make_ready_future<ceph::bufferlist>(o->read(offset, l));
}
CyanStore::Shard::read_errorator::future<ceph::bufferlist>
CyanStore::Shard::readv(
CollectionRef ch,
const ghobject_t& oid,
interval_set<uint64_t>& m,
uint32_t op_flags)
{
return seastar::do_with(ceph::bufferlist{},
[this, ch, oid, &m, op_flags](auto& bl) {
return crimson::do_for_each(m,
[this, ch, oid, op_flags, &bl](auto& p) {
return read(ch, oid, p.first, p.second, op_flags)
.safe_then([&bl](auto ret) {
bl.claim_append(ret);
});
}).safe_then([&bl] {
return read_errorator::make_ready_future<ceph::bufferlist>(std::move(bl));
});
});
}
CyanStore::Shard::get_attr_errorator::future<ceph::bufferlist>
CyanStore::Shard::get_attr(
CollectionRef ch,
const ghobject_t& oid,
std::string_view name) const
{
auto c = static_cast<Collection*>(ch.get());
logger().debug("{} {} {}",
__func__, c->get_cid(), oid);
auto o = c->get_object(oid);
if (!o) {
return crimson::ct_error::enoent::make();
}
if (auto found = o->xattr.find(name); found != o->xattr.end()) {
return get_attr_errorator::make_ready_future<ceph::bufferlist>(found->second);
} else {
return crimson::ct_error::enodata::make();
}
}
CyanStore::Shard::get_attrs_ertr::future<CyanStore::Shard::attrs_t>
CyanStore::Shard::get_attrs(
CollectionRef ch,
const ghobject_t& oid)
{
auto c = static_cast<Collection*>(ch.get());
logger().debug("{} {} {}",
__func__, c->get_cid(), oid);
auto o = c->get_object(oid);
if (!o) {
return crimson::ct_error::enoent::make();
}
return get_attrs_ertr::make_ready_future<attrs_t>(o->xattr);
}
auto CyanStore::Shard::omap_get_values(
CollectionRef ch,
const ghobject_t& oid,
const omap_keys_t& keys)
-> read_errorator::future<omap_values_t>
{
auto c = static_cast<Collection*>(ch.get());
logger().debug("{} {} {}", __func__, c->get_cid(), oid);
auto o = c->get_object(oid);
if (!o) {
return crimson::ct_error::enoent::make();
}
omap_values_t values;
for (auto& key : keys) {
if (auto found = o->omap.find(key); found != o->omap.end()) {
values.insert(*found);
}
}
return seastar::make_ready_future<omap_values_t>(std::move(values));
}
auto CyanStore::Shard::omap_get_values(
CollectionRef ch,
const ghobject_t &oid,
const std::optional<string> &start)
-> CyanStore::Shard::read_errorator::future<std::tuple<bool, omap_values_t>>
{
auto c = static_cast<Collection*>(ch.get());
logger().debug("{} {} {}", __func__, c->get_cid(), oid);
auto o = c->get_object(oid);
if (!o) {
return crimson::ct_error::enoent::make();
}
omap_values_t values;
for (auto i = start ? o->omap.upper_bound(*start) : o->omap.begin();
i != o->omap.end();
++i) {
values.insert(*i);
}
return seastar::make_ready_future<std::tuple<bool, omap_values_t>>(
std::make_tuple(true, std::move(values)));
}
auto CyanStore::Shard::omap_get_header(
CollectionRef ch,
const ghobject_t& oid)
-> CyanStore::Shard::get_attr_errorator::future<ceph::bufferlist>
{
auto c = static_cast<Collection*>(ch.get());
auto o = c->get_object(oid);
if (!o) {
return crimson::ct_error::enoent::make();
}
return get_attr_errorator::make_ready_future<ceph::bufferlist>(
o->omap_header);
}
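// The transaction below is applied synchronously and entirely in memory:
// each op is decoded and dispatched to the matching _-prefixed helper; any
// negative return or decode exception dumps the offending transaction and
// aborts via ceph_assert.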
seastar::future<> CyanStore::Shard::do_transaction_no_callbacks(
CollectionRef ch,
ceph::os::Transaction&& t)
{
using ceph::os::Transaction;
int r = 0;
try {
auto i = t.begin();
while (i.have_op()) {
r = 0;
switch (auto op = i.decode_op(); op->op) {
case Transaction::OP_NOP:
break;
case Transaction::OP_REMOVE:
{
coll_t cid = i.get_cid(op->cid);
ghobject_t oid = i.get_oid(op->oid);
r = _remove(cid, oid);
if (r == -ENOENT) {
r = 0;
}
}
break;
case Transaction::OP_TOUCH:
case Transaction::OP_CREATE:
{
coll_t cid = i.get_cid(op->cid);
ghobject_t oid = i.get_oid(op->oid);
r = _touch(cid, oid);
}
break;
case Transaction::OP_WRITE:
{
coll_t cid = i.get_cid(op->cid);
ghobject_t oid = i.get_oid(op->oid);
uint64_t off = op->off;
uint64_t len = op->len;
uint32_t fadvise_flags = i.get_fadvise_flags();
ceph::bufferlist bl;
i.decode_bl(bl);
r = _write(cid, oid, off, len, bl, fadvise_flags);
}
break;
case Transaction::OP_ZERO:
{
coll_t cid = i.get_cid(op->cid);
ghobject_t oid = i.get_oid(op->oid);
uint64_t off = op->off;
uint64_t len = op->len;
r = _zero(cid, oid, off, len);
}
break;
case Transaction::OP_TRUNCATE:
{
coll_t cid = i.get_cid(op->cid);
ghobject_t oid = i.get_oid(op->oid);
uint64_t off = op->off;
r = _truncate(cid, oid, off);
}
break;
case Transaction::OP_CLONE:
{
coll_t cid = i.get_cid(op->cid);
ghobject_t oid = i.get_oid(op->oid);
ghobject_t noid = i.get_oid(op->dest_oid);
r = _clone(cid, oid, noid);
}
break;
case Transaction::OP_SETATTR:
{
coll_t cid = i.get_cid(op->cid);
ghobject_t oid = i.get_oid(op->oid);
std::string name = i.decode_string();
ceph::bufferlist bl;
i.decode_bl(bl);
std::map<std::string, bufferlist> to_set;
to_set.emplace(name, std::move(bl));
r = _setattrs(cid, oid, std::move(to_set));
}
break;
case Transaction::OP_SETATTRS:
{
coll_t cid = i.get_cid(op->cid);
ghobject_t oid = i.get_oid(op->oid);
std::map<std::string, bufferlist> aset;
i.decode_attrset(aset);
r = _setattrs(cid, oid, std::move(aset));
}
break;
case Transaction::OP_RMATTR:
{
coll_t cid = i.get_cid(op->cid);
ghobject_t oid = i.get_oid(op->oid);
std::string name = i.decode_string();
r = _rm_attr(cid, oid, name);
}
break;
case Transaction::OP_RMATTRS:
{
coll_t cid = i.get_cid(op->cid);
ghobject_t oid = i.get_oid(op->oid);
r = _rm_attrs(cid, oid);
}
break;
case Transaction::OP_MKCOLL:
{
coll_t cid = i.get_cid(op->cid);
r = _create_collection(cid, op->split_bits);
}
break;
case Transaction::OP_SETALLOCHINT:
{
r = 0;
}
break;
case Transaction::OP_OMAP_CLEAR:
{
coll_t cid = i.get_cid(op->cid);
ghobject_t oid = i.get_oid(op->oid);
r = _omap_clear(cid, oid);
}
break;
case Transaction::OP_OMAP_SETKEYS:
{
coll_t cid = i.get_cid(op->cid);
ghobject_t oid = i.get_oid(op->oid);
std::map<std::string, ceph::bufferlist> aset;
i.decode_attrset(aset);
r = _omap_set_values(cid, oid, std::move(aset));
}
break;
case Transaction::OP_OMAP_SETHEADER:
{
const coll_t &cid = i.get_cid(op->cid);
const ghobject_t &oid = i.get_oid(op->oid);
ceph::bufferlist bl;
i.decode_bl(bl);
r = _omap_set_header(cid, oid, bl);
}
break;
case Transaction::OP_OMAP_RMKEYS:
{
const coll_t &cid = i.get_cid(op->cid);
const ghobject_t &oid = i.get_oid(op->oid);
omap_keys_t keys;
i.decode_keyset(keys);
r = _omap_rmkeys(cid, oid, keys);
}
break;
case Transaction::OP_OMAP_RMKEYRANGE:
{
const coll_t &cid = i.get_cid(op->cid);
const ghobject_t &oid = i.get_oid(op->oid);
string first, last;
first = i.decode_string();
last = i.decode_string();
r = _omap_rmkeyrange(cid, oid, first, last);
}
break;
case Transaction::OP_COLL_HINT:
{
ceph::bufferlist hint;
i.decode_bl(hint);
// ignored
break;
}
default:
logger().error("bad op {}", static_cast<unsigned>(op->op));
abort();
}
if (r < 0) {
break;
}
}
} catch (std::exception &e) {
logger().error("{} got exception {}", __func__, e);
r = -EINVAL;
}
if (r < 0) {
logger().error(" transaction dump:\n");
JSONFormatter f(true);
f.open_object_section("transaction");
t.dump(&f);
f.close_section();
std::stringstream str;
f.flush(str);
logger().error("{}", str.str());
ceph_assert(r == 0);
}
return seastar::now();
}
int CyanStore::Shard::_remove(const coll_t& cid, const ghobject_t& oid)
{
logger().debug("{} cid={} oid={}",
__func__, cid, oid);
auto c = _get_collection(cid);
if (!c)
return -ENOENT;
auto i = c->object_hash.find(oid);
if (i == c->object_hash.end())
return -ENOENT;
used_bytes -= i->second->get_size();
c->object_hash.erase(i);
c->object_map.erase(oid);
return 0;
}
int CyanStore::Shard::_touch(const coll_t& cid, const ghobject_t& oid)
{
logger().debug("{} cid={} oid={}",
__func__, cid, oid);
auto c = _get_collection(cid);
if (!c)
return -ENOENT;
c->get_or_create_object(oid);
return 0;
}
int CyanStore::Shard::_write(
const coll_t& cid,
const ghobject_t& oid,
uint64_t offset,
size_t len,
const ceph::bufferlist& bl,
uint32_t fadvise_flags)
{
logger().debug("{} {} {} {} ~ {}",
__func__, cid, oid, offset, len);
assert(len == bl.length());
auto c = _get_collection(cid);
if (!c)
return -ENOENT;
ObjectRef o = c->get_or_create_object(oid);
if (len > 0 && !local_conf()->memstore_debug_omit_block_device_write) {
const ssize_t old_size = o->get_size();
o->write(offset, bl);
used_bytes += (o->get_size() - old_size);
}
return 0;
}
int CyanStore::Shard::_zero(
const coll_t& cid,
const ghobject_t& oid,
uint64_t offset,
size_t len)
{
logger().debug("{} {} {} {} ~ {}",
__func__, cid, oid, offset, len);
ceph::buffer::list bl;
bl.append_zero(len);
return _write(cid, oid, offset, len, bl, 0);
}
int CyanStore::Shard::_omap_clear(
const coll_t& cid,
const ghobject_t& oid)
{
logger().debug("{} {} {}", __func__, cid, oid);
auto c = _get_collection(cid);
if (!c) {
return -ENOENT;
}
ObjectRef o = c->get_object(oid);
if (!o) {
return -ENOENT;
}
o->omap.clear();
o->omap_header.clear();
return 0;
}
int CyanStore::Shard::_omap_set_values(
const coll_t& cid,
const ghobject_t& oid,
std::map<std::string, ceph::bufferlist> &&aset)
{
logger().debug(
"{} {} {} {} keys",
__func__, cid, oid, aset.size());
auto c = _get_collection(cid);
if (!c)
return -ENOENT;
ObjectRef o = c->get_or_create_object(oid);
for (auto&& [key, val]: aset) {
o->omap.insert_or_assign(std::move(key), std::move(val));
}
return 0;
}
int CyanStore::Shard::_omap_set_header(
const coll_t& cid,
const ghobject_t& oid,
const ceph::bufferlist &header)
{
logger().debug(
"{} {} {} {} bytes",
__func__, cid, oid, header.length());
auto c = _get_collection(cid);
if (!c)
return -ENOENT;
ObjectRef o = c->get_or_create_object(oid);
o->omap_header = header;
return 0;
}
int CyanStore::Shard::_omap_rmkeys(
const coll_t& cid,
const ghobject_t& oid,
const omap_keys_t& aset)
{
logger().debug(
"{} {} {} {} keys",
__func__, cid, oid, aset.size());
auto c = _get_collection(cid);
if (!c)
return -ENOENT;
ObjectRef o = c->get_or_create_object(oid);
for (auto &i: aset) {
o->omap.erase(i);
}
return 0;
}
int CyanStore::Shard::_omap_rmkeyrange(
const coll_t& cid,
const ghobject_t& oid,
const std::string &first,
const std::string &last)
{
logger().debug(
"{} {} {} first={} last={}",
__func__, cid, oid, first, last);
auto c = _get_collection(cid);
if (!c)
return -ENOENT;
ObjectRef o = c->get_or_create_object(oid);
  // erase every key in the half-open range [first, last)
  for (auto i = o->omap.lower_bound(first);
       i != o->omap.end() && i->first < last;
       o->omap.erase(i++));
return 0;
}
int CyanStore::Shard::_truncate(
const coll_t& cid,
const ghobject_t& oid,
uint64_t size)
{
logger().debug("{} cid={} oid={} size={}",
__func__, cid, oid, size);
auto c = _get_collection(cid);
if (!c)
return -ENOENT;
ObjectRef o = c->get_object(oid);
if (!o)
return -ENOENT;
if (local_conf()->memstore_debug_omit_block_device_write)
return 0;
const ssize_t old_size = o->get_size();
int r = o->truncate(size);
used_bytes += (o->get_size() - old_size);
return r;
}
int CyanStore::Shard::_clone(
const coll_t& cid,
const ghobject_t& oid,
const ghobject_t& noid)
{
logger().debug("{} cid={} oid={} noid={}",
__func__, cid, oid, noid);
auto c = _get_collection(cid);
if (!c)
return -ENOENT;
ObjectRef oo = c->get_object(oid);
if (!oo)
return -ENOENT;
if (local_conf()->memstore_debug_omit_block_device_write)
return 0;
ObjectRef no = c->get_or_create_object(noid);
used_bytes += ((ssize_t)oo->get_size() - (ssize_t)no->get_size());
no->clone(oo.get(), 0, oo->get_size(), 0);
no->omap_header = oo->omap_header;
no->omap = oo->omap;
no->xattr = oo->xattr;
return 0;
}
int CyanStore::Shard::_setattrs(
const coll_t& cid,
const ghobject_t& oid,
std::map<std::string,bufferlist>&& aset)
{
logger().debug("{} cid={} oid={}",
__func__, cid, oid);
auto c = _get_collection(cid);
if (!c)
return -ENOENT;
ObjectRef o = c->get_object(oid);
if (!o)
return -ENOENT;
for (auto&& [key, val]: aset) {
o->xattr.insert_or_assign(std::move(key), std::move(val));
}
return 0;
}
int CyanStore::Shard::_rm_attr(
const coll_t& cid,
const ghobject_t& oid,
std::string_view name)
{
logger().debug("{} cid={} oid={} name={}", __func__, cid, oid, name);
auto c = _get_collection(cid);
if (!c) {
return -ENOENT;
}
ObjectRef o = c->get_object(oid);
if (!o) {
return -ENOENT;
}
auto i = o->xattr.find(name);
if (i == o->xattr.end()) {
return -ENODATA;
}
o->xattr.erase(i);
return 0;
}
int CyanStore::Shard::_rm_attrs(
const coll_t& cid,
const ghobject_t& oid)
{
logger().debug("{} cid={} oid={}", __func__, cid, oid);
auto c = _get_collection(cid);
if (!c) {
return -ENOENT;
}
ObjectRef o = c->get_object(oid);
if (!o) {
return -ENOENT;
}
o->xattr.clear();
return 0;
}
int CyanStore::Shard::_create_collection(const coll_t& cid, int bits)
{
auto result = coll_map.try_emplace(cid);
if (!result.second)
return -EEXIST;
auto p = new_coll_map.find(cid);
assert(p != new_coll_map.end());
result.first->second = p->second;
result.first->second->bits = bits;
new_coll_map.erase(p);
return 0;
}
boost::intrusive_ptr<Collection>
CyanStore::Shard::_get_collection(const coll_t& cid)
{
auto cp = coll_map.find(cid);
if (cp == coll_map.end())
return {};
return cp->second;
}
seastar::future<> CyanStore::write_meta(
const std::string& key,
const std::string& value)
{
ceph_assert(seastar::this_shard_id() == primary_core);
std::string v = value;
v += "\n";
if (int r = safe_write_file(path.c_str(), key.c_str(),
v.c_str(), v.length(), 0600);
r < 0) {
throw std::runtime_error{fmt::format("unable to write_meta({})", key)};
}
return seastar::make_ready_future<>();
}
seastar::future<std::tuple<int, std::string>>
CyanStore::read_meta(const std::string& key)
{
ceph_assert(seastar::this_shard_id() == primary_core);
std::string fsid(4096, '\0');
int r = safe_read_file(path.c_str(), key.c_str(), fsid.data(), fsid.size());
if (r > 0) {
fsid.resize(r);
// drop trailing newlines
boost::algorithm::trim_right_if(fsid,
[](unsigned char c) {return isspace(c);});
} else {
fsid.clear();
}
return seastar::make_ready_future<std::tuple<int, std::string>>(
std::make_tuple(r, fsid));
}
uuid_d CyanStore::get_fsid() const
{
ceph_assert(seastar::this_shard_id() == primary_core);
return osd_fsid;
}
unsigned CyanStore::Shard::get_max_attr_name_length() const
{
// arbitrary limitation exactly like in the case of MemStore.
return 256;
}
CyanStore::Shard::read_errorator::future<std::map<uint64_t, uint64_t>>
CyanStore::Shard::fiemap(
CollectionRef ch,
const ghobject_t& oid,
uint64_t off,
uint64_t len)
{
auto c = static_cast<Collection*>(ch.get());
ObjectRef o = c->get_object(oid);
if (!o) {
throw std::runtime_error(fmt::format("object does not exist: {}", oid));
}
std::map<uint64_t, uint64_t> m{{0, o->get_size()}};
return seastar::make_ready_future<std::map<uint64_t, uint64_t>>(std::move(m));
}
seastar::future<struct stat>
CyanStore::Shard::stat(
CollectionRef ch,
const ghobject_t& oid)
{
auto c = static_cast<Collection*>(ch.get());
auto o = c->get_object(oid);
if (!o) {
throw std::runtime_error(fmt::format("object does not exist: {}", oid));
}
struct stat st;
st.st_size = o->get_size();
return seastar::make_ready_future<struct stat>(std::move(st));
}
}
| 25,327 | 25.577125 | 94 | cc |
null | ceph-main/src/crimson/os/cyanstore/cyan_store.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <string>
#include <unordered_map>
#include <map>
#include <typeinfo>
#include <vector>
#include <optional>
#include <seastar/core/future.hh>
#include <seastar/core/future-util.hh>
#include "osd/osd_types.h"
#include "include/uuid.h"
#include "crimson/os/cyanstore/cyan_object.h"
#include "crimson/os/cyanstore/cyan_collection.h"
#include "crimson/os/futurized_store.h"
namespace ceph::os {
class Transaction;
}
namespace crimson::os {
class CyanStore final : public FuturizedStore {
class Shard : public FuturizedStore::Shard {
public:
Shard(std::string path)
:path(path){}
seastar::future<struct stat> stat(
CollectionRef c,
const ghobject_t& oid) final;
read_errorator::future<ceph::bufferlist> read(
CollectionRef c,
const ghobject_t& oid,
uint64_t offset,
size_t len,
uint32_t op_flags = 0) final;
read_errorator::future<ceph::bufferlist> readv(
CollectionRef c,
const ghobject_t& oid,
interval_set<uint64_t>& m,
uint32_t op_flags = 0) final;
get_attr_errorator::future<ceph::bufferlist> get_attr(
CollectionRef c,
const ghobject_t& oid,
std::string_view name) const final;
get_attrs_ertr::future<attrs_t> get_attrs(
CollectionRef c,
const ghobject_t& oid) final;
read_errorator::future<omap_values_t> omap_get_values(
CollectionRef c,
const ghobject_t& oid,
const omap_keys_t& keys) final;
read_errorator::future<std::tuple<bool, omap_values_t>> omap_get_values(
CollectionRef c, ///< [in] collection
const ghobject_t &oid, ///< [in] oid
const std::optional<std::string> &start ///< [in] start, empty for begin
) final;
get_attr_errorator::future<ceph::bufferlist> omap_get_header(
CollectionRef c,
const ghobject_t& oid) final;
seastar::future<std::tuple<std::vector<ghobject_t>, ghobject_t>>
list_objects(
CollectionRef c,
const ghobject_t& start,
const ghobject_t& end,
uint64_t limit) const final;
seastar::future<CollectionRef> create_new_collection(const coll_t& cid) final;
seastar::future<CollectionRef> open_collection(const coll_t& cid) final;
seastar::future<> do_transaction_no_callbacks(
CollectionRef ch,
ceph::os::Transaction&& txn) final;
read_errorator::future<std::map<uint64_t, uint64_t>>
fiemap(
CollectionRef c,
const ghobject_t& oid,
uint64_t off,
uint64_t len) final;
unsigned get_max_attr_name_length() const final;
public:
// only exposed to CyanStore
mount_ertr::future<> mount();
seastar::future<> umount();
seastar::future<> mkfs();
mkfs_ertr::future<> mkcoll(uuid_d new_osd_fsid);
using coll_core_t = FuturizedStore::coll_core_t;
seastar::future<std::vector<coll_core_t>> list_collections();
uint64_t get_used_bytes() const { return used_bytes; }
private:
int _remove(const coll_t& cid, const ghobject_t& oid);
int _touch(const coll_t& cid, const ghobject_t& oid);
int _write(const coll_t& cid, const ghobject_t& oid,
uint64_t offset, size_t len, const ceph::bufferlist& bl,
uint32_t fadvise_flags);
int _zero(const coll_t& cid, const ghobject_t& oid,
uint64_t offset, size_t len);
int _omap_clear(
const coll_t& cid,
const ghobject_t& oid);
int _omap_set_values(
const coll_t& cid,
const ghobject_t& oid,
std::map<std::string, ceph::bufferlist> &&aset);
int _omap_set_header(
const coll_t& cid,
const ghobject_t& oid,
const ceph::bufferlist &header);
int _omap_rmkeys(
const coll_t& cid,
const ghobject_t& oid,
const omap_keys_t& aset);
int _omap_rmkeyrange(
const coll_t& cid,
const ghobject_t& oid,
const std::string &first,
const std::string &last);
int _truncate(const coll_t& cid, const ghobject_t& oid, uint64_t size);
int _clone(const coll_t& cid, const ghobject_t& oid,
const ghobject_t& noid);
int _setattrs(const coll_t& cid, const ghobject_t& oid,
std::map<std::string,bufferlist>&& aset);
int _rm_attr(const coll_t& cid, const ghobject_t& oid,
std::string_view name);
int _rm_attrs(const coll_t& cid, const ghobject_t& oid);
int _create_collection(const coll_t& cid, int bits);
boost::intrusive_ptr<Collection> _get_collection(const coll_t& cid);
private:
uint64_t used_bytes = 0;
const std::string path;
std::unordered_map<coll_t, boost::intrusive_ptr<Collection>> coll_map;
std::map<coll_t, boost::intrusive_ptr<Collection>> new_coll_map;
};
public:
CyanStore(const std::string& path);
~CyanStore() final;
seastar::future<> start() final {
ceph_assert(seastar::this_shard_id() == primary_core);
return shard_stores.start(path);
}
seastar::future<> stop() final {
ceph_assert(seastar::this_shard_id() == primary_core);
return shard_stores.stop();
}
mount_ertr::future<> mount() final {
ceph_assert(seastar::this_shard_id() == primary_core);
return shard_stores.invoke_on_all(
[](auto &local_store) {
return local_store.mount().handle_error(
crimson::stateful_ec::handle([](const auto& ec) {
crimson::get_logger(ceph_subsys_cyanstore).error(
"error mounting cyanstore: ({}) {}",
ec.value(), ec.message());
std::exit(EXIT_FAILURE);
}));
});
}
seastar::future<> umount() final {
ceph_assert(seastar::this_shard_id() == primary_core);
return shard_stores.invoke_on_all(
[](auto &local_store) {
return local_store.umount();
});
}
mkfs_ertr::future<> mkfs(uuid_d new_osd_fsid) final;
seastar::future<store_statfs_t> stat() const final;
uuid_d get_fsid() const final;
seastar::future<> write_meta(const std::string& key,
const std::string& value) final;
FuturizedStore::Shard& get_sharded_store() final{
return shard_stores.local();
}
seastar::future<std::tuple<int, std::string>>
read_meta(const std::string& key) final;
seastar::future<std::vector<coll_core_t>> list_collections() final;
private:
seastar::sharded<CyanStore::Shard> shard_stores;
const std::string path;
uuid_d osd_fsid;
};
}
| 6,408 | 28.131818 | 82 | h |
null | ceph-main/src/crimson/os/seastore/async_cleaner.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <fmt/chrono.h>
#include <seastar/core/metrics.hh>
#include "crimson/os/seastore/logging.h"
#include "crimson/os/seastore/async_cleaner.h"
#include "crimson/os/seastore/backref_manager.h"
#include "crimson/os/seastore/transaction_manager.h"
SET_SUBSYS(seastore_cleaner);
namespace {
enum class gc_formula_t {
GREEDY,
BENEFIT,
COST_BENEFIT,
};
constexpr auto gc_formula = gc_formula_t::COST_BENEFIT;
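// For intuition only -- the exact scoring code lives where gc_formula is
// consumed and may differ in detail: GREEDY ranks candidate segments purely
// by reclaimable space (1 - u, with u the segment utilization), BENEFIT also
// weighs the reclaimable space by segment age, and COST_BENEFIT follows the
// classic LFS-style ratio, roughly (1 - u) * age / (1 + u).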
}
namespace crimson::os::seastore {
void segment_info_t::set_open(
segment_seq_t _seq, segment_type_t _type,
data_category_t _category, rewrite_gen_t _generation)
{
ceph_assert(_seq != NULL_SEG_SEQ);
ceph_assert(_type != segment_type_t::NULL_SEG);
ceph_assert(_category != data_category_t::NUM);
ceph_assert(is_rewrite_generation(_generation));
state = Segment::segment_state_t::OPEN;
seq = _seq;
type = _type;
category = _category;
generation = _generation;
written_to = 0;
}
void segment_info_t::set_empty()
{
state = Segment::segment_state_t::EMPTY;
seq = NULL_SEG_SEQ;
type = segment_type_t::NULL_SEG;
category = data_category_t::NUM;
generation = NULL_GENERATION;
modify_time = NULL_TIME;
num_extents = 0;
written_to = 0;
}
void segment_info_t::set_closed()
{
state = Segment::segment_state_t::CLOSED;
  // the rest of the information is unchanged
}
void segment_info_t::init_closed(
segment_seq_t _seq, segment_type_t _type,
data_category_t _category, rewrite_gen_t _generation,
segment_off_t seg_size)
{
ceph_assert(_seq != NULL_SEG_SEQ);
ceph_assert(_type != segment_type_t::NULL_SEG);
ceph_assert(_category != data_category_t::NUM);
ceph_assert(is_rewrite_generation(_generation));
state = Segment::segment_state_t::CLOSED;
seq = _seq;
type = _type;
category = _category;
generation = _generation;
written_to = seg_size;
}
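// Lifecycle summary of a segment as driven by segments_info_t below:
// EMPTY --mark_open()--> OPEN --mark_closed()--> CLOSED --mark_empty()-->
// EMPTY again once the cleaner releases it; init_closed() admits an
// already-written segment directly as CLOSED without passing through OPEN.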
std::ostream& operator<<(std::ostream &out, const segment_info_t &info)
{
out << "seg_info_t("
<< "state=" << info.state
<< ", " << info.id;
if (info.is_empty()) {
// pass
} else { // open or closed
out << " " << info.type
<< " " << segment_seq_printer_t{info.seq}
<< " " << info.category
<< " " << rewrite_gen_printer_t{info.generation}
<< ", modify_time=" << sea_time_point_printer_t{info.modify_time}
<< ", num_extents=" << info.num_extents
<< ", written_to=" << info.written_to;
}
return out << ")";
}
void segments_info_t::reset()
{
segments.clear();
segment_size = 0;
journal_segment_id = NULL_SEG_ID;
num_in_journal_open = 0;
num_type_journal = 0;
num_type_ool = 0;
num_open = 0;
num_empty = 0;
num_closed = 0;
count_open_journal = 0;
count_open_ool = 0;
count_release_journal = 0;
count_release_ool = 0;
count_close_journal = 0;
count_close_ool = 0;
total_bytes = 0;
avail_bytes_in_open = 0;
modify_times.clear();
}
void segments_info_t::add_segment_manager(
SegmentManager &segment_manager)
{
LOG_PREFIX(segments_info_t::add_segment_manager);
device_id_t d_id = segment_manager.get_device_id();
auto ssize = segment_manager.get_segment_size();
auto nsegments = segment_manager.get_num_segments();
auto sm_size = segment_manager.get_available_size();
INFO("adding segment manager {}, size={}, ssize={}, segments={}",
device_id_printer_t{d_id}, sm_size, ssize, nsegments);
ceph_assert(ssize > 0);
ceph_assert(nsegments > 0);
ceph_assert(sm_size > 0);
  // add_device() also validates that this device hasn't been added already
segments.add_device(d_id, nsegments, segment_info_t{});
// assume all the segment managers share the same settings as follows.
if (segment_size == 0) {
ceph_assert(ssize > 0);
segment_size = ssize;
} else {
ceph_assert(segment_size == ssize);
}
// NOTE: by default the segments are empty
num_empty += nsegments;
total_bytes += sm_size;
}
void segments_info_t::init_closed(
segment_id_t segment, segment_seq_t seq, segment_type_t type,
data_category_t category, rewrite_gen_t generation)
{
LOG_PREFIX(segments_info_t::init_closed);
auto& segment_info = segments[segment];
DEBUG("initiating {} {} {} {} {}, {}, "
"num_segments(empty={}, opened={}, closed={})",
segment, type, segment_seq_printer_t{seq},
category, rewrite_gen_printer_t{generation},
segment_info, num_empty, num_open, num_closed);
ceph_assert(segment_info.is_empty());
ceph_assert(num_empty > 0);
--num_empty;
++num_closed;
if (type == segment_type_t::JOURNAL) {
// init_closed won't initialize journal_segment_id
ceph_assert(get_submitted_journal_head() == JOURNAL_SEQ_NULL);
++num_type_journal;
} else {
++num_type_ool;
}
// do not increment count_close_*;
if (segment_info.modify_time != NULL_TIME) {
modify_times.insert(segment_info.modify_time);
} else {
ceph_assert(segment_info.num_extents == 0);
}
segment_info.init_closed(
seq, type, category, generation, get_segment_size());
}
void segments_info_t::mark_open(
segment_id_t segment, segment_seq_t seq, segment_type_t type,
data_category_t category, rewrite_gen_t generation)
{
LOG_PREFIX(segments_info_t::mark_open);
auto& segment_info = segments[segment];
INFO("opening {} {} {} {} {}, {}, "
"num_segments(empty={}, opened={}, closed={})",
segment, type, segment_seq_printer_t{seq},
category, rewrite_gen_printer_t{generation},
segment_info, num_empty, num_open, num_closed);
ceph_assert(segment_info.is_empty());
ceph_assert(num_empty > 0);
--num_empty;
++num_open;
if (type == segment_type_t::JOURNAL) {
if (journal_segment_id != NULL_SEG_ID) {
auto& last_journal_segment = segments[journal_segment_id];
ceph_assert(last_journal_segment.is_closed());
ceph_assert(last_journal_segment.type == segment_type_t::JOURNAL);
ceph_assert(last_journal_segment.seq + 1 == seq);
}
journal_segment_id = segment;
++num_in_journal_open;
++num_type_journal;
++count_open_journal;
} else {
++num_type_ool;
++count_open_ool;
}
avail_bytes_in_open += get_segment_size();
segment_info.set_open(seq, type, category, generation);
}
void segments_info_t::mark_empty(
segment_id_t segment)
{
LOG_PREFIX(segments_info_t::mark_empty);
auto& segment_info = segments[segment];
INFO("releasing {}, {}, num_segments(empty={}, opened={}, closed={})",
segment, segment_info,
num_empty, num_open, num_closed);
ceph_assert(segment_info.is_closed());
auto type = segment_info.type;
assert(type != segment_type_t::NULL_SEG);
ceph_assert(num_closed > 0);
--num_closed;
++num_empty;
if (type == segment_type_t::JOURNAL) {
ceph_assert(num_type_journal > 0);
--num_type_journal;
++count_release_journal;
} else {
ceph_assert(num_type_ool > 0);
--num_type_ool;
++count_release_ool;
}
if (segment_info.modify_time != NULL_TIME) {
auto to_erase = modify_times.find(segment_info.modify_time);
ceph_assert(to_erase != modify_times.end());
modify_times.erase(to_erase);
} else {
ceph_assert(segment_info.num_extents == 0);
}
segment_info.set_empty();
}
void segments_info_t::mark_closed(
segment_id_t segment)
{
LOG_PREFIX(segments_info_t::mark_closed);
auto& segment_info = segments[segment];
INFO("closing {}, {}, num_segments(empty={}, opened={}, closed={})",
segment, segment_info,
num_empty, num_open, num_closed);
ceph_assert(segment_info.is_open());
ceph_assert(num_open > 0);
--num_open;
++num_closed;
if (segment_info.type == segment_type_t::JOURNAL) {
ceph_assert(num_in_journal_open > 0);
--num_in_journal_open;
++count_close_journal;
} else {
++count_close_ool;
}
ceph_assert(get_segment_size() >= segment_info.written_to);
auto seg_avail_bytes = get_segment_size() - segment_info.written_to;
ceph_assert(avail_bytes_in_open >= (std::size_t)seg_avail_bytes);
avail_bytes_in_open -= seg_avail_bytes;
if (segment_info.modify_time != NULL_TIME) {
modify_times.insert(segment_info.modify_time);
} else {
ceph_assert(segment_info.num_extents == 0);
}
segment_info.set_closed();
}
void segments_info_t::update_written_to(
segment_type_t type,
paddr_t offset)
{
LOG_PREFIX(segments_info_t::update_written_to);
auto& saddr = offset.as_seg_paddr();
auto& segment_info = segments[saddr.get_segment_id()];
if (!segment_info.is_open()) {
ERROR("segment is not open, not updating, type={}, offset={}, {}",
type, offset, segment_info);
ceph_abort();
}
auto new_written_to = saddr.get_segment_off();
ceph_assert(new_written_to <= get_segment_size());
if (segment_info.written_to > new_written_to) {
ERROR("written_to should not decrease! type={}, offset={}, {}",
type, offset, segment_info);
ceph_abort();
}
DEBUG("type={}, offset={}, {}", type, offset, segment_info);
ceph_assert(type == segment_info.type);
auto avail_deduction = new_written_to - segment_info.written_to;
ceph_assert(avail_bytes_in_open >= (std::size_t)avail_deduction);
avail_bytes_in_open -= avail_deduction;
segment_info.written_to = new_written_to;
}
std::ostream &operator<<(std::ostream &os, const segments_info_t &infos)
{
return os << "segments("
<< "empty=" << infos.get_num_empty()
<< ", open=" << infos.get_num_open()
<< ", closed=" << infos.get_num_closed()
<< ", type_journal=" << infos.get_num_type_journal()
<< ", type_ool=" << infos.get_num_type_ool()
<< ", total=" << infos.get_total_bytes() << "B"
<< ", available=" << infos.get_available_bytes() << "B"
<< ", unavailable=" << infos.get_unavailable_bytes() << "B"
<< ", available_ratio=" << infos.get_available_ratio()
<< ", submitted_head=" << infos.get_submitted_journal_head()
<< ", time_bound=" << sea_time_point_printer_t{infos.get_time_bound()}
<< ")";
}
void JournalTrimmerImpl::config_t::validate() const
{
ceph_assert(max_journal_bytes <= DEVICE_OFF_MAX);
ceph_assert(max_journal_bytes > target_journal_dirty_bytes);
ceph_assert(max_journal_bytes > target_journal_alloc_bytes);
ceph_assert(rewrite_dirty_bytes_per_cycle > 0);
ceph_assert(rewrite_backref_bytes_per_cycle > 0);
}
JournalTrimmerImpl::config_t
JournalTrimmerImpl::config_t::get_default(
std::size_t roll_size, journal_type_t type)
{
assert(roll_size);
std::size_t target_dirty_bytes = 0;
std::size_t target_alloc_bytes = 0;
std::size_t max_journal_bytes = 0;
if (type == journal_type_t::SEGMENTED) {
target_dirty_bytes = 12 * roll_size;
target_alloc_bytes = 2 * roll_size;
max_journal_bytes = 16 * roll_size;
} else {
assert(type == journal_type_t::RANDOM_BLOCK);
target_dirty_bytes = roll_size / 4;
target_alloc_bytes = roll_size / 4;
max_journal_bytes = roll_size / 2;
}
return config_t{
target_dirty_bytes,
target_alloc_bytes,
max_journal_bytes,
1<<17,// rewrite_dirty_bytes_per_cycle
1<<24 // rewrite_backref_bytes_per_cycle
};
}
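// Worked example of the defaults above (roll_size chosen purely for
// illustration): with a SEGMENTED journal and roll_size == 64MiB,
// target_journal_dirty_bytes == 768MiB, target_journal_alloc_bytes == 128MiB,
// max_journal_bytes == 1GiB, rewrite_dirty_bytes_per_cycle == 128KiB and
// rewrite_backref_bytes_per_cycle == 16MiB.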
JournalTrimmerImpl::config_t
JournalTrimmerImpl::config_t::get_test(
std::size_t roll_size, journal_type_t type)
{
assert(roll_size);
std::size_t target_dirty_bytes = 0;
std::size_t target_alloc_bytes = 0;
std::size_t max_journal_bytes = 0;
if (type == journal_type_t::SEGMENTED) {
target_dirty_bytes = 2 * roll_size;
target_alloc_bytes = 2 * roll_size;
max_journal_bytes = 4 * roll_size;
} else {
assert(type == journal_type_t::RANDOM_BLOCK);
target_dirty_bytes = roll_size / 4;
target_alloc_bytes = roll_size / 4;
max_journal_bytes = roll_size / 2;
}
return config_t{
target_dirty_bytes,
target_alloc_bytes,
max_journal_bytes,
1<<17,// rewrite_dirty_bytes_per_cycle
1<<24 // rewrite_backref_bytes_per_cycle
};
}
JournalTrimmerImpl::JournalTrimmerImpl(
BackrefManager &backref_manager,
config_t config,
journal_type_t type,
device_off_t roll_start,
device_off_t roll_size)
: backref_manager(backref_manager),
config(config),
journal_type(type),
roll_start(roll_start),
roll_size(roll_size),
reserved_usage(0)
{
config.validate();
ceph_assert(roll_start >= 0);
ceph_assert(roll_size > 0);
register_metrics();
}
void JournalTrimmerImpl::set_journal_head(journal_seq_t head)
{
LOG_PREFIX(JournalTrimmerImpl::set_journal_head);
ceph_assert(head != JOURNAL_SEQ_NULL);
ceph_assert(journal_head == JOURNAL_SEQ_NULL ||
head >= journal_head);
ceph_assert(journal_alloc_tail == JOURNAL_SEQ_NULL ||
head >= journal_alloc_tail);
ceph_assert(journal_dirty_tail == JOURNAL_SEQ_NULL ||
head >= journal_dirty_tail);
std::swap(journal_head, head);
if (journal_head.segment_seq == head.segment_seq) {
DEBUG("journal_head {} => {}, {}",
head, journal_head, stat_printer_t{*this, false});
} else {
INFO("journal_head {} => {}, {}",
head, journal_head, stat_printer_t{*this, false});
}
background_callback->maybe_wake_background();
}
void JournalTrimmerImpl::update_journal_tails(
journal_seq_t dirty_tail,
journal_seq_t alloc_tail)
{
LOG_PREFIX(JournalTrimmerImpl::update_journal_tails);
if (dirty_tail != JOURNAL_SEQ_NULL) {
ceph_assert(journal_head == JOURNAL_SEQ_NULL ||
journal_head >= dirty_tail);
if (journal_dirty_tail != JOURNAL_SEQ_NULL &&
journal_dirty_tail > dirty_tail) {
ERROR("journal_dirty_tail {} => {} is backwards!",
journal_dirty_tail, dirty_tail);
ceph_abort();
}
std::swap(journal_dirty_tail, dirty_tail);
if (journal_dirty_tail.segment_seq == dirty_tail.segment_seq) {
DEBUG("journal_dirty_tail {} => {}, {}",
dirty_tail, journal_dirty_tail, stat_printer_t{*this, false});
} else {
INFO("journal_dirty_tail {} => {}, {}",
dirty_tail, journal_dirty_tail, stat_printer_t{*this, false});
}
}
if (alloc_tail != JOURNAL_SEQ_NULL) {
ceph_assert(journal_head == JOURNAL_SEQ_NULL ||
journal_head >= alloc_tail);
if (journal_alloc_tail != JOURNAL_SEQ_NULL &&
journal_alloc_tail > alloc_tail) {
ERROR("journal_alloc_tail {} => {} is backwards!",
journal_alloc_tail, alloc_tail);
ceph_abort();
}
std::swap(journal_alloc_tail, alloc_tail);
if (journal_alloc_tail.segment_seq == alloc_tail.segment_seq) {
DEBUG("journal_alloc_tail {} => {}, {}",
alloc_tail, journal_alloc_tail, stat_printer_t{*this, false});
} else {
INFO("journal_alloc_tail {} => {}, {}",
alloc_tail, journal_alloc_tail, stat_printer_t{*this, false});
}
}
background_callback->maybe_wake_background();
background_callback->maybe_wake_blocked_io();
}
journal_seq_t JournalTrimmerImpl::get_tail_limit() const
{
assert(background_callback->is_ready());
auto ret = journal_head.add_offset(
journal_type,
-static_cast<device_off_t>(config.max_journal_bytes),
roll_start,
roll_size);
return ret;
}
journal_seq_t JournalTrimmerImpl::get_dirty_tail_target() const
{
assert(background_callback->is_ready());
auto ret = journal_head.add_offset(
journal_type,
-static_cast<device_off_t>(config.target_journal_dirty_bytes),
roll_start,
roll_size);
return ret;
}
journal_seq_t JournalTrimmerImpl::get_alloc_tail_target() const
{
assert(background_callback->is_ready());
auto ret = journal_head.add_offset(
journal_type,
-static_cast<device_off_t>(config.target_journal_alloc_bytes),
roll_start,
roll_size);
return ret;
}
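// All three getters above compute "journal_head minus a byte budget", with
// add_offset() handling the wrap across the circular journal described by
// roll_start/roll_size. Hedged sketch (values are assumptions): with
// max_journal_bytes == 1GiB, a committed tail that trails the head by more
// than 1GiB of journal space falls behind get_tail_limit() and blocks user
// I/O via should_block_io_on_trim(); the dirty/alloc targets work the same
// way with their respective target_journal_*_bytes budgets.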
std::size_t JournalTrimmerImpl::get_dirty_journal_size() const
{
if (!background_callback->is_ready()) {
return 0;
}
auto ret = journal_head.relative_to(
journal_type,
journal_dirty_tail,
roll_start,
roll_size);
ceph_assert(ret >= 0);
return static_cast<std::size_t>(ret);
}
std::size_t JournalTrimmerImpl::get_alloc_journal_size() const
{
if (!background_callback->is_ready()) {
return 0;
}
auto ret = journal_head.relative_to(
journal_type,
journal_alloc_tail,
roll_start,
roll_size);
ceph_assert(ret >= 0);
return static_cast<std::size_t>(ret);
}
seastar::future<> JournalTrimmerImpl::trim() {
return seastar::when_all(
[this] {
if (should_trim_alloc()) {
return trim_alloc(
).handle_error(
crimson::ct_error::assert_all{
"encountered invalid error in trim_alloc"
}
);
} else {
return seastar::now();
}
},
[this] {
if (should_trim_dirty()) {
return trim_dirty(
).handle_error(
crimson::ct_error::assert_all{
"encountered invalid error in trim_dirty"
}
);
} else {
return seastar::now();
}
}
).discard_result();
}
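// Minimal usage sketch (hypothetical caller, not taken from this codebase):
//   trimmer->trim().then([] { /* alloc and dirty trimming both finished */ });
// trim_alloc() and trim_dirty() run concurrently under when_all(), and any
// error from either branch is treated as fatal via assert_all.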
JournalTrimmerImpl::trim_ertr::future<>
JournalTrimmerImpl::trim_alloc()
{
LOG_PREFIX(JournalTrimmerImpl::trim_alloc);
assert(background_callback->is_ready());
return repeat_eagain([this, FNAME] {
return extent_callback->with_transaction_intr(
Transaction::src_t::TRIM_ALLOC,
"trim_alloc",
[this, FNAME](auto &t)
{
auto target = get_alloc_tail_target();
DEBUGT("start, alloc_tail={}, target={}",
t, journal_alloc_tail, target);
return backref_manager.merge_cached_backrefs(
t,
target,
config.rewrite_backref_bytes_per_cycle
).si_then([this, FNAME, &t](auto trim_alloc_to)
-> ExtentCallbackInterface::submit_transaction_direct_iertr::future<>
{
DEBUGT("trim_alloc_to={}", t, trim_alloc_to);
if (trim_alloc_to != JOURNAL_SEQ_NULL) {
return extent_callback->submit_transaction_direct(
t, std::make_optional<journal_seq_t>(trim_alloc_to));
}
return seastar::now();
});
});
}).safe_then([this, FNAME] {
DEBUG("finish, alloc_tail={}", journal_alloc_tail);
});
}
JournalTrimmerImpl::trim_ertr::future<>
JournalTrimmerImpl::trim_dirty()
{
LOG_PREFIX(JournalTrimmerImpl::trim_dirty);
assert(background_callback->is_ready());
return repeat_eagain([this, FNAME] {
return extent_callback->with_transaction_intr(
Transaction::src_t::TRIM_DIRTY,
"trim_dirty",
[this, FNAME](auto &t)
{
auto target = get_dirty_tail_target();
DEBUGT("start, dirty_tail={}, target={}",
t, journal_dirty_tail, target);
return extent_callback->get_next_dirty_extents(
t,
target,
config.rewrite_dirty_bytes_per_cycle
).si_then([this, FNAME, &t](auto dirty_list) {
DEBUGT("rewrite {} dirty extents", t, dirty_list.size());
return seastar::do_with(
std::move(dirty_list),
[this, &t](auto &dirty_list)
{
return trans_intr::do_for_each(
dirty_list,
[this, &t](auto &e) {
return extent_callback->rewrite_extent(
t, e, INIT_GENERATION, NULL_TIME);
});
});
}).si_then([this, &t] {
return extent_callback->submit_transaction_direct(t);
});
});
}).safe_then([this, FNAME] {
DEBUG("finish, dirty_tail={}", journal_dirty_tail);
});
}
void JournalTrimmerImpl::register_metrics()
{
namespace sm = seastar::metrics;
metrics.add_group("journal_trimmer", {
sm::make_counter("dirty_journal_bytes",
[this] { return get_dirty_journal_size(); },
sm::description("the size of the journal for dirty extents")),
sm::make_counter("alloc_journal_bytes",
[this] { return get_alloc_journal_size(); },
sm::description("the size of the journal for alloc info"))
});
}
std::ostream &operator<<(
std::ostream &os, const JournalTrimmerImpl::stat_printer_t &stats)
{
os << "JournalTrimmer(";
if (stats.trimmer.background_callback->is_ready()) {
os << "should_block_io_on_trim=" << stats.trimmer.should_block_io_on_trim()
<< ", should_(trim_dirty=" << stats.trimmer.should_trim_dirty()
<< ", trim_alloc=" << stats.trimmer.should_trim_alloc() << ")";
} else {
os << "not-ready";
}
if (stats.detailed) {
os << ", journal_head=" << stats.trimmer.get_journal_head()
<< ", alloc_tail=" << stats.trimmer.get_alloc_tail()
<< ", dirty_tail=" << stats.trimmer.get_dirty_tail();
if (stats.trimmer.background_callback->is_ready()) {
os << ", alloc_tail_target=" << stats.trimmer.get_alloc_tail_target()
<< ", dirty_tail_target=" << stats.trimmer.get_dirty_tail_target()
<< ", tail_limit=" << stats.trimmer.get_tail_limit();
}
}
os << ")";
return os;
}
bool SpaceTrackerSimple::equals(const SpaceTrackerI &_other) const
{
LOG_PREFIX(SpaceTrackerSimple::equals);
const auto &other = static_cast<const SpaceTrackerSimple&>(_other);
if (other.live_bytes_by_segment.size() != live_bytes_by_segment.size()) {
ERROR("different segment counts, bug in test");
assert(0 == "segment counts should match");
return false;
}
bool all_match = true;
for (auto i = live_bytes_by_segment.begin(), j = other.live_bytes_by_segment.begin();
i != live_bytes_by_segment.end(); ++i, ++j) {
if (i->second.live_bytes != j->second.live_bytes) {
all_match = false;
DEBUG("segment_id {} live bytes mismatch *this: {}, other: {}",
i->first, i->second.live_bytes, j->second.live_bytes);
}
}
return all_match;
}
int64_t SpaceTrackerDetailed::SegmentMap::allocate(
device_segment_id_t segment,
segment_off_t offset,
extent_len_t len,
const extent_len_t block_size)
{
LOG_PREFIX(SegmentMap::allocate);
assert(offset % block_size == 0);
assert(len % block_size == 0);
const auto b = (offset / block_size);
const auto e = (offset + len) / block_size;
bool error = false;
for (auto i = b; i < e; ++i) {
if (bitmap[i]) {
if (!error) {
ERROR("found allocated in {}, {} ~ {}", segment, offset, len);
error = true;
}
DEBUG("block {} allocated", i * block_size);
}
bitmap[i] = true;
}
return update_usage(len);
}
int64_t SpaceTrackerDetailed::SegmentMap::release(
device_segment_id_t segment,
segment_off_t offset,
extent_len_t len,
const extent_len_t block_size)
{
LOG_PREFIX(SegmentMap::release);
assert(offset % block_size == 0);
assert(len % block_size == 0);
const auto b = (offset / block_size);
const auto e = (offset + len) / block_size;
bool error = false;
for (auto i = b; i < e; ++i) {
if (!bitmap[i]) {
if (!error) {
ERROR("found unallocated in {}, {} ~ {}", segment, offset, len);
error = true;
}
DEBUG("block {} unallocated", i * block_size);
}
bitmap[i] = false;
}
return update_usage(-(int64_t)len);
}
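// Block-index arithmetic used by allocate()/release() above, with assumed
// values for illustration: block_size == 4096, offset == 8192 and len == 12288
// give b == 2 and e == 5, so bitmap slots 2, 3 and 4 are toggled and the usage
// counter moves by +/-12288 bytes.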
bool SpaceTrackerDetailed::equals(const SpaceTrackerI &_other) const
{
LOG_PREFIX(SpaceTrackerDetailed::equals);
const auto &other = static_cast<const SpaceTrackerDetailed&>(_other);
if (other.segment_usage.size() != segment_usage.size()) {
ERROR("different segment counts, bug in test");
assert(0 == "segment counts should match");
return false;
}
bool all_match = true;
for (auto i = segment_usage.begin(), j = other.segment_usage.begin();
i != segment_usage.end(); ++i, ++j) {
if (i->second.get_usage() != j->second.get_usage()) {
all_match = false;
ERROR("segment_id {} live bytes mismatch *this: {}, other: {}",
i->first, i->second.get_usage(), j->second.get_usage());
}
}
return all_match;
}
void SpaceTrackerDetailed::SegmentMap::dump_usage(extent_len_t block_size) const
{
LOG_PREFIX(SegmentMap::dump_usage);
INFO("dump start");
for (unsigned i = 0; i < bitmap.size(); ++i) {
if (bitmap[i]) {
LOCAL_LOGGER.info(" {} still live", i * block_size);
}
}
}
void SpaceTrackerDetailed::dump_usage(segment_id_t id) const
{
LOG_PREFIX(SpaceTrackerDetailed::dump_usage);
INFO("{}", id);
segment_usage[id].dump_usage(
block_size_by_segment_manager[id.device_id()]);
}
void SpaceTrackerSimple::dump_usage(segment_id_t id) const
{
LOG_PREFIX(SpaceTrackerSimple::dump_usage);
INFO("id: {}, live_bytes: {}",
id, live_bytes_by_segment[id].live_bytes);
}
std::ostream &operator<<(
std::ostream &os, const AsyncCleaner::stat_printer_t &stats)
{
stats.cleaner.print(os, stats.detailed);
return os;
}
SegmentCleaner::SegmentCleaner(
config_t config,
SegmentManagerGroupRef&& sm_group,
BackrefManager &backref_manager,
SegmentSeqAllocator &segment_seq_allocator,
bool detailed,
bool is_cold)
: detailed(detailed),
is_cold(is_cold),
config(config),
sm_group(std::move(sm_group)),
backref_manager(backref_manager),
ool_segment_seq_allocator(segment_seq_allocator)
{
config.validate();
}
void SegmentCleaner::register_metrics()
{
namespace sm = seastar::metrics;
stats.segment_util.buckets.resize(UTIL_BUCKETS);
std::size_t i;
for (i = 0; i < UTIL_BUCKETS; ++i) {
stats.segment_util.buckets[i].upper_bound = ((double)(i + 1)) / 10;
stats.segment_util.buckets[i].count = 0;
}
// NOTE: by default the segments are empty
i = get_bucket_index(UTIL_STATE_EMPTY);
stats.segment_util.buckets[i].count = segments.get_num_segments();
std::string prefix;
if (is_cold) {
prefix.append("cold_");
}
prefix.append("segment_cleaner");
metrics.add_group(prefix, {
sm::make_counter("segments_number",
[this] { return segments.get_num_segments(); },
sm::description("the number of segments")),
sm::make_counter("segment_size",
[this] { return segments.get_segment_size(); },
sm::description("the bytes of a segment")),
sm::make_counter("segments_in_journal",
[this] { return get_segments_in_journal(); },
sm::description("the number of segments in journal")),
sm::make_counter("segments_type_journal",
[this] { return segments.get_num_type_journal(); },
sm::description("the number of segments typed journal")),
sm::make_counter("segments_type_ool",
[this] { return segments.get_num_type_ool(); },
sm::description("the number of segments typed out-of-line")),
sm::make_counter("segments_open",
[this] { return segments.get_num_open(); },
sm::description("the number of open segments")),
sm::make_counter("segments_empty",
[this] { return segments.get_num_empty(); },
sm::description("the number of empty segments")),
sm::make_counter("segments_closed",
[this] { return segments.get_num_closed(); },
sm::description("the number of closed segments")),
sm::make_counter("segments_count_open_journal",
[this] { return segments.get_count_open_journal(); },
sm::description("the count of open journal segment operations")),
sm::make_counter("segments_count_open_ool",
[this] { return segments.get_count_open_ool(); },
sm::description("the count of open ool segment operations")),
sm::make_counter("segments_count_release_journal",
[this] { return segments.get_count_release_journal(); },
sm::description("the count of release journal segment operations")),
sm::make_counter("segments_count_release_ool",
[this] { return segments.get_count_release_ool(); },
sm::description("the count of release ool segment operations")),
sm::make_counter("segments_count_close_journal",
[this] { return segments.get_count_close_journal(); },
sm::description("the count of close journal segment operations")),
sm::make_counter("segments_count_close_ool",
[this] { return segments.get_count_close_ool(); },
sm::description("the count of close ool segment operations")),
sm::make_counter("total_bytes",
[this] { return segments.get_total_bytes(); },
sm::description("the size of the space")),
sm::make_counter("available_bytes",
[this] { return segments.get_available_bytes(); },
sm::description("the size of the space is available")),
sm::make_counter("unavailable_unreclaimable_bytes",
[this] { return get_unavailable_unreclaimable_bytes(); },
sm::description("the size of the space is unavailable and unreclaimable")),
sm::make_counter("unavailable_reclaimable_bytes",
[this] { return get_unavailable_reclaimable_bytes(); },
sm::description("the size of the space is unavailable and reclaimable")),
sm::make_counter("used_bytes", stats.used_bytes,
sm::description("the size of the space occupied by live extents")),
sm::make_counter("unavailable_unused_bytes",
[this] { return get_unavailable_unused_bytes(); },
sm::description("the size of the space is unavailable and not alive")),
sm::make_counter("projected_count", stats.projected_count,
sm::description("the number of projected usage reservations")),
sm::make_counter("projected_used_bytes_sum", stats.projected_used_bytes_sum,
sm::description("the sum of the projected usage in bytes")),
sm::make_counter("reclaimed_bytes", stats.reclaimed_bytes,
sm::description("rewritten bytes due to reclaim")),
sm::make_counter("reclaimed_segment_bytes", stats.reclaimed_segment_bytes,
sm::description("rewritten bytes due to reclaim")),
sm::make_counter("closed_journal_used_bytes", stats.closed_journal_used_bytes,
sm::description("used bytes when close a journal segment")),
sm::make_counter("closed_journal_total_bytes", stats.closed_journal_total_bytes,
sm::description("total bytes of closed journal segments")),
sm::make_counter("closed_ool_used_bytes", stats.closed_ool_used_bytes,
sm::description("used bytes when close a ool segment")),
sm::make_counter("closed_ool_total_bytes", stats.closed_ool_total_bytes,
sm::description("total bytes of closed ool segments")),
sm::make_gauge("available_ratio",
[this] { return segments.get_available_ratio(); },
sm::description("ratio of available space to total space")),
sm::make_gauge("reclaim_ratio",
[this] { return get_reclaim_ratio(); },
sm::description("ratio of reclaimable space to unavailable space")),
sm::make_histogram("segment_utilization_distribution",
[this]() -> seastar::metrics::histogram& {
return stats.segment_util;
},
sm::description("utilization distribution of all segments"))
});
}
segment_id_t SegmentCleaner::allocate_segment(
segment_seq_t seq,
segment_type_t type,
data_category_t category,
rewrite_gen_t generation)
{
LOG_PREFIX(SegmentCleaner::allocate_segment);
assert(seq != NULL_SEG_SEQ);
ceph_assert(type == segment_type_t::OOL ||
trimmer != nullptr); // segment_type_t::JOURNAL
for (auto it = segments.begin();
it != segments.end();
++it) {
auto seg_id = it->first;
auto& segment_info = it->second;
if (segment_info.is_empty()) {
auto old_usage = calc_utilization(seg_id);
segments.mark_open(seg_id, seq, type, category, generation);
background_callback->maybe_wake_background();
auto new_usage = calc_utilization(seg_id);
adjust_segment_util(old_usage, new_usage);
INFO("opened, {}", stat_printer_t{*this, false});
return seg_id;
}
}
ERROR("out of space with {} {} {} {}",
type, segment_seq_printer_t{seq}, category,
rewrite_gen_printer_t{generation});
ceph_abort();
return NULL_SEG_ID;
}
void SegmentCleaner::close_segment(segment_id_t segment)
{
LOG_PREFIX(SegmentCleaner::close_segment);
auto old_usage = calc_utilization(segment);
segments.mark_closed(segment);
auto &seg_info = segments[segment];
if (seg_info.type == segment_type_t::JOURNAL) {
stats.closed_journal_used_bytes += space_tracker->get_usage(segment);
stats.closed_journal_total_bytes += segments.get_segment_size();
} else {
stats.closed_ool_used_bytes += space_tracker->get_usage(segment);
stats.closed_ool_total_bytes += segments.get_segment_size();
}
auto new_usage = calc_utilization(segment);
adjust_segment_util(old_usage, new_usage);
INFO("closed, {} -- {}", stat_printer_t{*this, false}, seg_info);
}
double SegmentCleaner::calc_gc_benefit_cost(
segment_id_t id,
const sea_time_point &now_time,
const sea_time_point &bound_time) const
{
double util = calc_utilization(id);
ceph_assert(util >= 0 && util < 1);
if constexpr (gc_formula == gc_formula_t::GREEDY) {
return 1 - util;
}
if constexpr (gc_formula == gc_formula_t::COST_BENEFIT) {
if (util == 0) {
return std::numeric_limits<double>::max();
}
auto modify_time = segments[id].modify_time;
double age_segment = modify_time.time_since_epoch().count();
double age_now = now_time.time_since_epoch().count();
if (likely(age_now > age_segment)) {
return (1 - util) * (age_now - age_segment) / (2 * util);
} else {
// time is wrong
return (1 - util) / (2 * util);
}
}
assert(gc_formula == gc_formula_t::BENEFIT);
auto modify_time = segments[id].modify_time;
double age_factor = 0.5; // middle value if age is invalid
if (likely(bound_time != NULL_TIME &&
modify_time != NULL_TIME &&
now_time > modify_time)) {
assert(modify_time >= bound_time);
double age_bound = bound_time.time_since_epoch().count();
double age_now = now_time.time_since_epoch().count();
double age_segment = modify_time.time_since_epoch().count();
age_factor = (age_now - age_segment) / (age_now - age_bound);
}
return ((1 - 2 * age_factor) * util * util +
(2 * age_factor - 2) * util + 1);
}
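// Hedged numeric examples of the formulas above (inputs are made up):
// - GREEDY: util == 0.25 scores 1 - 0.25 == 0.75; emptier segments always win.
// - COST_BENEFIT: util == 0.25 with an age delta of 1000 ticks scores
//   (1 - 0.25) * 1000 / (2 * 0.25) == 1500; older, emptier segments win.
// - BENEFIT: util == 0.25 with age_factor == 0.75 scores
//   (1 - 1.5) * 0.0625 + (1.5 - 2) * 0.25 + 1 == 0.84375; the polynomial is
//   1 at util == 0 and 0 at util == 1 for any age_factor in [0, 1].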
SegmentCleaner::do_reclaim_space_ret
SegmentCleaner::do_reclaim_space(
const std::vector<CachedExtentRef> &backref_extents,
const backref_pin_list_t &pin_list,
std::size_t &reclaimed,
std::size_t &runs)
{
return repeat_eagain([this, &backref_extents,
&pin_list, &reclaimed, &runs] {
reclaimed = 0;
runs++;
auto src = Transaction::src_t::CLEANER_MAIN;
if (is_cold) {
src = Transaction::src_t::CLEANER_COLD;
}
return extent_callback->with_transaction_intr(
src,
"clean_reclaim_space",
[this, &backref_extents, &pin_list, &reclaimed](auto &t)
{
return seastar::do_with(
std::vector<CachedExtentRef>(backref_extents),
[this, &t, &reclaimed, &pin_list](auto &extents)
{
LOG_PREFIX(SegmentCleaner::do_reclaim_space);
// calculate live extents
auto cached_backref_entries =
backref_manager.get_cached_backref_entries_in_range(
reclaim_state->start_pos, reclaim_state->end_pos);
backref_entry_query_set_t backref_entries;
for (auto &pin : pin_list) {
backref_entries.emplace(
pin->get_key(),
pin->get_val(),
pin->get_length(),
pin->get_type(),
JOURNAL_SEQ_NULL);
}
for (auto &cached_backref : cached_backref_entries) {
if (cached_backref.laddr == L_ADDR_NULL) {
auto it = backref_entries.find(cached_backref.paddr);
assert(it->len == cached_backref.len);
backref_entries.erase(it);
} else {
backref_entries.emplace(cached_backref);
}
}
// retrieve live extents
DEBUGT("start, backref_entries={}, backref_extents={}",
t, backref_entries.size(), extents.size());
return seastar::do_with(
std::move(backref_entries),
[this, &extents, &t](auto &backref_entries) {
return trans_intr::parallel_for_each(
backref_entries,
[this, &extents, &t](auto &ent)
{
LOG_PREFIX(SegmentCleaner::do_reclaim_space);
TRACET("getting extent of type {} at {}~{}",
t,
ent.type,
ent.paddr,
ent.len);
return extent_callback->get_extents_if_live(
t, ent.type, ent.paddr, ent.laddr, ent.len
).si_then([FNAME, &extents, &ent, &t](auto list) {
if (list.empty()) {
TRACET("addr {} dead, skipping", t, ent.paddr);
} else {
for (auto &e : list) {
extents.emplace_back(std::move(e));
}
}
});
});
}).si_then([FNAME, &extents, this, &reclaimed, &t] {
DEBUGT("reclaim {} extents", t, extents.size());
// rewrite live extents
auto modify_time = segments[reclaim_state->get_segment_id()].modify_time;
return trans_intr::do_for_each(
extents,
[this, modify_time, &t, &reclaimed](auto ext)
{
reclaimed += ext->get_length();
return extent_callback->rewrite_extent(
t, ext, reclaim_state->target_generation, modify_time);
});
});
}).si_then([this, &t] {
return extent_callback->submit_transaction_direct(t);
});
});
});
}
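// Note on the backref merge in do_reclaim_space() above (a summary, not new
// behaviour): cached backref entries overlay the on-disk pin_list; an entry
// whose laddr == L_ADDR_NULL acts as a tombstone and erases the matching paddr
// from the query set, so, for example, a pin for an extent freed after the
// last backref merge is dropped here instead of being rewritten.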
SegmentCleaner::clean_space_ret SegmentCleaner::clean_space()
{
LOG_PREFIX(SegmentCleaner::clean_space);
assert(background_callback->is_ready());
ceph_assert(can_clean_space());
if (!reclaim_state) {
segment_id_t seg_id = get_next_reclaim_segment();
auto &segment_info = segments[seg_id];
INFO("reclaim {} {} start, usage={}, time_bound={}",
seg_id, segment_info,
space_tracker->calc_utilization(seg_id),
sea_time_point_printer_t{segments.get_time_bound()});
ceph_assert(segment_info.is_closed());
reclaim_state = reclaim_state_t::create(
seg_id, segment_info.generation, segments.get_segment_size());
}
reclaim_state->advance(config.reclaim_bytes_per_cycle);
DEBUG("reclaiming {} {}~{}",
rewrite_gen_printer_t{reclaim_state->generation},
reclaim_state->start_pos,
reclaim_state->end_pos);
double pavail_ratio = get_projected_available_ratio();
sea_time_point start = seastar::lowres_system_clock::now();
// Backref-tree doesn't support tree-read during tree-updates with parallel
// transactions. So, concurrent transactions between trim and reclaim are
// not allowed right now.
return seastar::do_with(
std::pair<std::vector<CachedExtentRef>, backref_pin_list_t>(),
[this](auto &weak_read_ret) {
return repeat_eagain([this, &weak_read_ret] {
return extent_callback->with_transaction_intr(
Transaction::src_t::READ,
"retrieve_from_backref_tree",
[this, &weak_read_ret](auto &t) {
return backref_manager.get_mappings(
t,
reclaim_state->start_pos,
reclaim_state->end_pos
).si_then([this, &t, &weak_read_ret](auto pin_list) {
if (!pin_list.empty()) {
auto it = pin_list.begin();
auto &first_pin = *it;
if (first_pin->get_key() < reclaim_state->start_pos) {
// BackrefManager::get_mappings may include an entry before
// reclaim_state->start_pos, which is semantically inconsistent
// with the requirements of the cleaner
pin_list.erase(it);
}
}
return backref_manager.retrieve_backref_extents_in_range(
t,
reclaim_state->start_pos,
reclaim_state->end_pos
).si_then([pin_list=std::move(pin_list),
&weak_read_ret](auto extents) mutable {
weak_read_ret = std::make_pair(std::move(extents), std::move(pin_list));
});
});
});
}).safe_then([&weak_read_ret] {
return std::move(weak_read_ret);
});
}).safe_then([this, FNAME, pavail_ratio, start](auto weak_read_ret) {
return seastar::do_with(
std::move(weak_read_ret.first),
std::move(weak_read_ret.second),
(size_t)0,
(size_t)0,
[this, FNAME, pavail_ratio, start](
auto &backref_extents, auto &pin_list, auto &reclaimed, auto &runs)
{
return do_reclaim_space(
backref_extents,
pin_list,
reclaimed,
runs
).safe_then([this, FNAME, pavail_ratio, start, &reclaimed, &runs] {
stats.reclaiming_bytes += reclaimed;
auto d = seastar::lowres_system_clock::now() - start;
DEBUG("duration: {}, pavail_ratio before: {}, repeats: {}",
d, pavail_ratio, runs);
if (reclaim_state->is_complete()) {
auto segment_to_release = reclaim_state->get_segment_id();
INFO("reclaim {} finish, reclaimed alive/total={}",
segment_to_release,
stats.reclaiming_bytes/(double)segments.get_segment_size());
stats.reclaimed_bytes += stats.reclaiming_bytes;
stats.reclaimed_segment_bytes += segments.get_segment_size();
stats.reclaiming_bytes = 0;
reclaim_state.reset();
return sm_group->release_segment(segment_to_release
).handle_error(
clean_space_ertr::pass_further{},
crimson::ct_error::assert_all{
"SegmentCleaner::clean_space encountered invalid error in release_segment"
}
).safe_then([this, FNAME, segment_to_release] {
auto old_usage = calc_utilization(segment_to_release);
if (unlikely(old_usage != 0)) {
space_tracker->dump_usage(segment_to_release);
ERROR("segment {} old_usage {} != 0",
segment_to_release, old_usage);
ceph_abort();
}
segments.mark_empty(segment_to_release);
auto new_usage = calc_utilization(segment_to_release);
adjust_segment_util(old_usage, new_usage);
INFO("released {}, {}",
segment_to_release, stat_printer_t{*this, false});
background_callback->maybe_wake_blocked_io();
});
} else {
return clean_space_ertr::now();
}
});
});
});
}
SegmentCleaner::mount_ret SegmentCleaner::mount()
{
LOG_PREFIX(SegmentCleaner::mount);
const auto& sms = sm_group->get_segment_managers();
INFO("{} segment managers", sms.size());
assert(background_callback->get_state() == state_t::MOUNT);
space_tracker.reset(
detailed ?
(SpaceTrackerI*)new SpaceTrackerDetailed(
sms) :
(SpaceTrackerI*)new SpaceTrackerSimple(
sms));
segments.reset();
for (auto sm : sms) {
segments.add_segment_manager(*sm);
}
segments.assign_ids();
stats = {};
metrics.clear();
register_metrics();
INFO("{} segments", segments.get_num_segments());
return crimson::do_for_each(
segments.begin(),
segments.end(),
[this, FNAME](auto& it)
{
auto segment_id = it.first;
return sm_group->read_segment_header(
segment_id
).safe_then([segment_id, this, FNAME](auto header) {
DEBUG("segment_id={} -- {}", segment_id, header);
auto s_type = header.get_type();
if (s_type == segment_type_t::NULL_SEG) {
ERROR("got null segment, segment_id={} -- {}", segment_id, header);
ceph_abort();
}
return sm_group->read_segment_tail(
segment_id
).safe_then([this, FNAME, segment_id, header](auto tail)
-> scan_extents_ertr::future<> {
if (tail.segment_nonce != header.segment_nonce) {
return scan_no_tail_segment(header, segment_id);
}
ceph_assert(header.get_type() == tail.get_type());
sea_time_point modify_time = mod_to_timepoint(tail.modify_time);
std::size_t num_extents = tail.num_extents;
if ((modify_time == NULL_TIME && num_extents == 0) ||
(modify_time != NULL_TIME && num_extents != 0)) {
segments.update_modify_time(segment_id, modify_time, num_extents);
} else {
ERROR("illegal modify time {}", tail);
return crimson::ct_error::input_output_error::make();
}
init_mark_segment_closed(
segment_id,
header.segment_seq,
header.type,
header.category,
header.generation);
return seastar::now();
}).handle_error(
crimson::ct_error::enodata::handle(
[this, header, segment_id](auto) {
return scan_no_tail_segment(header, segment_id);
}),
crimson::ct_error::pass_further_all{}
);
}).handle_error(
crimson::ct_error::enoent::handle([](auto) {
return mount_ertr::now();
}),
crimson::ct_error::enodata::handle([](auto) {
return mount_ertr::now();
}),
crimson::ct_error::input_output_error::pass_further{},
crimson::ct_error::assert_all{"unexpected error"}
);
}).safe_then([this, FNAME] {
INFO("done, {}", segments);
});
}
SegmentCleaner::scan_extents_ret SegmentCleaner::scan_no_tail_segment(
const segment_header_t &segment_header,
segment_id_t segment_id)
{
LOG_PREFIX(SegmentCleaner::scan_no_tail_segment);
INFO("scan {} {}", segment_id, segment_header);
return seastar::do_with(
scan_valid_records_cursor({
segments[segment_id].seq,
paddr_t::make_seg_paddr(segment_id, 0)
}),
SegmentManagerGroup::found_record_handler_t(
[this, segment_id, segment_header, FNAME](
record_locator_t locator,
const record_group_header_t &record_group_header,
const bufferlist& mdbuf
) mutable -> SegmentManagerGroup::scan_valid_records_ertr::future<>
{
DEBUG("{} {}, decoding {} records",
segment_id, segment_header.get_type(), record_group_header.records);
auto maybe_headers = try_decode_record_headers(
record_group_header, mdbuf);
if (!maybe_headers) {
// This should be impossible; we did check the crc on the mdbuf
ERROR("unable to decode record headers for record group {}",
locator.record_block_base);
return crimson::ct_error::input_output_error::make();
}
for (auto &record_header : *maybe_headers) {
auto modify_time = mod_to_timepoint(record_header.modify_time);
if (record_header.extents == 0 || modify_time != NULL_TIME) {
segments.update_modify_time(
segment_id, modify_time, record_header.extents);
} else {
ERROR("illegal modify time {}", record_header);
return crimson::ct_error::input_output_error::make();
}
}
return seastar::now();
}),
[this, segment_header](auto &cursor, auto &handler)
{
return sm_group->scan_valid_records(
cursor,
segment_header.segment_nonce,
segments.get_segment_size(),
handler).discard_result();
}).safe_then([this, segment_id, segment_header] {
init_mark_segment_closed(
segment_id,
segment_header.segment_seq,
segment_header.type,
segment_header.category,
segment_header.generation);
});
}
bool SegmentCleaner::check_usage()
{
SpaceTrackerIRef tracker(space_tracker->make_empty());
extent_callback->with_transaction_weak(
"check_usage",
[this, &tracker](auto &t) {
return backref_manager.scan_mapped_space(
t,
[&tracker](
paddr_t paddr,
paddr_t backref_key,
extent_len_t len,
extent_types_t type,
laddr_t laddr)
{
if (paddr.get_addr_type() == paddr_types_t::SEGMENT) {
if (is_backref_node(type)) {
assert(laddr == L_ADDR_NULL);
assert(backref_key != P_ADDR_NULL);
tracker->allocate(
paddr.as_seg_paddr().get_segment_id(),
paddr.as_seg_paddr().get_segment_off(),
len);
} else if (laddr == L_ADDR_NULL) {
assert(backref_key == P_ADDR_NULL);
tracker->release(
paddr.as_seg_paddr().get_segment_id(),
paddr.as_seg_paddr().get_segment_off(),
len);
} else {
assert(backref_key == P_ADDR_NULL);
tracker->allocate(
paddr.as_seg_paddr().get_segment_id(),
paddr.as_seg_paddr().get_segment_off(),
len);
}
}
});
}).unsafe_get0();
return space_tracker->equals(*tracker);
}
void SegmentCleaner::mark_space_used(
paddr_t addr,
extent_len_t len)
{
LOG_PREFIX(SegmentCleaner::mark_space_used);
assert(background_callback->get_state() >= state_t::SCAN_SPACE);
assert(len);
// TODO: drop
if (addr.get_addr_type() != paddr_types_t::SEGMENT) {
return;
}
auto& seg_addr = addr.as_seg_paddr();
stats.used_bytes += len;
auto old_usage = calc_utilization(seg_addr.get_segment_id());
[[maybe_unused]] auto ret = space_tracker->allocate(
seg_addr.get_segment_id(),
seg_addr.get_segment_off(),
len);
auto new_usage = calc_utilization(seg_addr.get_segment_id());
adjust_segment_util(old_usage, new_usage);
background_callback->maybe_wake_background();
assert(ret > 0);
DEBUG("segment {} new len: {}~{}, live_bytes: {}",
seg_addr.get_segment_id(),
addr,
len,
space_tracker->get_usage(seg_addr.get_segment_id()));
}
void SegmentCleaner::mark_space_free(
paddr_t addr,
extent_len_t len)
{
LOG_PREFIX(SegmentCleaner::mark_space_free);
assert(background_callback->get_state() >= state_t::SCAN_SPACE);
assert(len);
// TODO: drop
if (addr.get_addr_type() != paddr_types_t::SEGMENT) {
return;
}
ceph_assert(stats.used_bytes >= len);
stats.used_bytes -= len;
auto& seg_addr = addr.as_seg_paddr();
DEBUG("segment {} free len: {}~{}",
seg_addr.get_segment_id(), addr, len);
auto old_usage = calc_utilization(seg_addr.get_segment_id());
[[maybe_unused]] auto ret = space_tracker->release(
seg_addr.get_segment_id(),
seg_addr.get_segment_off(),
len);
auto new_usage = calc_utilization(seg_addr.get_segment_id());
adjust_segment_util(old_usage, new_usage);
background_callback->maybe_wake_blocked_io();
assert(ret >= 0);
DEBUG("segment {} free len: {}~{}, live_bytes: {}",
seg_addr.get_segment_id(),
addr,
len,
space_tracker->get_usage(seg_addr.get_segment_id()));
}
segment_id_t SegmentCleaner::get_next_reclaim_segment() const
{
LOG_PREFIX(SegmentCleaner::get_next_reclaim_segment);
segment_id_t id = NULL_SEG_ID;
double max_benefit_cost = 0;
sea_time_point now_time;
if constexpr (gc_formula != gc_formula_t::GREEDY) {
now_time = seastar::lowres_system_clock::now();
} else {
now_time = NULL_TIME;
}
sea_time_point bound_time;
if constexpr (gc_formula == gc_formula_t::BENEFIT) {
bound_time = segments.get_time_bound();
if (bound_time == NULL_TIME) {
WARN("BENEFIT -- bound_time is NULL_TIME");
}
} else {
bound_time = NULL_TIME;
}
for (auto& [_id, segment_info] : segments) {
if (segment_info.is_closed() &&
(trimmer == nullptr ||
!segment_info.is_in_journal(trimmer->get_journal_tail()))) {
double benefit_cost = calc_gc_benefit_cost(_id, now_time, bound_time);
if (benefit_cost > max_benefit_cost) {
id = _id;
max_benefit_cost = benefit_cost;
}
}
}
if (id != NULL_SEG_ID) {
DEBUG("segment {}, benefit_cost {}",
id, max_benefit_cost);
return id;
} else {
ceph_assert(get_segments_reclaimable() == 0);
// see should_clean_space()
ceph_abort("impossible!");
return NULL_SEG_ID;
}
}
bool SegmentCleaner::try_reserve_projected_usage(std::size_t projected_usage)
{
assert(background_callback->is_ready());
stats.projected_used_bytes += projected_usage;
if (should_block_io_on_clean()) {
stats.projected_used_bytes -= projected_usage;
return false;
} else {
++stats.projected_count;
stats.projected_used_bytes_sum += stats.projected_used_bytes;
return true;
}
}
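// Hedged usage sketch (hypothetical caller): a writer reserves its projected
// bytes before submitting and restores them via release_projected_usage()
// below, e.g.
//   if (cleaner.try_reserve_projected_usage(bytes)) {
//     /* ... submit transaction ... */
//     cleaner.release_projected_usage(bytes);
//   } // otherwise the IO is expected to block and retry later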
void SegmentCleaner::release_projected_usage(std::size_t projected_usage)
{
assert(background_callback->is_ready());
ceph_assert(stats.projected_used_bytes >= projected_usage);
stats.projected_used_bytes -= projected_usage;
background_callback->maybe_wake_blocked_io();
}
void SegmentCleaner::print(std::ostream &os, bool is_detailed) const
{
os << "SegmentCleaner(";
if (background_callback->is_ready()) {
os << "should_block_io_on_clean=" << should_block_io_on_clean()
<< ", should_clean=" << should_clean_space();
} else {
os << "not-ready";
}
os << ", projected_avail_ratio=" << get_projected_available_ratio()
<< ", reclaim_ratio=" << get_reclaim_ratio()
<< ", alive_ratio=" << get_alive_ratio();
if (is_detailed) {
os << ", unavailable_unreclaimable="
<< get_unavailable_unreclaimable_bytes() << "B"
<< ", unavailable_reclaimble="
<< get_unavailable_reclaimable_bytes() << "B"
<< ", alive=" << stats.used_bytes << "B"
<< ", " << segments;
}
os << ")";
}
RBMCleaner::RBMCleaner(
RBMDeviceGroupRef&& rb_group,
BackrefManager &backref_manager,
bool detailed)
: detailed(detailed),
rb_group(std::move(rb_group)),
backref_manager(backref_manager)
{}
void RBMCleaner::print(std::ostream &os, bool is_detailed) const
{
// TODO
return;
}
void RBMCleaner::mark_space_used(
paddr_t addr,
extent_len_t len)
{
LOG_PREFIX(RBMCleaner::mark_space_used);
assert(addr.get_addr_type() == paddr_types_t::RANDOM_BLOCK);
auto rbms = rb_group->get_rb_managers();
for (auto rbm : rbms) {
if (addr.get_device_id() == rbm->get_device_id()) {
if (rbm->get_start() <= addr) {
INFO("allocate addr: {} len: {}", addr, len);
stats.used_bytes += len;
rbm->mark_space_used(addr, len);
}
return;
}
}
}
void RBMCleaner::mark_space_free(
paddr_t addr,
extent_len_t len)
{
LOG_PREFIX(RBMCleaner::mark_space_free);
assert(addr.get_addr_type() == paddr_types_t::RANDOM_BLOCK);
auto rbms = rb_group->get_rb_managers();
for (auto rbm : rbms) {
if (addr.get_device_id() == rbm->get_device_id()) {
if (rbm->get_start() <= addr) {
INFO("free addr: {} len: {}", addr, len);
ceph_assert(stats.used_bytes >= len);
stats.used_bytes -= len;
rbm->mark_space_free(addr, len);
}
return;
}
}
}
void RBMCleaner::commit_space_used(paddr_t addr, extent_len_t len)
{
auto rbms = rb_group->get_rb_managers();
for (auto rbm : rbms) {
if (addr.get_device_id() == rbm->get_device_id()) {
if (rbm->get_start() <= addr) {
rbm->complete_allocation(addr, len);
}
return;
}
}
}
bool RBMCleaner::try_reserve_projected_usage(std::size_t projected_usage)
{
assert(background_callback->is_ready());
stats.projected_used_bytes += projected_usage;
return true;
}
void RBMCleaner::release_projected_usage(std::size_t projected_usage)
{
assert(background_callback->is_ready());
ceph_assert(stats.projected_used_bytes >= projected_usage);
stats.projected_used_bytes -= projected_usage;
background_callback->maybe_wake_blocked_io();
}
RBMCleaner::clean_space_ret RBMCleaner::clean_space()
{
// TODO
return clean_space_ertr::now();
}
RBMCleaner::mount_ret RBMCleaner::mount()
{
stats = {};
register_metrics();
return seastar::do_with(
rb_group->get_rb_managers(),
[](auto &rbs) {
return crimson::do_for_each(
rbs.begin(),
rbs.end(),
[](auto& it) {
return it->open(
).handle_error(
crimson::ct_error::input_output_error::pass_further(),
crimson::ct_error::assert_all{
"Invalid error when opening RBM"}
);
});
});
}
bool RBMCleaner::check_usage()
{
assert(detailed);
const auto& rbms = rb_group->get_rb_managers();
RBMSpaceTracker tracker(rbms);
extent_callback->with_transaction_weak(
"check_usage",
[this, &tracker, &rbms](auto &t) {
return backref_manager.scan_mapped_space(
t,
[&tracker, &rbms](
paddr_t paddr,
paddr_t backref_key,
extent_len_t len,
extent_types_t type,
laddr_t laddr)
{
for (auto rbm : rbms) {
if (rbm->get_device_id() == paddr.get_device_id()) {
if (is_backref_node(type)) {
assert(laddr == L_ADDR_NULL);
assert(backref_key != P_ADDR_NULL);
tracker.allocate(
paddr,
len);
} else if (laddr == L_ADDR_NULL) {
assert(backref_key == P_ADDR_NULL);
tracker.release(
paddr,
len);
} else {
assert(backref_key == P_ADDR_NULL);
tracker.allocate(
paddr,
len);
}
}
}
});
}).unsafe_get0();
return equals(tracker);
}
bool RBMCleaner::equals(const RBMSpaceTracker &_other) const
{
LOG_PREFIX(RBMCleaner::equals);
const auto &other = static_cast<const RBMSpaceTracker&>(_other);
auto rbs = rb_group->get_rb_managers();
// TODO: multiple RBM allocators
auto rbm = rbs[0];
assert(rbm);
if (rbm->get_device()->get_available_size() / rbm->get_block_size()
!= other.block_usage.size()) {
assert(0 == "block counts should match");
return false;
}
bool all_match = true;
for (auto i = other.block_usage.begin();
i != other.block_usage.end(); ++i) {
if (i->first < rbm->get_start().as_blk_paddr().get_device_off()) {
continue;
}
auto addr = i->first;
auto state = rbm->get_extent_state(
convert_abs_addr_to_paddr(addr, rbm->get_device_id()),
rbm->get_block_size());
if ((i->second.used && state == rbm_extent_state_t::ALLOCATED) ||
(!i->second.used && (state == rbm_extent_state_t::FREE ||
state == rbm_extent_state_t::RESERVED))) {
// pass
} else {
all_match = false;
ERROR("block addr {} mismatch other used: {}",
addr, i->second.used);
}
}
return all_match;
}
void RBMCleaner::register_metrics()
{
namespace sm = seastar::metrics;
metrics.add_group("rbm_cleaner", {
sm::make_counter("total_bytes",
[this] { return get_total_bytes(); },
sm::description("the size of the space")),
sm::make_counter("available_bytes",
[this] { return get_total_bytes() - get_journal_bytes() - stats.used_bytes; },
sm::description("the size of the space is available")),
sm::make_counter("used_bytes", stats.used_bytes,
sm::description("the size of the space occupied by live extents")),
});
}
}
| 58,318 | 31.078658 | 88 | cc |
null | ceph-main/src/crimson/os/seastore/async_cleaner.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <boost/intrusive/set.hpp>
#include <seastar/core/metrics_types.hh>
#include "common/ceph_time.h"
#include "osd/osd_types.h"
#include "crimson/os/seastore/cached_extent.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/segment_manager.h"
#include "crimson/os/seastore/segment_manager_group.h"
#include "crimson/os/seastore/randomblock_manager_group.h"
#include "crimson/os/seastore/transaction.h"
#include "crimson/os/seastore/segment_seq_allocator.h"
namespace crimson::os::seastore {
/*
* segment_info_t
*
* Maintains the tracked information for a segment.
* It is read-only outside segments_info_t.
*/
struct segment_info_t {
segment_id_t id = NULL_SEG_ID;
// segment_info_t is initialized as set_empty()
Segment::segment_state_t state = Segment::segment_state_t::EMPTY;
// Will be non-null for any segments in the current journal
segment_seq_t seq = NULL_SEG_SEQ;
segment_type_t type = segment_type_t::NULL_SEG;
data_category_t category = data_category_t::NUM;
rewrite_gen_t generation = NULL_GENERATION;
sea_time_point modify_time = NULL_TIME;
std::size_t num_extents = 0;
segment_off_t written_to = 0;
bool is_in_journal(journal_seq_t tail_committed) const {
return type == segment_type_t::JOURNAL &&
tail_committed.segment_seq <= seq;
}
bool is_empty() const {
return state == Segment::segment_state_t::EMPTY;
}
bool is_closed() const {
return state == Segment::segment_state_t::CLOSED;
}
bool is_open() const {
return state == Segment::segment_state_t::OPEN;
}
void init_closed(segment_seq_t, segment_type_t,
data_category_t, rewrite_gen_t,
segment_off_t);
void set_open(segment_seq_t, segment_type_t,
data_category_t, rewrite_gen_t);
void set_empty();
void set_closed();
void update_modify_time(sea_time_point _modify_time, std::size_t _num_extents) {
ceph_assert(!is_closed());
assert(_modify_time != NULL_TIME);
assert(_num_extents != 0);
if (modify_time == NULL_TIME) {
modify_time = _modify_time;
num_extents = _num_extents;
} else {
modify_time = get_average_time(
modify_time, num_extents, _modify_time, _num_extents);
num_extents += _num_extents;
}
}
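  // Assumed behaviour of get_average_time() for illustration: it returns the
  // extent-count weighted mean, so merging (t1, 3 extents) with (t2, 1 extent)
  // yields (3*t1 + t2) / 4 with num_extents becoming 4.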
};
std::ostream& operator<<(std::ostream&, const segment_info_t&);
/*
* segments_info_t
*
* Keep track of all segments and related information.
*/
class segments_info_t {
public:
segments_info_t() {
reset();
}
const segment_info_t& operator[](segment_id_t id) const {
return segments[id];
}
auto begin() const {
return segments.begin();
}
auto end() const {
return segments.end();
}
std::size_t get_num_segments() const {
assert(segments.size() > 0);
return segments.size();
}
segment_off_t get_segment_size() const {
assert(segment_size > 0);
return segment_size;
}
std::size_t get_num_in_journal_open() const {
return num_in_journal_open;
}
std::size_t get_num_type_journal() const {
return num_type_journal;
}
std::size_t get_num_type_ool() const {
return num_type_ool;
}
std::size_t get_num_open() const {
return num_open;
}
std::size_t get_num_empty() const {
return num_empty;
}
std::size_t get_num_closed() const {
return num_closed;
}
std::size_t get_count_open_journal() const {
return count_open_journal;
}
std::size_t get_count_open_ool() const {
return count_open_ool;
}
std::size_t get_count_release_journal() const {
return count_release_journal;
}
std::size_t get_count_release_ool() const {
return count_release_ool;
}
std::size_t get_count_close_journal() const {
return count_close_journal;
}
std::size_t get_count_close_ool() const {
return count_close_ool;
}
std::size_t get_total_bytes() const {
return total_bytes;
}
/// the available space that is writable, including in open segments
std::size_t get_available_bytes() const {
return num_empty * get_segment_size() + avail_bytes_in_open;
}
/// the unavailable space that is not writable
std::size_t get_unavailable_bytes() const {
assert(total_bytes >= get_available_bytes());
return total_bytes - get_available_bytes();
}
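  // Illustrative numbers (assumptions, not invariants): 100 segments of 64MiB
  // with 90 empty and 100MiB still writable in open segments give
  // get_available_bytes() == 90*64MiB + 100MiB, and get_unavailable_bytes()
  // is total_bytes minus that amount.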
std::size_t get_available_bytes_in_open() const {
return avail_bytes_in_open;
}
double get_available_ratio() const {
return (double)get_available_bytes() / (double)total_bytes;
}
journal_seq_t get_submitted_journal_head() const {
if (unlikely(journal_segment_id == NULL_SEG_ID)) {
return JOURNAL_SEQ_NULL;
}
auto &segment_info = segments[journal_segment_id];
assert(!segment_info.is_empty());
assert(segment_info.type == segment_type_t::JOURNAL);
assert(segment_info.seq != NULL_SEG_SEQ);
return journal_seq_t{
segment_info.seq,
paddr_t::make_seg_paddr(
journal_segment_id,
segment_info.written_to)
};
}
sea_time_point get_time_bound() const {
if (!modify_times.empty()) {
return *modify_times.begin();
} else {
return NULL_TIME;
}
}
void reset();
void add_segment_manager(SegmentManager &segment_manager);
void assign_ids() {
for (auto &item : segments) {
item.second.id = item.first;
}
}
// initialize non-empty segments; the others are empty by default
void init_closed(segment_id_t, segment_seq_t, segment_type_t,
data_category_t, rewrite_gen_t);
void mark_open(segment_id_t, segment_seq_t, segment_type_t,
data_category_t, rewrite_gen_t);
void mark_empty(segment_id_t);
void mark_closed(segment_id_t);
void update_written_to(segment_type_t, paddr_t);
void update_modify_time(
segment_id_t id, sea_time_point tp, std::size_t num) {
if (num == 0) {
return;
}
assert(tp != NULL_TIME);
segments[id].update_modify_time(tp, num);
}
private:
// See reset() for member initialization
segment_map_t<segment_info_t> segments;
segment_off_t segment_size;
segment_id_t journal_segment_id;
std::size_t num_in_journal_open;
std::size_t num_type_journal;
std::size_t num_type_ool;
std::size_t num_open;
std::size_t num_empty;
std::size_t num_closed;
std::size_t count_open_journal;
std::size_t count_open_ool;
std::size_t count_release_journal;
std::size_t count_release_ool;
std::size_t count_close_journal;
std::size_t count_close_ool;
std::size_t total_bytes;
std::size_t avail_bytes_in_open;
std::multiset<sea_time_point> modify_times;
};
std::ostream &operator<<(std::ostream &, const segments_info_t &);
/**
* Callback interface for querying extents and operating on transactions.
*/
class ExtentCallbackInterface {
public:
using base_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
using base_iertr = trans_iertr<base_ertr>;
virtual ~ExtentCallbackInterface() = default;
/// Creates empty transaction
/// weak transaction should be type READ
virtual TransactionRef create_transaction(
Transaction::src_t, const char *name, bool is_weak=false) = 0;
/// Creates empty transaction with interruptible context
template <typename Func>
auto with_transaction_intr(
Transaction::src_t src,
const char* name,
Func &&f) {
return do_with_transaction_intr<Func, false>(
src, name, std::forward<Func>(f));
}
template <typename Func>
auto with_transaction_weak(
const char* name,
Func &&f) {
return do_with_transaction_intr<Func, true>(
Transaction::src_t::READ, name, std::forward<Func>(f)
).handle_error(
crimson::ct_error::eagain::handle([] {
ceph_assert(0 == "eagain impossible");
}),
crimson::ct_error::pass_further_all{}
);
}
/// See Cache::get_next_dirty_extents
using get_next_dirty_extents_iertr = base_iertr;
using get_next_dirty_extents_ret = get_next_dirty_extents_iertr::future<
std::vector<CachedExtentRef>>;
virtual get_next_dirty_extents_ret get_next_dirty_extents(
Transaction &t, ///< [in] current transaction
journal_seq_t bound,///< [in] return extents with dirty_from < bound
size_t max_bytes ///< [in] return up to max_bytes of extents
) = 0;
/**
* rewrite_extent
*
* Updates t with operations moving the passed extents to a new
* segment. extent may be invalid, implementation must correctly
* handle finding the current instance if it is still alive and
* otherwise ignore it.
*/
using rewrite_extent_iertr = base_iertr;
using rewrite_extent_ret = rewrite_extent_iertr::future<>;
virtual rewrite_extent_ret rewrite_extent(
Transaction &t,
CachedExtentRef extent,
rewrite_gen_t target_generation,
sea_time_point modify_time) = 0;
/**
* get_extents_if_live
*
* Returns extent at specified location if still referenced by
* lba_manager and not removed by t.
*
* See TransactionManager::get_extent_if_live and
* LBAManager::get_physical_extent_if_live.
*/
using get_extents_if_live_iertr = base_iertr;
using get_extents_if_live_ret = get_extents_if_live_iertr::future<
std::list<CachedExtentRef>>;
virtual get_extents_if_live_ret get_extents_if_live(
Transaction &t,
extent_types_t type,
paddr_t addr,
laddr_t laddr,
extent_len_t len) = 0;
/**
* submit_transaction_direct
*
* Submits transaction without any space throttling.
*/
using submit_transaction_direct_iertr = base_iertr;
using submit_transaction_direct_ret =
submit_transaction_direct_iertr::future<>;
virtual submit_transaction_direct_ret submit_transaction_direct(
Transaction &t,
std::optional<journal_seq_t> seq_to_trim = std::nullopt) = 0;
private:
template <typename Func, bool IsWeak>
auto do_with_transaction_intr(
Transaction::src_t src,
const char* name,
Func &&f) {
return seastar::do_with(
create_transaction(src, name, IsWeak),
[f=std::forward<Func>(f)](auto &ref_t) mutable {
return with_trans_intr(
*ref_t,
[f=std::forward<Func>(f)](auto& t) mutable {
return f(t);
}
);
}
);
}
};
/**
* Callback interface to wake up background works
*/
struct BackgroundListener {
enum class state_t {
STOP,
MOUNT,
SCAN_SPACE,
RUNNING,
HALT,
};
virtual ~BackgroundListener() = default;
virtual void maybe_wake_background() = 0;
virtual void maybe_wake_blocked_io() = 0;
virtual state_t get_state() const = 0;
bool is_ready() const {
return get_state() >= state_t::RUNNING;
}
};
/**
* Callback interface for Journal
*/
class JournalTrimmer {
public:
// get the committed journal head
virtual journal_seq_t get_journal_head() const = 0;
// set the committed journal head
virtual void set_journal_head(journal_seq_t) = 0;
// get the committed journal dirty tail
virtual journal_seq_t get_dirty_tail() const = 0;
// get the committed journal alloc tail
virtual journal_seq_t get_alloc_tail() const = 0;
// set the committed journal tails
virtual void update_journal_tails(
journal_seq_t dirty_tail, journal_seq_t alloc_tail) = 0;
// try to reserve the projected usage in the journal;
// returns whether the reservation succeeded.
// on success, the user should call release_inline_usage to restore it.
virtual bool try_reserve_inline_usage(std::size_t usage) = 0;
// release the projected usage in journal
virtual void release_inline_usage(std::size_t usage) = 0;
virtual ~JournalTrimmer() {}
journal_seq_t get_journal_tail() const {
return std::min(get_alloc_tail(), get_dirty_tail());
}
virtual std::size_t get_trim_size_per_cycle() const = 0;
bool check_is_ready() const {
return (get_journal_head() != JOURNAL_SEQ_NULL &&
get_dirty_tail() != JOURNAL_SEQ_NULL &&
get_alloc_tail() != JOURNAL_SEQ_NULL);
}
std::size_t get_num_rolls() const {
if (!check_is_ready()) {
return 0;
}
assert(get_journal_head().segment_seq >=
get_journal_tail().segment_seq);
return get_journal_head().segment_seq + 1 -
get_journal_tail().segment_seq;
}
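  // Example (made-up sequence numbers): head segment_seq == 10 and tail
  // segment_seq == 7 mean the journal currently spans 10 + 1 - 7 == 4 rolled
  // segments.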
};
class BackrefManager;
class JournalTrimmerImpl;
using JournalTrimmerImplRef = std::unique_ptr<JournalTrimmerImpl>;
/**
* Journal trimming implementation
*/
class JournalTrimmerImpl : public JournalTrimmer {
public:
struct config_t {
/// Number of minimum bytes to stop trimming dirty.
std::size_t target_journal_dirty_bytes = 0;
/// Number of minimum bytes to stop trimming allocation
/// (having the corresponding backrefs unmerged)
std::size_t target_journal_alloc_bytes = 0;
/// Number of maximum bytes to block user transactions.
std::size_t max_journal_bytes = 0;
/// Number of bytes to rewrite dirty per cycle
std::size_t rewrite_dirty_bytes_per_cycle = 0;
/// Number of bytes to rewrite backref per cycle
std::size_t rewrite_backref_bytes_per_cycle = 0;
void validate() const;
static config_t get_default(
std::size_t roll_size, journal_type_t type);
static config_t get_test(
std::size_t roll_size, journal_type_t type);
};
JournalTrimmerImpl(
BackrefManager &backref_manager,
config_t config,
journal_type_t type,
device_off_t roll_start,
device_off_t roll_size);
~JournalTrimmerImpl() = default;
/*
* JournalTrimmer interfaces
*/
journal_seq_t get_journal_head() const final {
return journal_head;
}
void set_journal_head(journal_seq_t) final;
journal_seq_t get_dirty_tail() const final {
return journal_dirty_tail;
}
journal_seq_t get_alloc_tail() const final {
return journal_alloc_tail;
}
void update_journal_tails(
journal_seq_t dirty_tail, journal_seq_t alloc_tail) final;
std::size_t get_trim_size_per_cycle() const final {
return config.rewrite_backref_bytes_per_cycle +
config.rewrite_dirty_bytes_per_cycle;
}
journal_type_t get_journal_type() const {
return journal_type;
}
void set_extent_callback(ExtentCallbackInterface *cb) {
extent_callback = cb;
}
void set_background_callback(BackgroundListener *cb) {
background_callback = cb;
}
void reset() {
journal_head = JOURNAL_SEQ_NULL;
journal_dirty_tail = JOURNAL_SEQ_NULL;
journal_alloc_tail = JOURNAL_SEQ_NULL;
}
bool should_trim() const {
return should_trim_alloc() || should_trim_dirty();
}
bool should_block_io_on_trim() const {
return get_tail_limit() >
get_journal_tail().add_offset(
journal_type, reserved_usage, roll_start, roll_size);
}
bool try_reserve_inline_usage(std::size_t usage) final {
reserved_usage += usage;
if (should_block_io_on_trim()) {
reserved_usage -= usage;
return false;
} else {
return true;
}
}
void release_inline_usage(std::size_t usage) final {
ceph_assert(reserved_usage >= usage);
reserved_usage -= usage;
}
seastar::future<> trim();
static JournalTrimmerImplRef create(
BackrefManager &backref_manager,
config_t config,
journal_type_t type,
device_off_t roll_start,
device_off_t roll_size) {
return std::make_unique<JournalTrimmerImpl>(
backref_manager, config, type, roll_start, roll_size);
}
struct stat_printer_t {
const JournalTrimmerImpl &trimmer;
bool detailed = false;
};
friend std::ostream &operator<<(std::ostream &, const stat_printer_t &);
private:
bool should_trim_dirty() const {
return get_dirty_tail_target() > journal_dirty_tail;
}
bool should_trim_alloc() const {
return get_alloc_tail_target() > journal_alloc_tail;
}
using trim_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
trim_ertr::future<> trim_dirty();
trim_ertr::future<> trim_alloc();
journal_seq_t get_tail_limit() const;
journal_seq_t get_dirty_tail_target() const;
journal_seq_t get_alloc_tail_target() const;
std::size_t get_dirty_journal_size() const;
std::size_t get_alloc_journal_size() const;
void register_metrics();
ExtentCallbackInterface *extent_callback = nullptr;
BackgroundListener *background_callback = nullptr;
BackrefManager &backref_manager;
config_t config;
journal_type_t journal_type;
device_off_t roll_start;
device_off_t roll_size;
journal_seq_t journal_head;
journal_seq_t journal_dirty_tail;
journal_seq_t journal_alloc_tail;
std::size_t reserved_usage;
seastar::metrics::metric_group metrics;
};
std::ostream &operator<<(
std::ostream &, const JournalTrimmerImpl::stat_printer_t &);
/**
* Callback interface for managing available segments
*/
class SegmentProvider {
public:
virtual const segment_info_t& get_seg_info(segment_id_t id) const = 0;
virtual segment_id_t allocate_segment(
segment_seq_t, segment_type_t, data_category_t, rewrite_gen_t) = 0;
virtual void close_segment(segment_id_t) = 0;
// set the submitted segment writes in order
virtual void update_segment_avail_bytes(segment_type_t, paddr_t) = 0;
virtual void update_modify_time(
segment_id_t, sea_time_point, std::size_t) = 0;
virtual SegmentManagerGroup* get_segment_manager_group() = 0;
virtual ~SegmentProvider() {}
};
class SpaceTrackerI {
public:
virtual int64_t allocate(
segment_id_t segment,
segment_off_t offset,
extent_len_t len) = 0;
virtual int64_t release(
segment_id_t segment,
segment_off_t offset,
extent_len_t len) = 0;
virtual int64_t get_usage(
segment_id_t segment) const = 0;
virtual bool equals(const SpaceTrackerI &other) const = 0;
virtual std::unique_ptr<SpaceTrackerI> make_empty() const = 0;
virtual void dump_usage(segment_id_t) const = 0;
virtual double calc_utilization(segment_id_t segment) const = 0;
virtual void reset() = 0;
virtual ~SpaceTrackerI() = default;
};
using SpaceTrackerIRef = std::unique_ptr<SpaceTrackerI>;
class SpaceTrackerSimple : public SpaceTrackerI {
struct segment_bytes_t {
int64_t live_bytes = 0;
segment_off_t total_bytes = 0;
};
// Tracks live space for each segment
segment_map_t<segment_bytes_t> live_bytes_by_segment;
int64_t update_usage(segment_id_t segment, int64_t delta) {
live_bytes_by_segment[segment].live_bytes += delta;
assert(live_bytes_by_segment[segment].live_bytes >= 0);
return live_bytes_by_segment[segment].live_bytes;
}
public:
SpaceTrackerSimple(const SpaceTrackerSimple &) = default;
SpaceTrackerSimple(const std::vector<SegmentManager*> &sms) {
for (auto sm : sms) {
live_bytes_by_segment.add_device(
sm->get_device_id(),
sm->get_num_segments(),
{0, sm->get_segment_size()});
}
}
int64_t allocate(
segment_id_t segment,
segment_off_t offset,
extent_len_t len) final {
return update_usage(segment, len);
}
int64_t release(
segment_id_t segment,
segment_off_t offset,
extent_len_t len) final {
return update_usage(segment, -(int64_t)len);
}
int64_t get_usage(segment_id_t segment) const final {
return live_bytes_by_segment[segment].live_bytes;
}
double calc_utilization(segment_id_t segment) const final {
auto& seg_bytes = live_bytes_by_segment[segment];
return (double)seg_bytes.live_bytes / (double)seg_bytes.total_bytes;
}
void dump_usage(segment_id_t) const final;
void reset() final {
for (auto &i : live_bytes_by_segment) {
i.second = {0, 0};
}
}
SpaceTrackerIRef make_empty() const final {
auto ret = SpaceTrackerIRef(new SpaceTrackerSimple(*this));
ret->reset();
return ret;
}
bool equals(const SpaceTrackerI &other) const;
};
class SpaceTrackerDetailed : public SpaceTrackerI {
class SegmentMap {
int64_t used = 0;
segment_off_t total_bytes = 0;
std::vector<bool> bitmap;
public:
SegmentMap(
size_t blocks,
segment_off_t total_bytes)
: total_bytes(total_bytes),
bitmap(blocks, false) {}
int64_t update_usage(int64_t delta) {
used += delta;
return used;
}
int64_t allocate(
device_segment_id_t segment,
segment_off_t offset,
extent_len_t len,
const extent_len_t block_size);
int64_t release(
device_segment_id_t segment,
segment_off_t offset,
extent_len_t len,
const extent_len_t block_size);
int64_t get_usage() const {
return used;
}
void dump_usage(extent_len_t block_size) const;
double calc_utilization() const {
return (double)used / (double)total_bytes;
}
void reset() {
used = 0;
for (auto &&i: bitmap) {
i = false;
}
}
};
// Tracks live space for each segment
segment_map_t<SegmentMap> segment_usage;
std::vector<size_t> block_size_by_segment_manager;
public:
SpaceTrackerDetailed(const SpaceTrackerDetailed &) = default;
SpaceTrackerDetailed(const std::vector<SegmentManager*> &sms)
{
block_size_by_segment_manager.resize(DEVICE_ID_MAX, 0);
for (auto sm : sms) {
segment_usage.add_device(
sm->get_device_id(),
sm->get_num_segments(),
SegmentMap(
sm->get_segment_size() / sm->get_block_size(),
sm->get_segment_size()));
block_size_by_segment_manager[sm->get_device_id()] = sm->get_block_size();
}
}
int64_t allocate(
segment_id_t segment,
segment_off_t offset,
extent_len_t len) final {
return segment_usage[segment].allocate(
segment.device_segment_id(),
offset,
len,
block_size_by_segment_manager[segment.device_id()]);
}
int64_t release(
segment_id_t segment,
segment_off_t offset,
extent_len_t len) final {
return segment_usage[segment].release(
segment.device_segment_id(),
offset,
len,
block_size_by_segment_manager[segment.device_id()]);
}
int64_t get_usage(segment_id_t segment) const final {
return segment_usage[segment].get_usage();
}
double calc_utilization(segment_id_t segment) const final {
return segment_usage[segment].calc_utilization();
}
void dump_usage(segment_id_t seg) const final;
void reset() final {
for (auto &i: segment_usage) {
i.second.reset();
}
}
SpaceTrackerIRef make_empty() const final {
auto ret = SpaceTrackerIRef(new SpaceTrackerDetailed(*this));
ret->reset();
return ret;
}
bool equals(const SpaceTrackerI &other) const;
};
template <typename T>
class block_map_t {
public:
block_map_t() {
device_to_blocks.resize(DEVICE_ID_MAX_VALID);
device_block_size.resize(DEVICE_ID_MAX_VALID);
}
void add_device(device_id_t device, std::size_t blocks, const T& init,
size_t block_size) {
ceph_assert(device < DEVICE_ID_MAX_VALID);
ceph_assert(device_to_blocks[device].size() == 0);
ceph_assert(blocks > 0);
device_to_blocks[device].resize(blocks, init);
total_blocks += blocks;
device_block_size[device] = block_size;
}
void clear() {
device_to_blocks.clear();
device_to_blocks.resize(DEVICE_ID_MAX_VALID);
total_blocks = 0;
}
T& operator[](paddr_t block) {
ceph_assert(device_to_blocks[block.get_device_id()].size() != 0);
auto &blk = block.as_blk_paddr();
auto block_id = get_block_id(block.get_device_id(), blk.get_device_off());
return device_to_blocks[block.get_device_id()][block_id];
}
const T& operator[](paddr_t block) const {
ceph_assert(device_to_blocks[block.get_device_id()].size() != 0);
auto &blk = block.as_blk_paddr();
auto block_id = get_block_id(block.get_device_id(), blk.get_device_off());
return device_to_blocks[block.get_device_id()][block_id];
}
auto begin() {
return iterator<false>::lower_bound(*this, 0, 0);
}
auto begin() const {
return iterator<true>::lower_bound(*this, 0, 0);
}
auto end() {
return iterator<false>::end_iterator(*this);
}
auto end() const {
return iterator<true>::end_iterator(*this);
}
size_t size() const {
return total_blocks;
}
uint64_t get_block_size(device_id_t device_id) {
return device_block_size[device_id];
}
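// Maps a device offset to its block index by integer division, e.g. with a
// 4096-byte block size an offset of 8192 maps to block id 2.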
uint32_t get_block_id(device_id_t device_id, device_off_t blk_off) const {
auto block_size = device_block_size[device_id];
return blk_off == 0 ? 0 : blk_off/block_size;
}
template <bool is_const = false>
class iterator {
/// points at set being iterated over
std::conditional_t<
is_const,
const block_map_t &,
block_map_t &> parent;
/// points at current device, or DEVICE_ID_MAX_VALID if is_end()
device_id_t device_id;
/// block offset at which we are pointing, 0 if is_end()
device_off_t blk_off;
/// holds referent for operator* and operator-> when !is_end()
std::optional<
std::pair<
const device_off_t,
std::conditional_t<is_const, const T&, T&>
>> current;
bool is_end() const {
return device_id == DEVICE_ID_MAX_VALID;
}
uint32_t get_block_id() {
return parent.get_block_id(device_id, blk_off);
}
void find_valid() {
assert(!is_end());
auto &device_vec = parent.device_to_blocks[device_id];
if (device_vec.size() == 0 ||
get_block_id() == device_vec.size()) {
while (++device_id < DEVICE_ID_MAX_VALID &&
parent.device_to_blocks[device_id].size() == 0);
blk_off = 0;
}
if (is_end()) {
current = std::nullopt;
} else {
current.emplace(
blk_off,
parent.device_to_blocks[device_id][get_block_id()]
);
}
}
iterator(
decltype(parent) &parent,
device_id_t device_id,
device_off_t device_block_off)
: parent(parent), device_id(device_id),
blk_off(device_block_off) {}
public:
static iterator lower_bound(
decltype(parent) &parent,
device_id_t device_id,
device_off_t block_off) {
if (device_id == DEVICE_ID_MAX_VALID) {
return end_iterator(parent);
} else {
auto ret = iterator{parent, device_id, block_off};
ret.find_valid();
return ret;
}
}
static iterator end_iterator(
decltype(parent) &parent) {
return iterator{parent, DEVICE_ID_MAX_VALID, 0};
}
iterator<is_const>& operator++() {
assert(!is_end());
auto block_size = parent.device_block_size[device_id];
blk_off += block_size;
find_valid();
return *this;
}
bool operator==(iterator<is_const> rit) {
return (device_id == rit.device_id &&
blk_off == rit.blk_off);
}
bool operator!=(iterator<is_const> rit) {
return !(*this == rit);
}
template <bool c = is_const, std::enable_if_t<c, int> = 0>
const std::pair<const device_off_t, const T&> *operator->() {
assert(!is_end());
return &*current;
}
template <bool c = is_const, std::enable_if_t<!c, int> = 0>
std::pair<const device_off_t, T&> *operator->() {
assert(!is_end());
return &*current;
}
template <bool c = is_const, std::enable_if_t<c, int> = 0>
const std::pair<const device_off_t, const T&> &operator*() {
assert(!is_end());
return *current;
}
template <bool c = is_const, std::enable_if_t<!c, int> = 0>
std::pair<const device_off_t, T&> &operator*() {
assert(!is_end());
return *current;
}
};
std::vector<std::vector<T>> device_to_blocks;
std::vector<size_t> device_block_size;
size_t total_blocks = 0;
};
class RBMSpaceTracker {
struct random_block_t {
bool used = false;
void allocate() {
used = true;
}
void release() {
used = false;
}
};
block_map_t<random_block_t> block_usage;
public:
RBMSpaceTracker(const RBMSpaceTracker &) = default;
RBMSpaceTracker(const std::vector<RandomBlockManager*> &rbms) {
for (auto rbm : rbms) {
block_usage.add_device(
rbm->get_device_id(),
rbm->get_device()->get_available_size() / rbm->get_block_size(),
{false},
rbm->get_block_size());
}
}
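// allocate()/release() below walk the blocks overlapping [addr, addr + len)
// in block-size steps, e.g. a 16K extent on a 4K-block device touches four
// consecutive blocks (illustrative numbers only).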
void allocate(
paddr_t addr,
extent_len_t len) {
paddr_t cursor = addr;
paddr_t end = addr.add_offset(len);
do {
block_usage[cursor].allocate();
cursor = cursor.add_offset(
block_usage.get_block_size(addr.get_device_id()));
} while (cursor < end);
}
void release(
paddr_t addr,
extent_len_t len) {
paddr_t cursor = addr;
paddr_t end = addr.add_offset(len);
do {
block_usage[cursor].release();
cursor = cursor.add_offset(
block_usage.get_block_size(addr.get_device_id()));
} while (cursor < end);
}
void reset() {
for (auto &i : block_usage) {
i.second = {false};
}
}
std::unique_ptr<RBMSpaceTracker> make_empty() const {
auto ret = std::make_unique<RBMSpaceTracker>(*this);
ret->reset();
return ret;
}
friend class RBMCleaner;
};
using RBMSpaceTrackerRef = std::unique_ptr<RBMSpaceTracker>;
/*
* AsyncCleaner
*
* Interface for ExtentPlacementManager::BackgroundProcess
* to do background cleaning.
*/
class AsyncCleaner {
public:
using state_t = BackgroundListener::state_t;
using base_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
virtual void set_background_callback(BackgroundListener *) = 0;
virtual void set_extent_callback(ExtentCallbackInterface *) = 0;
virtual store_statfs_t get_stat() const = 0;
virtual void print(std::ostream &, bool is_detailed) const = 0;
virtual bool check_usage_is_empty() const = 0;
using mount_ertr = base_ertr;
using mount_ret = mount_ertr::future<>;
virtual mount_ret mount() = 0;
virtual void mark_space_used(paddr_t, extent_len_t) = 0;
virtual void mark_space_free(paddr_t, extent_len_t) = 0;
virtual void commit_space_used(paddr_t, extent_len_t) = 0;
// try to reserve the projected usage in the cleaner
// returns whether the reservation is successful
// if the reservation succeeds, the user should call
// release_projected_usage to restore it.
virtual bool try_reserve_projected_usage(std::size_t) = 0;
virtual void release_projected_usage(std::size_t) = 0;
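// Usage mirrors the journal-side inline reservation: a successful
// try_reserve_projected_usage(bytes) must eventually be paired with a
// release_projected_usage(bytes) once the transaction completes.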
virtual bool should_block_io_on_clean() const = 0;
virtual bool can_clean_space() const = 0;
virtual bool should_clean_space() const = 0;
using clean_space_ertr = base_ertr;
using clean_space_ret = clean_space_ertr::future<>;
virtual clean_space_ret clean_space() = 0;
virtual const std::set<device_id_t>& get_device_ids() const = 0;
virtual std::size_t get_reclaim_size_per_cycle() const = 0;
// test only
virtual bool check_usage() = 0;
struct stat_printer_t {
const AsyncCleaner &cleaner;
bool detailed = false;
};
virtual ~AsyncCleaner() {}
};
using AsyncCleanerRef = std::unique_ptr<AsyncCleaner>;
std::ostream &operator<<(
std::ostream &, const AsyncCleaner::stat_printer_t &);
class SegmentCleaner;
using SegmentCleanerRef = std::unique_ptr<SegmentCleaner>;
class SegmentCleaner : public SegmentProvider, public AsyncCleaner {
public:
/// Config
struct config_t {
/// Available-space ratio above which reclaiming is disabled.
double available_ratio_gc_max = 0;
/// Available-space ratio below which reclaiming is forced.
double available_ratio_hard_limit = 0;
/// Reclaimable-space ratio below which reclaiming stops (unless forced).
double reclaim_ratio_gc_threshold = 0;
/// Number of bytes to reclaim per cycle
std::size_t reclaim_bytes_per_cycle = 0;
void validate() const {
ceph_assert(available_ratio_gc_max > available_ratio_hard_limit);
ceph_assert(reclaim_bytes_per_cycle > 0);
}
static config_t get_default() {
return config_t{
.15, // available_ratio_gc_max
.1, // available_ratio_hard_limit
.1, // reclaim_ratio_gc_threshold
1<<20 // reclaim_bytes_per_cycle
};
}
static config_t get_test() {
return config_t{
.99, // available_ratio_gc_max
.2, // available_ratio_hard_limit
.6, // reclaim_ratio_gc_threshold
1<<20 // reclaim_bytes_per_cycle
};
}
};
SegmentCleaner(
config_t config,
SegmentManagerGroupRef&& sm_group,
BackrefManager &backref_manager,
SegmentSeqAllocator &segment_seq_allocator,
bool detailed,
bool is_cold);
void set_journal_trimmer(JournalTrimmer &_trimmer) {
trimmer = &_trimmer;
}
static SegmentCleanerRef create(
config_t config,
SegmentManagerGroupRef&& sm_group,
BackrefManager &backref_manager,
SegmentSeqAllocator &ool_seq_allocator,
bool detailed,
bool is_cold = false) {
return std::make_unique<SegmentCleaner>(
config, std::move(sm_group), backref_manager,
ool_seq_allocator, detailed, is_cold);
}
/*
* SegmentProvider interfaces
*/
const segment_info_t& get_seg_info(segment_id_t id) const final {
return segments[id];
}
segment_id_t allocate_segment(
segment_seq_t, segment_type_t, data_category_t, rewrite_gen_t) final;
void close_segment(segment_id_t segment) final;
void update_segment_avail_bytes(segment_type_t type, paddr_t offset) final {
assert(type == segment_type_t::OOL ||
trimmer != nullptr); // segment_type_t::JOURNAL
segments.update_written_to(type, offset);
background_callback->maybe_wake_background();
}
void update_modify_time(
segment_id_t id, sea_time_point tp, std::size_t num_extents) final {
ceph_assert(num_extents == 0 || tp != NULL_TIME);
segments.update_modify_time(id, tp, num_extents);
}
SegmentManagerGroup* get_segment_manager_group() final {
return sm_group.get();
}
/*
* AsyncCleaner interfaces
*/
void set_background_callback(BackgroundListener *cb) final {
background_callback = cb;
}
void set_extent_callback(ExtentCallbackInterface *cb) final {
extent_callback = cb;
}
store_statfs_t get_stat() const final {
store_statfs_t st;
st.total = segments.get_total_bytes();
st.available = segments.get_total_bytes() - stats.used_bytes;
st.allocated = stats.used_bytes;
st.data_stored = stats.used_bytes;
// TODO add per extent type counters for omap_allocated and
// internal metadata
return st;
}
void print(std::ostream &, bool is_detailed) const final;
bool check_usage_is_empty() const final {
return space_tracker->equals(*space_tracker->make_empty());
}
mount_ret mount() final;
void mark_space_used(paddr_t, extent_len_t) final;
void mark_space_free(paddr_t, extent_len_t) final;
void commit_space_used(paddr_t addr, extent_len_t len) final {
mark_space_used(addr, len);
}
bool try_reserve_projected_usage(std::size_t) final;
void release_projected_usage(size_t) final;
bool should_block_io_on_clean() const final {
assert(background_callback->is_ready());
if (get_segments_reclaimable() == 0) {
return false;
}
auto aratio = get_projected_available_ratio();
return aratio < config.available_ratio_hard_limit;
}
bool can_clean_space() const final {
assert(background_callback->is_ready());
return get_segments_reclaimable() > 0;
}
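// Worked example using the get_default() config above (gc_max 0.15,
// hard_limit 0.1, reclaim threshold 0.1), assuming reclaimable segments
// exist: available ratio 0.12 with reclaim ratio 0.3 triggers cleaning via
// the second clause; available ratio 0.08 triggers it regardless of the
// reclaim ratio; available ratio 0.2 does not trigger it at all.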
bool should_clean_space() const final {
assert(background_callback->is_ready());
if (get_segments_reclaimable() == 0) {
return false;
}
auto aratio = segments.get_available_ratio();
auto rratio = get_reclaim_ratio();
return (
(aratio < config.available_ratio_hard_limit) ||
((aratio < config.available_ratio_gc_max) &&
(rratio > config.reclaim_ratio_gc_threshold))
);
}
clean_space_ret clean_space() final;
const std::set<device_id_t>& get_device_ids() const final {
return sm_group->get_device_ids();
}
std::size_t get_reclaim_size_per_cycle() const final {
return config.reclaim_bytes_per_cycle;
}
// Testing interfaces
bool check_usage() final;
private:
/*
* 10 buckets for the number of closed segments by usage
* 2 extra buckets for the number of open and empty segments
*/
static constexpr double UTIL_STATE_OPEN = 1.05;
static constexpr double UTIL_STATE_EMPTY = 1.15;
static constexpr std::size_t UTIL_BUCKETS = 12;
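// Bucket mapping example: a closed segment at 37% utilization lands in
// bucket floor(0.37 * 10) = 3; open segments (UTIL_STATE_OPEN = 1.05) land
// in bucket 10 and empty segments (UTIL_STATE_EMPTY = 1.15) in bucket 11.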
static std::size_t get_bucket_index(double util) {
auto index = std::floor(util * 10);
assert(index < UTIL_BUCKETS);
return index;
}
double calc_utilization(segment_id_t id) const {
auto& info = segments[id];
if (info.is_open()) {
return UTIL_STATE_OPEN;
} else if (info.is_empty()) {
return UTIL_STATE_EMPTY;
} else {
auto ret = space_tracker->calc_utilization(id);
assert(ret >= 0 && ret < 1);
return ret;
}
}
// journal status helpers
double calc_gc_benefit_cost(
segment_id_t id,
const sea_time_point &now_time,
const sea_time_point &bound_time) const;
segment_id_t get_next_reclaim_segment() const;
struct reclaim_state_t {
rewrite_gen_t generation;
rewrite_gen_t target_generation;
segment_off_t segment_size;
paddr_t start_pos;
paddr_t end_pos;
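// Generation example for create() below: a segment still below
// MIN_REWRITE_GENERATION is rewritten into MIN_REWRITE_GENERATION, while a
// segment already at generation N (>= MIN_REWRITE_GENERATION) targets N + 1,
// possibly past MAX_REWRITE_GENERATION, so EPM can still see the original
// generation.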
static reclaim_state_t create(
segment_id_t segment_id,
rewrite_gen_t generation,
segment_off_t segment_size) {
ceph_assert(is_rewrite_generation(generation));
rewrite_gen_t target_gen;
if (generation < MIN_REWRITE_GENERATION) {
target_gen = MIN_REWRITE_GENERATION;
} else {
// allow target_gen to exceed MAX_REWRITE_GENERATION so that EPM stays
// aware of the original generation when making its decisions.
target_gen = generation + 1;
}
assert(is_target_rewrite_generation(target_gen));
return {generation,
target_gen,
segment_size,
P_ADDR_NULL,
paddr_t::make_seg_paddr(segment_id, 0)};
}
segment_id_t get_segment_id() const {
return end_pos.as_seg_paddr().get_segment_id();
}
bool is_complete() const {
return end_pos.as_seg_paddr().get_segment_off() >= segment_size;
}
void advance(std::size_t bytes) {
assert(!is_complete());
start_pos = end_pos;
auto &end_seg_paddr = end_pos.as_seg_paddr();
auto next_off = end_seg_paddr.get_segment_off() + bytes;
if (next_off > (std::size_t)segment_size) {
end_seg_paddr.set_segment_off(segment_size);
} else {
end_seg_paddr.set_segment_off(next_off);
}
}
};
std::optional<reclaim_state_t> reclaim_state;
using do_reclaim_space_ertr = base_ertr;
using do_reclaim_space_ret = do_reclaim_space_ertr::future<>;
do_reclaim_space_ret do_reclaim_space(
const std::vector<CachedExtentRef> &backref_extents,
const backref_pin_list_t &pin_list,
std::size_t &reclaimed,
std::size_t &runs);
/*
* Segments calculations
*/
std::size_t get_segments_in_journal() const {
if (trimmer != nullptr) {
return trimmer->get_num_rolls();
} else {
return 0;
}
}
std::size_t get_segments_in_journal_closed() const {
auto in_journal = get_segments_in_journal();
auto in_journal_open = segments.get_num_in_journal_open();
if (in_journal >= in_journal_open) {
return in_journal - in_journal_open;
} else {
return 0;
}
}
std::size_t get_segments_reclaimable() const {
assert(segments.get_num_closed() >= get_segments_in_journal_closed());
return segments.get_num_closed() - get_segments_in_journal_closed();
}
/*
* Space calculations
*/
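// Illustrative example of the accounting below: with 40 units of
// unavailable space split into 10 unreclaimable and 30 reclaimable units
// (the ceph_assert below checks this split), and 25 units of live data,
// the unused unavailable space is 40 - 25 = 15 and get_reclaim_ratio()
// returns 15 / 40 = 0.375.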
/// the unavailable space that is not reclaimable yet
std::size_t get_unavailable_unreclaimable_bytes() const {
auto ret = (segments.get_num_open() + get_segments_in_journal_closed()) *
segments.get_segment_size();
assert(ret >= segments.get_available_bytes_in_open());
return ret - segments.get_available_bytes_in_open();
}
/// the unavailable space that can be reclaimed
std::size_t get_unavailable_reclaimable_bytes() const {
auto ret = get_segments_reclaimable() * segments.get_segment_size();
ceph_assert(ret + get_unavailable_unreclaimable_bytes() == segments.get_unavailable_bytes());
return ret;
}
/// the unavailable space that is not alive
std::size_t get_unavailable_unused_bytes() const {
assert(segments.get_unavailable_bytes() > stats.used_bytes);
return segments.get_unavailable_bytes() - stats.used_bytes;
}
double get_reclaim_ratio() const {
if (segments.get_unavailable_bytes() == 0) return 0;
return (double)get_unavailable_unused_bytes() / (double)segments.get_unavailable_bytes();
}
double get_alive_ratio() const {
return stats.used_bytes / (double)segments.get_total_bytes();
}
/*
* Space calculations (projected)
*/
std::size_t get_projected_available_bytes() const {
return (segments.get_available_bytes() > stats.projected_used_bytes) ?
segments.get_available_bytes() - stats.projected_used_bytes:
0;
}
double get_projected_available_ratio() const {
return (double)get_projected_available_bytes() /
(double)segments.get_total_bytes();
}
using scan_extents_ertr = SegmentManagerGroup::scan_valid_records_ertr;
using scan_extents_ret = scan_extents_ertr::future<>;
scan_extents_ret scan_no_tail_segment(
const segment_header_t& header,
segment_id_t segment_id);
void adjust_segment_util(double old_usage, double new_usage) {
auto old_index = get_bucket_index(old_usage);
auto new_index = get_bucket_index(new_usage);
assert(stats.segment_util.buckets[old_index].count > 0);
stats.segment_util.buckets[old_index].count--;
stats.segment_util.buckets[new_index].count++;
}
void init_mark_segment_closed(
segment_id_t segment,
segment_seq_t seq,
segment_type_t s_type,
data_category_t category,
rewrite_gen_t generation) {
assert(background_callback->get_state() == state_t::MOUNT);
ceph_assert(s_type == segment_type_t::OOL ||
trimmer != nullptr); // segment_type_t::JOURNAL
auto old_usage = calc_utilization(segment);
segments.init_closed(segment, seq, s_type, category, generation);
auto new_usage = calc_utilization(segment);
adjust_segment_util(old_usage, new_usage);
if (s_type == segment_type_t::OOL) {
ool_segment_seq_allocator.set_next_segment_seq(seq);
}
}
const bool detailed;
const bool is_cold;
const config_t config;
SegmentManagerGroupRef sm_group;
BackrefManager &backref_manager;
SpaceTrackerIRef space_tracker;
segments_info_t segments;
struct {
/**
* used_bytes
*
* Bytes occupied by live extents
*/
uint64_t used_bytes = 0;
/**
* projected_used_bytes
*
* Sum of projected bytes used by each transaction between throttle
* acquisition and commit completion. See try_reserve_projected_usage()
*/
uint64_t projected_used_bytes = 0;
uint64_t projected_count = 0;
uint64_t projected_used_bytes_sum = 0;
uint64_t closed_journal_used_bytes = 0;
uint64_t closed_journal_total_bytes = 0;
uint64_t closed_ool_used_bytes = 0;
uint64_t closed_ool_total_bytes = 0;
uint64_t reclaiming_bytes = 0;
uint64_t reclaimed_bytes = 0;
uint64_t reclaimed_segment_bytes = 0;
seastar::metrics::histogram segment_util;
} stats;
seastar::metrics::metric_group metrics;
void register_metrics();
// optional, set if this cleaner is assigned to SegmentedJournal
JournalTrimmer *trimmer = nullptr;
ExtentCallbackInterface *extent_callback = nullptr;
BackgroundListener *background_callback = nullptr;
// TODO: drop once paddr->journal_seq_t is introduced
SegmentSeqAllocator &ool_segment_seq_allocator;
};
class RBMCleaner;
using RBMCleanerRef = std::unique_ptr<RBMCleaner>;
class RBMCleaner : public AsyncCleaner {
public:
RBMCleaner(
RBMDeviceGroupRef&& rb_group,
BackrefManager &backref_manager,
bool detailed);
static RBMCleanerRef create(
RBMDeviceGroupRef&& rb_group,
BackrefManager &backref_manager,
bool detailed) {
return std::make_unique<RBMCleaner>(
std::move(rb_group), backref_manager, detailed);
}
RBMDeviceGroup* get_rb_group() {
return rb_group.get();
}
/*
* AsyncCleaner interfaces
*/
void set_background_callback(BackgroundListener *cb) final {
background_callback = cb;
}
void set_extent_callback(ExtentCallbackInterface *cb) final {
extent_callback = cb;
}
store_statfs_t get_stat() const final {
store_statfs_t st;
st.total = get_total_bytes();
st.available = get_total_bytes() - get_journal_bytes() - stats.used_bytes;
st.allocated = get_journal_bytes() + stats.used_bytes;
st.data_stored = get_journal_bytes() + stats.used_bytes;
return st;
}
void print(std::ostream &, bool is_detailed) const final;
mount_ret mount() final;
void mark_space_used(paddr_t, extent_len_t) final;
void mark_space_free(paddr_t, extent_len_t) final;
void commit_space_used(paddr_t, extent_len_t) final;
bool try_reserve_projected_usage(std::size_t) final;
void release_projected_usage(size_t) final;
bool should_block_io_on_clean() const final {
return false;
}
bool can_clean_space() const final {
return false;
}
bool should_clean_space() const final {
return false;
}
clean_space_ret clean_space() final;
const std::set<device_id_t>& get_device_ids() const final {
return rb_group->get_device_ids();
}
std::size_t get_reclaim_size_per_cycle() const final {
return 0;
}
RandomBlockManager* get_rbm(paddr_t paddr) {
auto rbs = rb_group->get_rb_managers();
for (auto p : rbs) {
if (p->get_device_id() == paddr.get_device_id()) {
return p;
}
}
return nullptr;
}
paddr_t alloc_paddr(extent_len_t length) {
// TODO: implement allocation strategy (dirty metadata and multiple devices)
auto rbs = rb_group->get_rb_managers();
auto paddr = rbs[0]->alloc_extent(length);
stats.used_bytes += length;
return paddr;
}
size_t get_total_bytes() const {
auto rbs = rb_group->get_rb_managers();
size_t total = 0;
for (auto p : rbs) {
total += p->get_device()->get_available_size();
}
return total;
}
size_t get_journal_bytes() const {
auto rbs = rb_group->get_rb_managers();
size_t total = 0;
for (auto p : rbs) {
total += p->get_journal_size();
}
return total;
}
// Testing interfaces
bool check_usage() final;
bool check_usage_is_empty() const final {
// TODO
return true;
}
private:
bool equals(const RBMSpaceTracker &other) const;
const bool detailed;
RBMDeviceGroupRef rb_group;
BackrefManager &backref_manager;
struct {
/**
* used_bytes
*
* Bytes occupied by live extents
*/
uint64_t used_bytes = 0;
/**
* projected_used_bytes
*
* Sum of projected bytes used by each transaction between throttle
* acquisition and commit completion. See try_reserve_projected_usage()
*/
uint64_t projected_used_bytes = 0;
} stats;
seastar::metrics::metric_group metrics;
void register_metrics();
ExtentCallbackInterface *extent_callback = nullptr;
BackgroundListener *background_callback = nullptr;
};
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::segment_info_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::segments_info_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::AsyncCleaner::stat_printer_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::JournalTrimmerImpl::stat_printer_t> : fmt::ostream_formatter {};
#endif
| 47,920 | 26.196935 | 121 | h |
null | ceph-main/src/crimson/os/seastore/backref_manager.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/backref_manager.h"
#include "crimson/os/seastore/backref/btree_backref_manager.h"
namespace crimson::os::seastore {
BackrefManagerRef create_backref_manager(
Cache &cache)
{
return BackrefManagerRef(
new backref::BtreeBackrefManager(cache));
}
} // namespace crimson::os::seastore
| 470 | 23.789474 | 70 | cc |
null | ceph-main/src/crimson/os/seastore/backref_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/cached_extent.h"
#include "crimson/os/seastore/transaction.h"
namespace crimson::os::seastore {
/**
* Abstract interface for managing back references that map paddr_t to laddr_t
*/
class BackrefManager {
public:
using base_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
using base_iertr = trans_iertr<base_ertr>;
using mkfs_iertr = base_iertr;
using mkfs_ret = mkfs_iertr::future<>;
virtual mkfs_ret mkfs(
Transaction &t) = 0;
/**
* Fetches mappings for paddr_t in range [offset, end)
*
* Future will not resolve until all pins have resolved
*/
using get_mappings_iertr = base_iertr;
using get_mappings_ret = get_mappings_iertr::future<backref_pin_list_t>;
virtual get_mappings_ret get_mappings(
Transaction &t,
paddr_t offset,
paddr_t end) = 0;
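// Illustrative only (names are hypothetical): a caller wanting every
// backref overlapping a physical extent would pass its bounds, e.g.
// get_mappings(t, extent_start, extent_start.add_offset(extent_len)),
// and receive the matching pins once they have all resolved.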
/**
* Fetches the mapping for paddr_t
*
* Future will not resolve until the pin has resolved
*/
using get_mapping_iertr = base_iertr::extend<
crimson::ct_error::enoent>;
using get_mapping_ret = get_mapping_iertr::future<BackrefMappingRef>;
virtual get_mapping_ret get_mapping(
Transaction &t,
paddr_t offset) = 0;
/**
* rewrite_extent
*
* rewrite extent into passed transaction
*/
using rewrite_extent_iertr = base_iertr;
using rewrite_extent_ret = rewrite_extent_iertr::future<>;
virtual rewrite_extent_ret rewrite_extent(
Transaction &t,
CachedExtentRef extent) = 0;
/**
* Insert new paddr_t -> laddr_t mapping
*/
using new_mapping_iertr = base_iertr;
using new_mapping_ret = new_mapping_iertr::future<BackrefMappingRef>;
virtual new_mapping_ret new_mapping(
Transaction &t,
paddr_t key,
extent_len_t len,
laddr_t val,
extent_types_t type) = 0;
/**
* Check if a CachedExtent is alive, should be called
* after replay on each cached extent.
*
* @return returns whether the extent is alive
*/
using init_cached_extent_iertr = base_iertr;
using init_cached_extent_ret = init_cached_extent_iertr::future<bool>;
virtual init_cached_extent_ret init_cached_extent(
Transaction &t,
CachedExtentRef e) = 0;
virtual Cache::backref_entry_query_mset_t
get_cached_backref_entries_in_range(
paddr_t start,
paddr_t end) = 0;
using retrieve_backref_extents_in_range_iertr = base_iertr;
using retrieve_backref_extents_in_range_ret =
retrieve_backref_extents_in_range_iertr::future<std::vector<CachedExtentRef>>;
virtual retrieve_backref_extents_in_range_ret
retrieve_backref_extents_in_range(
Transaction &t,
paddr_t start,
paddr_t end) = 0;
virtual void cache_new_backref_extent(
paddr_t paddr,
paddr_t key,
extent_types_t type) = 0;
/**
* merge in-cache paddr_t -> laddr_t mappings to the on-disk backref tree
*/
using merge_cached_backrefs_iertr = base_iertr;
using merge_cached_backrefs_ret = merge_cached_backrefs_iertr::future<journal_seq_t>;
virtual merge_cached_backrefs_ret merge_cached_backrefs(
Transaction &t,
const journal_seq_t &limit,
const uint64_t max) = 0;
struct remove_mapping_result_t {
paddr_t offset = P_ADDR_NULL;
extent_len_t len = 0;
laddr_t laddr = L_ADDR_NULL;
};
/**
* delete the mapping for paddr_t offset
*/
using remove_mapping_iertr = base_iertr::extend<
crimson::ct_error::enoent>;
using remove_mapping_ret = remove_mapping_iertr::future<remove_mapping_result_t>;
virtual remove_mapping_ret remove_mapping(
Transaction &t,
paddr_t offset) = 0;
using check_child_trackers_ret = base_iertr::future<>;
virtual check_child_trackers_ret check_child_trackers(Transaction &t) = 0;
/**
* scan all extents in both tree and cache,
* including backref extents, logical extents and lba extents,
* visit them with scan_mapped_space_func_t
*/
using scan_mapped_space_iertr = base_iertr;
using scan_mapped_space_ret = scan_mapped_space_iertr::future<>;
using scan_mapped_space_func_t = std::function<
void(paddr_t, paddr_t, extent_len_t, extent_types_t, laddr_t)>;
virtual scan_mapped_space_ret scan_mapped_space(
Transaction &t,
scan_mapped_space_func_t &&f) = 0;
virtual ~BackrefManager() {}
};
using BackrefManagerRef =
std::unique_ptr<BackrefManager>;
BackrefManagerRef create_backref_manager(
Cache &cache);
} // namespace crimson::os::seastore
| 4,580 | 28.365385 | 87 | h |
null | ceph-main/src/crimson/os/seastore/cache.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "crimson/os/seastore/cache.h"
#include <sstream>
#include <string_view>
#include <seastar/core/metrics.hh>
#include "crimson/os/seastore/logging.h"
#include "crimson/common/config_proxy.h"
#include "crimson/os/seastore/async_cleaner.h"
// included for get_extent_by_type
#include "crimson/os/seastore/collection_manager/collection_flat_node.h"
#include "crimson/os/seastore/lba_manager/btree/lba_btree_node.h"
#include "crimson/os/seastore/omap_manager/btree/omap_btree_node_impl.h"
#include "crimson/os/seastore/object_data_handler.h"
#include "crimson/os/seastore/collection_manager/collection_flat_node.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager/seastore.h"
#include "crimson/os/seastore/backref/backref_tree_node.h"
#include "test/crimson/seastore/test_block.h"
using std::string_view;
SET_SUBSYS(seastore_cache);
namespace crimson::os::seastore {
std::ostream &operator<<(std::ostream &out, const backref_entry_t &ent) {
return out << "backref_entry_t{"
<< ent.paddr << "~" << ent.len << ", "
<< "laddr: " << ent.laddr << ", "
<< "type: " << ent.type << ", "
<< "seq: " << ent.seq << ", "
<< "}";
}
Cache::Cache(
ExtentPlacementManager &epm)
: epm(epm),
lru(crimson::common::get_conf<Option::size_t>(
"seastore_cache_lru_size"))
{
LOG_PREFIX(Cache::Cache);
INFO("created, lru_size={}", lru.get_capacity());
register_metrics();
segment_providers_by_device_id.resize(DEVICE_ID_MAX, nullptr);
}
Cache::~Cache()
{
LOG_PREFIX(Cache::~Cache);
for (auto &i: extents) {
ERROR("extent is still alive -- {}", i);
}
ceph_assert(extents.empty());
}
Cache::retire_extent_ret Cache::retire_extent_addr(
Transaction &t, paddr_t addr, extent_len_t length)
{
LOG_PREFIX(Cache::retire_extent_addr);
TRACET("retire {}~{}", t, addr, length);
assert(addr.is_real() && !addr.is_block_relative());
CachedExtentRef ext;
auto result = t.get_extent(addr, &ext);
if (result == Transaction::get_extent_ret::PRESENT) {
DEBUGT("retire {}~{} on t -- {}", t, addr, length, *ext);
t.add_to_retired_set(CachedExtentRef(&*ext));
return retire_extent_iertr::now();
} else if (result == Transaction::get_extent_ret::RETIRED) {
ERRORT("retire {}~{} failed, already retired -- {}", t, addr, length, *ext);
ceph_abort();
}
// any relative addr must have been on the transaction
assert(!addr.is_relative());
// absent from transaction
// retiring is not included by the cache hit metrics
ext = query_cache(addr, nullptr);
if (ext) {
DEBUGT("retire {}~{} in cache -- {}", t, addr, length, *ext);
if (ext->get_type() != extent_types_t::RETIRED_PLACEHOLDER) {
t.add_to_read_set(ext);
t.add_to_retired_set(ext);
return retire_extent_iertr::now();
}
// the retired-placeholder exists
} else {
// add a new placeholder to Cache
ext = CachedExtent::make_cached_extent_ref<
RetiredExtentPlaceholder>(length);
ext->init(CachedExtent::extent_state_t::CLEAN,
addr,
PLACEMENT_HINT_NULL,
NULL_GENERATION,
TRANS_ID_NULL);
DEBUGT("retire {}~{} as placeholder, add extent -- {}",
t, addr, length, *ext);
const auto t_src = t.get_src();
add_extent(ext, &t_src);
}
// add the retired-placeholder to transaction
t.add_to_read_set(ext);
t.add_to_retired_set(ext);
return retire_extent_iertr::now();
}
void Cache::dump_contents()
{
LOG_PREFIX(Cache::dump_contents);
DEBUG("enter");
for (auto &&i: extents) {
DEBUG("live {}", i);
}
DEBUG("exit");
}
void Cache::register_metrics()
{
LOG_PREFIX(Cache::register_metrics);
DEBUG("");
stats = {};
namespace sm = seastar::metrics;
using src_t = Transaction::src_t;
std::map<src_t, sm::label_instance> labels_by_src {
{src_t::MUTATE, sm::label_instance("src", "MUTATE")},
{src_t::READ, sm::label_instance("src", "READ")},
{src_t::TRIM_DIRTY, sm::label_instance("src", "TRIM_DIRTY")},
{src_t::TRIM_ALLOC, sm::label_instance("src", "TRIM_ALLOC")},
{src_t::CLEANER_MAIN, sm::label_instance("src", "CLEANER_MAIN")},
{src_t::CLEANER_COLD, sm::label_instance("src", "CLEANER_COLD")},
};
assert(labels_by_src.size() == (std::size_t)src_t::MAX);
std::map<extent_types_t, sm::label_instance> labels_by_ext {
{extent_types_t::ROOT, sm::label_instance("ext", "ROOT")},
{extent_types_t::LADDR_INTERNAL, sm::label_instance("ext", "LADDR_INTERNAL")},
{extent_types_t::LADDR_LEAF, sm::label_instance("ext", "LADDR_LEAF")},
{extent_types_t::DINK_LADDR_LEAF, sm::label_instance("ext", "DINK_LADDR_LEAF")},
{extent_types_t::OMAP_INNER, sm::label_instance("ext", "OMAP_INNER")},
{extent_types_t::OMAP_LEAF, sm::label_instance("ext", "OMAP_LEAF")},
{extent_types_t::ONODE_BLOCK_STAGED, sm::label_instance("ext", "ONODE_BLOCK_STAGED")},
{extent_types_t::COLL_BLOCK, sm::label_instance("ext", "COLL_BLOCK")},
{extent_types_t::OBJECT_DATA_BLOCK, sm::label_instance("ext", "OBJECT_DATA_BLOCK")},
{extent_types_t::RETIRED_PLACEHOLDER, sm::label_instance("ext", "RETIRED_PLACEHOLDER")},
{extent_types_t::ALLOC_INFO, sm::label_instance("ext", "ALLOC_INFO")},
{extent_types_t::JOURNAL_TAIL, sm::label_instance("ext", "JOURNAL_TAIL")},
{extent_types_t::TEST_BLOCK, sm::label_instance("ext", "TEST_BLOCK")},
{extent_types_t::TEST_BLOCK_PHYSICAL, sm::label_instance("ext", "TEST_BLOCK_PHYSICAL")},
{extent_types_t::BACKREF_INTERNAL, sm::label_instance("ext", "BACKREF_INTERNAL")},
{extent_types_t::BACKREF_LEAF, sm::label_instance("ext", "BACKREF_LEAF")}
};
assert(labels_by_ext.size() == (std::size_t)extent_types_t::NONE);
/*
* trans_created
*/
for (auto& [src, src_label] : labels_by_src) {
metrics.add_group(
"cache",
{
sm::make_counter(
"trans_created",
get_by_src(stats.trans_created_by_src, src),
sm::description("total number of transaction created"),
{src_label}
),
}
);
}
/*
* cache_query: cache_access and cache_hit
*/
for (auto& [src, src_label] : labels_by_src) {
metrics.add_group(
"cache",
{
sm::make_counter(
"cache_access",
get_by_src(stats.cache_query_by_src, src).access,
sm::description("total number of cache accesses"),
{src_label}
),
sm::make_counter(
"cache_hit",
get_by_src(stats.cache_query_by_src, src).hit,
sm::description("total number of cache hits"),
{src_label}
),
}
);
}
{
/*
* efforts discarded/committed
*/
auto effort_label = sm::label("effort");
// invalidated efforts
using namespace std::literals::string_view_literals;
const string_view invalidated_effort_names[] = {
"READ"sv,
"MUTATE"sv,
"RETIRE"sv,
"FRESH"sv,
"FRESH_OOL_WRITTEN"sv,
};
for (auto& [src, src_label] : labels_by_src) {
auto& efforts = get_by_src(stats.invalidated_efforts_by_src, src);
for (auto& [ext, ext_label] : labels_by_ext) {
auto& counter = get_by_ext(efforts.num_trans_invalidated, ext);
metrics.add_group(
"cache",
{
sm::make_counter(
"trans_invalidated_by_extent",
counter,
sm::description("total number of transactions invalidated by extents"),
{src_label, ext_label}
),
}
);
}
if (src == src_t::READ) {
// read transaction won't have non-read efforts
auto read_effort_label = effort_label("READ");
metrics.add_group(
"cache",
{
sm::make_counter(
"invalidated_extents",
efforts.read.num,
sm::description("extents of invalidated transactions"),
{src_label, read_effort_label}
),
sm::make_counter(
"invalidated_extent_bytes",
efforts.read.bytes,
sm::description("extent bytes of invalidated transactions"),
{src_label, read_effort_label}
),
}
);
continue;
}
// non READ invalidated efforts
for (auto& effort_name : invalidated_effort_names) {
auto& effort = [&effort_name, &efforts]() -> io_stat_t& {
if (effort_name == "READ") {
return efforts.read;
} else if (effort_name == "MUTATE") {
return efforts.mutate;
} else if (effort_name == "RETIRE") {
return efforts.retire;
} else if (effort_name == "FRESH") {
return efforts.fresh;
} else {
assert(effort_name == "FRESH_OOL_WRITTEN");
return efforts.fresh_ool_written;
}
}();
metrics.add_group(
"cache",
{
sm::make_counter(
"invalidated_extents",
effort.num,
sm::description("extents of invalidated transactions"),
{src_label, effort_label(effort_name)}
),
sm::make_counter(
"invalidated_extent_bytes",
effort.bytes,
sm::description("extent bytes of invalidated transactions"),
{src_label, effort_label(effort_name)}
),
}
);
} // effort_name
metrics.add_group(
"cache",
{
sm::make_counter(
"trans_invalidated",
efforts.total_trans_invalidated,
sm::description("total number of transactions invalidated"),
{src_label}
),
sm::make_counter(
"invalidated_delta_bytes",
efforts.mutate_delta_bytes,
sm::description("delta bytes of invalidated transactions"),
{src_label}
),
sm::make_counter(
"invalidated_ool_records",
efforts.num_ool_records,
sm::description("number of ool-records from invalidated transactions"),
{src_label}
),
sm::make_counter(
"invalidated_ool_record_bytes",
efforts.ool_record_bytes,
sm::description("bytes of ool-record from invalidated transactions"),
{src_label}
),
}
);
} // src
// committed efforts
const string_view committed_effort_names[] = {
"READ"sv,
"MUTATE"sv,
"RETIRE"sv,
"FRESH_INVALID"sv,
"FRESH_INLINE"sv,
"FRESH_OOL"sv,
};
for (auto& [src, src_label] : labels_by_src) {
if (src == src_t::READ) {
// READ transaction won't commit
continue;
}
auto& efforts = get_by_src(stats.committed_efforts_by_src, src);
metrics.add_group(
"cache",
{
sm::make_counter(
"trans_committed",
efforts.num_trans,
sm::description("total number of transaction committed"),
{src_label}
),
sm::make_counter(
"committed_ool_records",
efforts.num_ool_records,
sm::description("number of ool-records from committed transactions"),
{src_label}
),
sm::make_counter(
"committed_ool_record_metadata_bytes",
efforts.ool_record_metadata_bytes,
sm::description("bytes of ool-record metadata from committed transactions"),
{src_label}
),
sm::make_counter(
"committed_ool_record_data_bytes",
efforts.ool_record_data_bytes,
sm::description("bytes of ool-record data from committed transactions"),
{src_label}
),
sm::make_counter(
"committed_inline_record_metadata_bytes",
efforts.inline_record_metadata_bytes,
sm::description("bytes of inline-record metadata from committed transactions"
"(excludes delta buffer)"),
{src_label}
),
}
);
for (auto& effort_name : committed_effort_names) {
auto& effort_by_ext = [&efforts, &effort_name]()
-> counter_by_extent_t<io_stat_t>& {
if (effort_name == "READ") {
return efforts.read_by_ext;
} else if (effort_name == "MUTATE") {
return efforts.mutate_by_ext;
} else if (effort_name == "RETIRE") {
return efforts.retire_by_ext;
} else if (effort_name == "FRESH_INVALID") {
return efforts.fresh_invalid_by_ext;
} else if (effort_name == "FRESH_INLINE") {
return efforts.fresh_inline_by_ext;
} else {
assert(effort_name == "FRESH_OOL");
return efforts.fresh_ool_by_ext;
}
}();
for (auto& [ext, ext_label] : labels_by_ext) {
auto& effort = get_by_ext(effort_by_ext, ext);
metrics.add_group(
"cache",
{
sm::make_counter(
"committed_extents",
effort.num,
sm::description("extents of committed transactions"),
{src_label, effort_label(effort_name), ext_label}
),
sm::make_counter(
"committed_extent_bytes",
effort.bytes,
sm::description("extent bytes of committed transactions"),
{src_label, effort_label(effort_name), ext_label}
),
}
);
} // ext
} // effort_name
auto& delta_by_ext = efforts.delta_bytes_by_ext;
for (auto& [ext, ext_label] : labels_by_ext) {
auto& value = get_by_ext(delta_by_ext, ext);
metrics.add_group(
"cache",
{
sm::make_counter(
"committed_delta_bytes",
value,
sm::description("delta bytes of committed transactions"),
{src_label, ext_label}
),
}
);
} // ext
} // src
// successful read efforts
metrics.add_group(
"cache",
{
sm::make_counter(
"trans_read_successful",
stats.success_read_efforts.num_trans,
sm::description("total number of successful read transactions")
),
sm::make_counter(
"successful_read_extents",
stats.success_read_efforts.read.num,
sm::description("extents of successful read transactions")
),
sm::make_counter(
"successful_read_extent_bytes",
stats.success_read_efforts.read.bytes,
sm::description("extent bytes of successful read transactions")
),
}
);
}
/**
* Cached extents (including placeholders)
*
* Dirty extents
*/
metrics.add_group(
"cache",
{
sm::make_counter(
"cached_extents",
[this] {
return extents.size();
},
sm::description("total number of cached extents")
),
sm::make_counter(
"cached_extent_bytes",
[this] {
return extents.get_bytes();
},
sm::description("total bytes of cached extents")
),
sm::make_counter(
"dirty_extents",
[this] {
return dirty.size();
},
sm::description("total number of dirty extents")
),
sm::make_counter(
"dirty_extent_bytes",
stats.dirty_bytes,
sm::description("total bytes of dirty extents")
),
sm::make_counter(
"cache_lru_size_bytes",
[this] {
return lru.get_current_contents_bytes();
},
sm::description("total bytes pinned by the lru")
),
sm::make_counter(
"cache_lru_size_extents",
[this] {
return lru.get_current_contents_extents();
},
sm::description("total extents pinned by the lru")
),
}
);
/**
* tree stats
*/
auto tree_label = sm::label("tree");
auto onode_label = tree_label("ONODE");
auto omap_label = tree_label("OMAP");
auto lba_label = tree_label("LBA");
auto backref_label = tree_label("BACKREF");
auto register_tree_metrics = [&labels_by_src, &onode_label, &omap_label, this](
const sm::label_instance& tree_label,
uint64_t& tree_depth,
int64_t& tree_extents_num,
counter_by_src_t<tree_efforts_t>& committed_tree_efforts,
counter_by_src_t<tree_efforts_t>& invalidated_tree_efforts) {
metrics.add_group(
"cache",
{
sm::make_counter(
"tree_depth",
tree_depth,
sm::description("the depth of tree"),
{tree_label}
),
sm::make_counter(
"tree_extents_num",
tree_extents_num,
sm::description("num of extents of the tree"),
{tree_label}
)
}
);
for (auto& [src, src_label] : labels_by_src) {
if (src == src_t::READ) {
// READ transaction won't contain any tree inserts and erases
continue;
}
if (is_background_transaction(src) &&
(tree_label == onode_label ||
tree_label == omap_label)) {
// CLEANER transaction won't contain any onode/omap tree operations
continue;
}
auto& committed_efforts = get_by_src(committed_tree_efforts, src);
auto& invalidated_efforts = get_by_src(invalidated_tree_efforts, src);
metrics.add_group(
"cache",
{
sm::make_counter(
"tree_inserts_committed",
committed_efforts.num_inserts,
sm::description("total number of committed insert operations"),
{tree_label, src_label}
),
sm::make_counter(
"tree_erases_committed",
committed_efforts.num_erases,
sm::description("total number of committed erase operations"),
{tree_label, src_label}
),
sm::make_counter(
"tree_updates_committed",
committed_efforts.num_updates,
sm::description("total number of committed update operations"),
{tree_label, src_label}
),
sm::make_counter(
"tree_inserts_invalidated",
invalidated_efforts.num_inserts,
sm::description("total number of invalidated insert operations"),
{tree_label, src_label}
),
sm::make_counter(
"tree_erases_invalidated",
invalidated_efforts.num_erases,
sm::description("total number of invalidated erase operations"),
{tree_label, src_label}
),
sm::make_counter(
"tree_updates_invalidated",
invalidated_efforts.num_updates,
sm::description("total number of invalidated update operations"),
{tree_label, src_label}
),
}
);
}
};
register_tree_metrics(
onode_label,
stats.onode_tree_depth,
stats.onode_tree_extents_num,
stats.committed_onode_tree_efforts,
stats.invalidated_onode_tree_efforts);
register_tree_metrics(
omap_label,
stats.omap_tree_depth,
stats.omap_tree_extents_num,
stats.committed_omap_tree_efforts,
stats.invalidated_omap_tree_efforts);
register_tree_metrics(
lba_label,
stats.lba_tree_depth,
stats.lba_tree_extents_num,
stats.committed_lba_tree_efforts,
stats.invalidated_lba_tree_efforts);
register_tree_metrics(
backref_label,
stats.backref_tree_depth,
stats.backref_tree_extents_num,
stats.committed_backref_tree_efforts,
stats.invalidated_backref_tree_efforts);
/**
* conflict combinations
*/
auto srcs_label = sm::label("srcs");
auto num_srcs = static_cast<std::size_t>(Transaction::src_t::MAX);
std::size_t srcs_index = 0;
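// The nested loops below enumerate unordered src pairs (src1 >= src2) in
// triangular order; with N sources that is N * (N + 1) / 2 combinations,
// which the assert after the loop expects to equal NUM_SRC_COMB.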
for (uint8_t src2_int = 0; src2_int < num_srcs; ++src2_int) {
auto src2 = static_cast<Transaction::src_t>(src2_int);
for (uint8_t src1_int = src2_int; src1_int < num_srcs; ++src1_int) {
++srcs_index;
auto src1 = static_cast<Transaction::src_t>(src1_int);
// impossible combinations
// should be consistent with checks in account_conflict()
if ((src1 == Transaction::src_t::READ &&
src2 == Transaction::src_t::READ) ||
(src1 == Transaction::src_t::TRIM_DIRTY &&
src2 == Transaction::src_t::TRIM_DIRTY) ||
(src1 == Transaction::src_t::CLEANER_MAIN &&
src2 == Transaction::src_t::CLEANER_MAIN) ||
(src1 == Transaction::src_t::CLEANER_COLD &&
src2 == Transaction::src_t::CLEANER_COLD) ||
(src1 == Transaction::src_t::TRIM_ALLOC &&
src2 == Transaction::src_t::TRIM_ALLOC)) {
continue;
}
std::ostringstream oss;
oss << src1 << "," << src2;
metrics.add_group(
"cache",
{
sm::make_counter(
"trans_srcs_invalidated",
stats.trans_conflicts_by_srcs[srcs_index - 1],
sm::description("total number conflicted transactions by src pair"),
{srcs_label(oss.str())}
),
}
);
}
}
assert(srcs_index == NUM_SRC_COMB);
srcs_index = 0;
for (uint8_t src_int = 0; src_int < num_srcs; ++src_int) {
++srcs_index;
auto src = static_cast<Transaction::src_t>(src_int);
std::ostringstream oss;
oss << "UNKNOWN," << src;
metrics.add_group(
"cache",
{
sm::make_counter(
"trans_srcs_invalidated",
stats.trans_conflicts_by_unknown[srcs_index - 1],
sm::description("total number conflicted transactions by src pair"),
{srcs_label(oss.str())}
),
}
);
}
/**
* rewrite version
*/
metrics.add_group(
"cache",
{
sm::make_counter(
"version_count_dirty",
stats.committed_dirty_version.num,
sm::description("total number of rewrite-dirty extents")
),
sm::make_counter(
"version_sum_dirty",
stats.committed_dirty_version.version,
sm::description("sum of the version from rewrite-dirty extents")
),
sm::make_counter(
"version_count_reclaim",
stats.committed_reclaim_version.num,
sm::description("total number of rewrite-reclaim extents")
),
sm::make_counter(
"version_sum_reclaim",
stats.committed_reclaim_version.version,
sm::description("sum of the version from rewrite-reclaim extents")
),
}
);
}
void Cache::add_extent(
CachedExtentRef ref,
const Transaction::src_t* p_src=nullptr)
{
assert(ref->is_valid());
assert(ref->user_hint == PLACEMENT_HINT_NULL);
assert(ref->rewrite_generation == NULL_GENERATION);
extents.insert(*ref);
if (ref->is_dirty()) {
add_to_dirty(ref);
} else {
touch_extent(*ref, p_src);
}
}
void Cache::mark_dirty(CachedExtentRef ref)
{
if (ref->is_dirty()) {
assert(ref->primary_ref_list_hook.is_linked());
return;
}
lru.remove_from_lru(*ref);
ref->state = CachedExtent::extent_state_t::DIRTY;
add_to_dirty(ref);
}
void Cache::add_to_dirty(CachedExtentRef ref)
{
assert(ref->is_dirty());
assert(!ref->primary_ref_list_hook.is_linked());
ceph_assert(ref->get_modify_time() != NULL_TIME);
intrusive_ptr_add_ref(&*ref);
dirty.push_back(*ref);
stats.dirty_bytes += ref->get_length();
}
void Cache::remove_from_dirty(CachedExtentRef ref)
{
if (ref->is_dirty()) {
ceph_assert(ref->primary_ref_list_hook.is_linked());
stats.dirty_bytes -= ref->get_length();
dirty.erase(dirty.s_iterator_to(*ref));
intrusive_ptr_release(&*ref);
} else {
ceph_assert(!ref->primary_ref_list_hook.is_linked());
}
}
void Cache::remove_extent(CachedExtentRef ref)
{
assert(ref->is_valid());
if (ref->is_dirty()) {
remove_from_dirty(ref);
} else if (!ref->is_placeholder()) {
lru.remove_from_lru(*ref);
}
extents.erase(*ref);
}
void Cache::commit_retire_extent(
Transaction& t,
CachedExtentRef ref)
{
remove_extent(ref);
ref->dirty_from_or_retired_at = JOURNAL_SEQ_NULL;
invalidate_extent(t, *ref);
}
void Cache::commit_replace_extent(
Transaction& t,
CachedExtentRef next,
CachedExtentRef prev)
{
assert(next->is_dirty());
assert(next->get_paddr() == prev->get_paddr());
assert(next->version == prev->version + 1);
extents.replace(*next, *prev);
if (prev->get_type() == extent_types_t::ROOT) {
assert(prev->is_clean()
|| prev->primary_ref_list_hook.is_linked());
if (prev->is_dirty()) {
stats.dirty_bytes -= prev->get_length();
dirty.erase(dirty.s_iterator_to(*prev));
intrusive_ptr_release(&*prev);
}
add_to_dirty(next);
} else if (prev->is_dirty()) {
assert(prev->get_dirty_from() == next->get_dirty_from());
assert(prev->primary_ref_list_hook.is_linked());
auto prev_it = dirty.iterator_to(*prev);
dirty.insert(prev_it, *next);
dirty.erase(prev_it);
intrusive_ptr_release(&*prev);
intrusive_ptr_add_ref(&*next);
} else {
lru.remove_from_lru(*prev);
add_to_dirty(next);
}
next->on_replace_prior(t);
invalidate_extent(t, *prev);
}
void Cache::invalidate_extent(
Transaction& t,
CachedExtent& extent)
{
if (!extent.may_conflict()) {
assert(extent.transactions.empty());
extent.set_invalid(t);
return;
}
LOG_PREFIX(Cache::invalidate_extent);
bool do_conflict_log = true;
for (auto &&i: extent.transactions) {
if (!i.t->conflicted) {
if (do_conflict_log) {
SUBDEBUGT(seastore_t, "conflict begin -- {}", t, extent);
do_conflict_log = false;
}
assert(!i.t->is_weak());
account_conflict(t.get_src(), i.t->get_src());
mark_transaction_conflicted(*i.t, extent);
}
}
extent.set_invalid(t);
}
void Cache::mark_transaction_conflicted(
Transaction& t, CachedExtent& conflicting_extent)
{
LOG_PREFIX(Cache::mark_transaction_conflicted);
SUBTRACET(seastore_t, "", t);
assert(!t.conflicted);
t.conflicted = true;
auto& efforts = get_by_src(stats.invalidated_efforts_by_src,
t.get_src());
++efforts.total_trans_invalidated;
auto& counter = get_by_ext(efforts.num_trans_invalidated,
conflicting_extent.get_type());
++counter;
io_stat_t read_stat;
for (auto &i: t.read_set) {
read_stat.increment(i.ref->get_length());
}
efforts.read.increment_stat(read_stat);
if (t.get_src() != Transaction::src_t::READ) {
io_stat_t retire_stat;
for (auto &i: t.retired_set) {
retire_stat.increment(i->get_length());
}
efforts.retire.increment_stat(retire_stat);
auto& fresh_stat = t.get_fresh_block_stats();
efforts.fresh.increment_stat(fresh_stat);
io_stat_t delta_stat;
for (auto &i: t.mutated_block_list) {
if (!i->is_valid()) {
continue;
}
efforts.mutate.increment(i->get_length());
delta_stat.increment(i->get_delta().length());
}
efforts.mutate_delta_bytes += delta_stat.bytes;
for (auto &i: t.pre_alloc_list) {
epm.mark_space_free(i->get_paddr(), i->get_length());
}
auto& ool_stats = t.get_ool_write_stats();
efforts.fresh_ool_written.increment_stat(ool_stats.extents);
efforts.num_ool_records += ool_stats.num_records;
auto ool_record_bytes = (ool_stats.md_bytes + ool_stats.get_data_bytes());
efforts.ool_record_bytes += ool_record_bytes;
if (is_background_transaction(t.get_src())) {
// CLEANER transaction won't contain any onode/omap tree operations
assert(t.onode_tree_stats.is_clear());
assert(t.omap_tree_stats.is_clear());
} else {
get_by_src(stats.invalidated_onode_tree_efforts, t.get_src()
).increment(t.onode_tree_stats);
get_by_src(stats.invalidated_omap_tree_efforts, t.get_src()
).increment(t.omap_tree_stats);
}
get_by_src(stats.invalidated_lba_tree_efforts, t.get_src()
).increment(t.lba_tree_stats);
get_by_src(stats.invalidated_backref_tree_efforts, t.get_src()
).increment(t.backref_tree_stats);
SUBDEBUGT(seastore_t,
"discard {} read, {} fresh, {} delta, {} retire, {}({}B) ool-records",
t,
read_stat,
fresh_stat,
delta_stat,
retire_stat,
ool_stats.num_records,
ool_record_bytes);
} else {
// read transaction won't have non-read efforts
assert(t.retired_set.empty());
assert(t.get_fresh_block_stats().is_clear());
assert(t.mutated_block_list.empty());
assert(t.get_ool_write_stats().is_clear());
assert(t.onode_tree_stats.is_clear());
assert(t.omap_tree_stats.is_clear());
assert(t.lba_tree_stats.is_clear());
assert(t.backref_tree_stats.is_clear());
SUBDEBUGT(seastore_t, "discard {} read", t, read_stat);
}
}
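// Account a read transaction's efforts when it is destroyed without having conflicted.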
void Cache::on_transaction_destruct(Transaction& t)
{
LOG_PREFIX(Cache::on_transaction_destruct);
SUBTRACET(seastore_t, "", t);
if (t.get_src() == Transaction::src_t::READ &&
t.conflicted == false) {
io_stat_t read_stat;
for (auto &i: t.read_set) {
read_stat.increment(i.ref->get_length());
}
SUBDEBUGT(seastore_t, "done {} read", t, read_stat);
if (!t.is_weak()) {
// exclude weak transaction as it is impossible to conflict
++stats.success_read_efforts.num_trans;
stats.success_read_efforts.read.increment_stat(read_stat);
}
// read transaction won't have non-read efforts
assert(t.retired_set.empty());
assert(t.get_fresh_block_stats().is_clear());
assert(t.mutated_block_list.empty());
assert(t.onode_tree_stats.is_clear());
assert(t.omap_tree_stats.is_clear());
assert(t.lba_tree_stats.is_clear());
assert(t.backref_tree_stats.is_clear());
}
}
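// Dispatch to the typed alloc_new_extent<T>() based on the runtime extent type.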
CachedExtentRef Cache::alloc_new_extent_by_type(
Transaction &t, ///< [in, out] current transaction
extent_types_t type, ///< [in] type tag
extent_len_t length, ///< [in] length
placement_hint_t hint, ///< [in] user hint
rewrite_gen_t gen ///< [in] rewrite generation
)
{
LOG_PREFIX(Cache::alloc_new_extent_by_type);
SUBDEBUGT(seastore_cache, "allocate {} {}B, hint={}, gen={}",
t, type, length, hint, rewrite_gen_printer_t{gen});
switch (type) {
case extent_types_t::ROOT:
ceph_assert(0 == "ROOT is never directly alloc'd");
return CachedExtentRef();
case extent_types_t::LADDR_INTERNAL:
return alloc_new_extent<lba_manager::btree::LBAInternalNode>(t, length, hint, gen);
case extent_types_t::LADDR_LEAF:
return alloc_new_extent<lba_manager::btree::LBALeafNode>(
t, length, hint, gen);
case extent_types_t::ONODE_BLOCK_STAGED:
return alloc_new_extent<onode::SeastoreNodeExtent>(t, length, hint, gen);
case extent_types_t::OMAP_INNER:
return alloc_new_extent<omap_manager::OMapInnerNode>(t, length, hint, gen);
case extent_types_t::OMAP_LEAF:
return alloc_new_extent<omap_manager::OMapLeafNode>(t, length, hint, gen);
case extent_types_t::COLL_BLOCK:
return alloc_new_extent<collection_manager::CollectionNode>(t, length, hint, gen);
case extent_types_t::OBJECT_DATA_BLOCK:
return alloc_new_extent<ObjectDataBlock>(t, length, hint, gen);
case extent_types_t::RETIRED_PLACEHOLDER:
ceph_assert(0 == "impossible");
return CachedExtentRef();
case extent_types_t::TEST_BLOCK:
return alloc_new_extent<TestBlock>(t, length, hint, gen);
case extent_types_t::TEST_BLOCK_PHYSICAL:
return alloc_new_extent<TestBlockPhysical>(t, length, hint, gen);
case extent_types_t::NONE: {
ceph_assert(0 == "NONE is an invalid extent type");
return CachedExtentRef();
}
default:
ceph_assert(0 == "impossible");
return CachedExtentRef();
}
}
CachedExtentRef Cache::duplicate_for_write(
Transaction &t,
CachedExtentRef i) {
LOG_PREFIX(Cache::duplicate_for_write);
assert(i->is_fully_loaded());
if (i->is_mutable())
return i;
if (i->is_exist_clean()) {
i->version++;
i->state = CachedExtent::extent_state_t::EXIST_MUTATION_PENDING;
i->last_committed_crc = i->get_crc32c();
    // deep-copy the buffer of the exist-clean extent because it shares
    // the buffer with the original clean extent.
auto bp = i->get_bptr();
auto nbp = ceph::bufferptr(bp.c_str(), bp.length());
i->set_bptr(std::move(nbp));
t.add_mutated_extent(i);
DEBUGT("duplicate existing extent {}", t, *i);
return i;
}
auto ret = i->duplicate_for_write(t);
ret->pending_for_transaction = t.get_trans_id();
ret->prior_instance = i;
  // duplicate_for_write won't occur after the ool write has finished
assert(!i->prior_poffset);
auto [iter, inserted] = i->mutation_pendings.insert(*ret);
ceph_assert(inserted);
t.add_mutated_extent(ret);
if (ret->get_type() == extent_types_t::ROOT) {
t.root = ret->cast<RootBlock>();
} else {
ret->last_committed_crc = i->last_committed_crc;
}
ret->version++;
ret->state = CachedExtent::extent_state_t::MUTATION_PENDING;
DEBUGT("{} -> {}", t, *i, *ret);
return ret;
}
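// Assemble the journal record for a committing transaction: deltas for mutated
// extents, inline fresh extent data, backref allocation deltas, and, for
// background transactions, the updated journal tails.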
record_t Cache::prepare_record(
Transaction &t,
const journal_seq_t &journal_head,
const journal_seq_t &journal_dirty_tail)
{
LOG_PREFIX(Cache::prepare_record);
SUBTRACET(seastore_t, "enter", t);
auto trans_src = t.get_src();
assert(!t.is_weak());
assert(trans_src != Transaction::src_t::READ);
auto& efforts = get_by_src(stats.committed_efforts_by_src,
trans_src);
// Should be valid due to interruptible future
io_stat_t read_stat;
for (auto &i: t.read_set) {
if (!i.ref->is_valid()) {
SUBERRORT(seastore_t,
"read_set got invalid extent, aborting -- {}", t, *i.ref);
ceph_abort("no invalid extent allowed in transactions' read_set");
}
get_by_ext(efforts.read_by_ext,
i.ref->get_type()).increment(i.ref->get_length());
read_stat.increment(i.ref->get_length());
}
t.read_set.clear();
t.write_set.clear();
record_t record(trans_src);
auto commit_time = seastar::lowres_system_clock::now();
// Add new copy of mutated blocks, set_io_wait to block until written
record.deltas.reserve(t.mutated_block_list.size());
io_stat_t delta_stat;
for (auto &i: t.mutated_block_list) {
if (!i->is_valid()) {
DEBUGT("invalid mutated extent -- {}", t, *i);
continue;
}
assert(i->is_exist_mutation_pending() ||
i->prior_instance);
get_by_ext(efforts.mutate_by_ext,
i->get_type()).increment(i->get_length());
auto delta_bl = i->get_delta();
auto delta_length = delta_bl.length();
i->set_modify_time(commit_time);
DEBUGT("mutated extent with {}B delta -- {}",
t, delta_length, *i);
if (!i->is_exist_mutation_pending()) {
DEBUGT("commit replace extent ... -- {}, prior={}",
t, *i, *i->prior_instance);
      // extents with EXIST_MUTATION_PENDING don't have a
      // prior_instance field, so they are skipped here; such
      // existing extents are added into Cache during
      // complete_commit to stay in sync with the gc transaction.
commit_replace_extent(t, i, i->prior_instance);
}
i->prepare_write();
i->set_io_wait();
i->prepare_commit();
assert(i->get_version() > 0);
auto final_crc = i->get_crc32c();
if (i->get_type() == extent_types_t::ROOT) {
SUBTRACET(seastore_t, "writing out root delta {}B -- {}",
t, delta_length, *i);
assert(t.root == i);
root = t.root;
record.push_back(
delta_info_t{
extent_types_t::ROOT,
P_ADDR_NULL,
L_ADDR_NULL,
0,
0,
0,
t.root->get_version() - 1,
MAX_SEG_SEQ,
segment_type_t::NULL_SEG,
std::move(delta_bl)
});
} else {
auto sseq = NULL_SEG_SEQ;
auto stype = segment_type_t::NULL_SEG;
// FIXME: This is specific to the segmented implementation
if (i->get_paddr().get_addr_type() == paddr_types_t::SEGMENT) {
auto sid = i->get_paddr().as_seg_paddr().get_segment_id();
auto sinfo = get_segment_info(sid);
if (sinfo) {
sseq = sinfo->seq;
stype = sinfo->type;
}
}
record.push_back(
delta_info_t{
i->get_type(),
i->get_paddr(),
(i->is_logical()
? i->cast<LogicalCachedExtent>()->get_laddr()
: L_ADDR_NULL),
i->last_committed_crc,
final_crc,
i->get_length(),
i->get_version() - 1,
sseq,
stype,
std::move(delta_bl)
});
i->last_committed_crc = final_crc;
}
assert(delta_length);
get_by_ext(efforts.delta_bytes_by_ext,
i->get_type()) += delta_length;
delta_stat.increment(delta_length);
}
  // Transaction is now a go, set up in-memory cache state
  // and invalidate the now-invalid blocks
io_stat_t retire_stat;
std::vector<alloc_delta_t> alloc_deltas;
alloc_delta_t rel_delta;
rel_delta.op = alloc_delta_t::op_types_t::CLEAR;
for (auto &i: t.retired_set) {
get_by_ext(efforts.retire_by_ext,
i->get_type()).increment(i->get_length());
retire_stat.increment(i->get_length());
DEBUGT("retired and remove extent -- {}", t, *i);
commit_retire_extent(t, i);
if (is_backref_mapped_extent_node(i)
|| is_retired_placeholder(i->get_type())) {
rel_delta.alloc_blk_ranges.emplace_back(
i->get_paddr(),
L_ADDR_NULL,
i->get_length(),
i->get_type());
}
}
alloc_deltas.emplace_back(std::move(rel_delta));
record.extents.reserve(t.inline_block_list.size());
io_stat_t fresh_stat;
io_stat_t fresh_invalid_stat;
alloc_delta_t alloc_delta;
alloc_delta.op = alloc_delta_t::op_types_t::SET;
for (auto &i: t.inline_block_list) {
if (!i->is_valid()) {
DEBUGT("invalid fresh inline extent -- {}", t, *i);
fresh_invalid_stat.increment(i->get_length());
get_by_ext(efforts.fresh_invalid_by_ext,
i->get_type()).increment(i->get_length());
} else {
TRACET("fresh inline extent -- {}", t, *i);
}
fresh_stat.increment(i->get_length());
get_by_ext(efforts.fresh_inline_by_ext,
i->get_type()).increment(i->get_length());
assert(i->is_inline() || i->get_paddr().is_fake());
bufferlist bl;
i->prepare_write();
i->prepare_commit();
bl.append(i->get_bptr());
if (i->get_type() == extent_types_t::ROOT) {
ceph_assert(0 == "ROOT never gets written as a fresh block");
}
assert(bl.length() == i->get_length());
auto modify_time = i->get_modify_time();
if (modify_time == NULL_TIME) {
modify_time = commit_time;
}
record.push_back(extent_t{
i->get_type(),
i->is_logical()
? i->cast<LogicalCachedExtent>()->get_laddr()
: (is_lba_node(i->get_type())
? i->cast<lba_manager::btree::LBANode>()->get_node_meta().begin
: L_ADDR_NULL),
std::move(bl)
},
modify_time);
if (i->is_valid()
&& is_backref_mapped_extent_node(i)) {
alloc_delta.alloc_blk_ranges.emplace_back(
i->get_paddr(),
i->is_logical()
? i->cast<LogicalCachedExtent>()->get_laddr()
: (is_lba_node(i->get_type())
? i->cast<lba_manager::btree::LBANode>()->get_node_meta().begin
: L_ADDR_NULL),
i->get_length(),
i->get_type());
}
}
for (auto &i: t.written_ool_block_list) {
TRACET("fresh ool extent -- {}", t, *i);
ceph_assert(i->is_valid());
assert(!i->is_inline());
get_by_ext(efforts.fresh_ool_by_ext,
i->get_type()).increment(i->get_length());
i->prepare_commit();
if (is_backref_mapped_extent_node(i)) {
alloc_delta.alloc_blk_ranges.emplace_back(
i->get_paddr(),
i->is_logical()
? i->cast<LogicalCachedExtent>()->get_laddr()
: i->cast<lba_manager::btree::LBANode>()->get_node_meta().begin,
i->get_length(),
i->get_type());
}
}
for (auto &i: t.existing_block_list) {
if (i->is_valid()) {
alloc_delta.alloc_blk_ranges.emplace_back(
i->get_paddr(),
i->cast<LogicalCachedExtent>()->get_laddr(),
i->get_length(),
i->get_type());
}
}
alloc_deltas.emplace_back(std::move(alloc_delta));
for (auto b : alloc_deltas) {
bufferlist bl;
encode(b, bl);
delta_info_t delta;
delta.type = extent_types_t::ALLOC_INFO;
delta.bl = bl;
record.push_back(std::move(delta));
}
if (is_background_transaction(trans_src)) {
assert(journal_head != JOURNAL_SEQ_NULL);
assert(journal_dirty_tail != JOURNAL_SEQ_NULL);
journal_seq_t dirty_tail;
auto maybe_dirty_tail = get_oldest_dirty_from();
if (!maybe_dirty_tail.has_value()) {
dirty_tail = journal_head;
SUBINFOT(seastore_t, "dirty_tail all trimmed, set to head {}, src={}",
t, dirty_tail, trans_src);
} else if (*maybe_dirty_tail == JOURNAL_SEQ_NULL) {
dirty_tail = journal_dirty_tail;
SUBINFOT(seastore_t, "dirty_tail is pending, set to {}, src={}",
t, dirty_tail, trans_src);
} else {
dirty_tail = *maybe_dirty_tail;
}
ceph_assert(dirty_tail != JOURNAL_SEQ_NULL);
journal_seq_t alloc_tail;
auto maybe_alloc_tail = get_oldest_backref_dirty_from();
if (!maybe_alloc_tail.has_value()) {
      // FIXME: the replay point of the allocations needs to be accurate.
// Setting the alloc_tail to get_journal_head() cannot skip replaying the
// last unnecessary record.
alloc_tail = journal_head;
SUBINFOT(seastore_t, "alloc_tail all trimmed, set to head {}, src={}",
t, alloc_tail, trans_src);
} else if (*maybe_alloc_tail == JOURNAL_SEQ_NULL) {
ceph_abort("impossible");
} else {
alloc_tail = *maybe_alloc_tail;
}
ceph_assert(alloc_tail != JOURNAL_SEQ_NULL);
auto tails = journal_tail_delta_t{alloc_tail, dirty_tail};
SUBDEBUGT(seastore_t, "update tails as delta {}", t, tails);
bufferlist bl;
encode(tails, bl);
delta_info_t delta;
delta.type = extent_types_t::JOURNAL_TAIL;
delta.bl = bl;
record.push_back(std::move(delta));
}
ceph_assert(t.get_fresh_block_stats().num ==
t.inline_block_list.size() +
t.written_ool_block_list.size() +
t.num_delayed_invalid_extents +
t.num_allocated_invalid_extents);
auto& ool_stats = t.get_ool_write_stats();
ceph_assert(ool_stats.extents.num == t.written_ool_block_list.size());
if (record.is_empty()) {
SUBINFOT(seastore_t,
"record to submit is empty, src={}", t, trans_src);
assert(t.onode_tree_stats.is_clear());
assert(t.omap_tree_stats.is_clear());
assert(t.lba_tree_stats.is_clear());
assert(t.backref_tree_stats.is_clear());
assert(ool_stats.is_clear());
}
if (record.modify_time == NULL_TIME) {
record.modify_time = commit_time;
}
SUBDEBUGT(seastore_t,
"commit H{} dirty_from={}, alloc_from={}, "
"{} read, {} fresh with {} invalid, "
"{} delta, {} retire, {}(md={}B, data={}B) ool-records, "
"{}B md, {}B data, modify_time={}",
t, (void*)&t.get_handle(),
get_oldest_dirty_from().value_or(JOURNAL_SEQ_NULL),
get_oldest_backref_dirty_from().value_or(JOURNAL_SEQ_NULL),
read_stat,
fresh_stat,
fresh_invalid_stat,
delta_stat,
retire_stat,
ool_stats.num_records,
ool_stats.md_bytes,
ool_stats.get_data_bytes(),
record.size.get_raw_mdlength(),
record.size.dlength,
sea_time_point_printer_t{record.modify_time});
if (is_background_transaction(trans_src)) {
    // background transactions won't contain any onode/omap tree operations
assert(t.onode_tree_stats.is_clear());
assert(t.omap_tree_stats.is_clear());
} else {
if (t.onode_tree_stats.depth) {
stats.onode_tree_depth = t.onode_tree_stats.depth;
}
if (t.omap_tree_stats.depth) {
stats.omap_tree_depth = t.omap_tree_stats.depth;
}
stats.onode_tree_extents_num += t.onode_tree_stats.extents_num_delta;
ceph_assert(stats.onode_tree_extents_num >= 0);
get_by_src(stats.committed_onode_tree_efforts, trans_src
).increment(t.onode_tree_stats);
stats.omap_tree_extents_num += t.omap_tree_stats.extents_num_delta;
ceph_assert(stats.omap_tree_extents_num >= 0);
get_by_src(stats.committed_omap_tree_efforts, trans_src
).increment(t.omap_tree_stats);
}
if (t.lba_tree_stats.depth) {
stats.lba_tree_depth = t.lba_tree_stats.depth;
}
stats.lba_tree_extents_num += t.lba_tree_stats.extents_num_delta;
ceph_assert(stats.lba_tree_extents_num >= 0);
get_by_src(stats.committed_lba_tree_efforts, trans_src
).increment(t.lba_tree_stats);
if (t.backref_tree_stats.depth) {
stats.backref_tree_depth = t.backref_tree_stats.depth;
}
stats.backref_tree_extents_num += t.backref_tree_stats.extents_num_delta;
ceph_assert(stats.backref_tree_extents_num >= 0);
get_by_src(stats.committed_backref_tree_efforts, trans_src
).increment(t.backref_tree_stats);
++(efforts.num_trans);
efforts.num_ool_records += ool_stats.num_records;
efforts.ool_record_metadata_bytes += ool_stats.md_bytes;
efforts.ool_record_data_bytes += ool_stats.get_data_bytes();
efforts.inline_record_metadata_bytes +=
(record.size.get_raw_mdlength() - record.get_delta_size());
auto &rewrite_version_stats = t.get_rewrite_version_stats();
if (trans_src == Transaction::src_t::TRIM_DIRTY) {
stats.committed_dirty_version.increment_stat(rewrite_version_stats);
} else if (trans_src == Transaction::src_t::CLEANER_MAIN ||
trans_src == Transaction::src_t::CLEANER_COLD) {
stats.committed_reclaim_version.increment_stat(rewrite_version_stats);
} else {
assert(rewrite_version_stats.is_clear());
}
return record;
}
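// Index a batch of backref entries both in the in-memory multiset and by the
// journal sequence they were created at.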
void Cache::backref_batch_update(
std::vector<backref_entry_ref> &&list,
const journal_seq_t &seq)
{
LOG_PREFIX(Cache::backref_batch_update);
DEBUG("inserting {} entries at {}", list.size(), seq);
ceph_assert(seq != JOURNAL_SEQ_NULL);
for (auto &ent : list) {
backref_entry_mset.insert(*ent);
}
auto iter = backref_entryrefs_by_seq.find(seq);
if (iter == backref_entryrefs_by_seq.end()) {
backref_entryrefs_by_seq.emplace(seq, std::move(list));
} else {
iter->second.insert(
iter->second.end(),
std::make_move_iterator(list.begin()),
std::make_move_iterator(list.end()));
}
}
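// Phase 3 of the transaction lifecycle: resolve relative paddrs of inline fresh
// extents, transition fresh/mutated extents to CLEAN/DIRTY, update space
// accounting, and queue backref entries at the record's start sequence.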
void Cache::complete_commit(
Transaction &t,
paddr_t final_block_start,
journal_seq_t start_seq)
{
LOG_PREFIX(Cache::complete_commit);
SUBTRACET(seastore_t, "final_block_start={}, start_seq={}",
t, final_block_start, start_seq);
std::vector<backref_entry_ref> backref_list;
t.for_each_fresh_block([&](const CachedExtentRef &i) {
if (!i->is_valid()) {
return;
}
bool is_inline = false;
if (i->is_inline()) {
is_inline = true;
i->set_paddr(final_block_start.add_relative(i->get_paddr()));
}
i->last_committed_crc = i->get_crc32c();
i->pending_for_transaction = TRANS_ID_NULL;
i->on_initial_write();
i->state = CachedExtent::extent_state_t::CLEAN;
DEBUGT("add extent as fresh, inline={} -- {}",
t, is_inline, *i);
const auto t_src = t.get_src();
i->invalidate_hints();
add_extent(i, &t_src);
epm.commit_space_used(i->get_paddr(), i->get_length());
if (is_backref_mapped_extent_node(i)) {
DEBUGT("backref_list new {} len {}",
t,
i->get_paddr(),
i->get_length());
backref_list.emplace_back(
std::make_unique<backref_entry_t>(
i->get_paddr(),
i->is_logical()
? i->cast<LogicalCachedExtent>()->get_laddr()
: (is_lba_node(i->get_type())
? i->cast<lba_manager::btree::LBANode>()->get_node_meta().begin
: L_ADDR_NULL),
i->get_length(),
i->get_type(),
start_seq));
} else if (is_backref_node(i->get_type())) {
add_backref_extent(
i->get_paddr(),
i->cast<backref::BackrefNode>()->get_node_meta().begin,
i->get_type());
} else {
ERRORT("{}", t, *i);
ceph_abort("not possible");
}
});
  // Finalize mutated blocks: complete their delta writes and mark them DIRTY
for (auto &i: t.mutated_block_list) {
if (!i->is_valid()) {
continue;
}
assert(i->is_exist_mutation_pending() ||
i->prior_instance);
i->on_delta_write(final_block_start);
i->pending_for_transaction = TRANS_ID_NULL;
i->prior_instance = CachedExtentRef();
i->state = CachedExtent::extent_state_t::DIRTY;
assert(i->version > 0);
if (i->version == 1 || i->get_type() == extent_types_t::ROOT) {
i->dirty_from_or_retired_at = start_seq;
DEBUGT("commit extent done, become dirty -- {}", t, *i);
} else {
DEBUGT("commit extent done -- {}", t, *i);
}
}
for (auto &i: t.retired_set) {
epm.mark_space_free(i->get_paddr(), i->get_length());
}
for (auto &i: t.existing_block_list) {
if (i->is_valid()) {
epm.mark_space_used(i->get_paddr(), i->get_length());
}
}
for (auto &i: t.mutated_block_list) {
if (!i->is_valid()) {
continue;
}
i->complete_io();
}
last_commit = start_seq;
for (auto &i: t.retired_set) {
i->dirty_from_or_retired_at = start_seq;
if (is_backref_mapped_extent_node(i)
|| is_retired_placeholder(i->get_type())) {
DEBUGT("backref_list free {} len {}",
t,
i->get_paddr(),
i->get_length());
backref_list.emplace_back(
std::make_unique<backref_entry_t>(
i->get_paddr(),
L_ADDR_NULL,
i->get_length(),
i->get_type(),
start_seq));
} else if (is_backref_node(i->get_type())) {
remove_backref_extent(i->get_paddr());
} else {
ERRORT("{}", t, *i);
ceph_abort("not possible");
}
}
auto existing_stats = t.get_existing_block_stats();
DEBUGT("total existing blocks num: {}, exist clean num: {}, "
"exist mutation pending num: {}",
t,
existing_stats.valid_num,
existing_stats.clean_num,
existing_stats.mutated_num);
for (auto &i: t.existing_block_list) {
if (i->is_valid()) {
if (i->is_exist_clean()) {
i->state = CachedExtent::extent_state_t::CLEAN;
} else {
assert(i->state == CachedExtent::extent_state_t::DIRTY);
}
DEBUGT("backref_list new existing {} len {}",
t,
i->get_paddr(),
i->get_length());
backref_list.emplace_back(
std::make_unique<backref_entry_t>(
i->get_paddr(),
i->cast<LogicalCachedExtent>()->get_laddr(),
i->get_length(),
i->get_type(),
start_seq));
const auto t_src = t.get_src();
add_extent(i, &t_src);
}
}
if (!backref_list.empty()) {
backref_batch_update(std::move(backref_list), start_seq);
}
for (auto &i: t.pre_alloc_list) {
if (!i->is_valid()) {
epm.mark_space_free(i->get_paddr(), i->get_length());
}
}
}
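// (Re)create the in-memory root block; called during both mkfs and mount.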
void Cache::init()
{
LOG_PREFIX(Cache::init);
if (root) {
    // initial creation will do mkfs followed by mount, each of which calls init
DEBUG("remove extent -- prv_root={}", *root);
remove_extent(root);
root = nullptr;
}
root = new RootBlock();
root->init(CachedExtent::extent_state_t::CLEAN,
P_ADDR_ROOT,
PLACEMENT_HINT_NULL,
NULL_GENERATION,
TRANS_ID_NULL);
INFO("init root -- {}", *root);
extents.insert(*root);
}
Cache::mkfs_iertr::future<> Cache::mkfs(Transaction &t)
{
LOG_PREFIX(Cache::mkfs);
INFOT("create root", t);
return get_root(t).si_then([this, &t](auto croot) {
duplicate_for_write(t, croot);
return mkfs_iertr::now();
}).handle_error_interruptible(
mkfs_iertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error in Cache::mkfs"
}
);
}
Cache::close_ertr::future<> Cache::close()
{
LOG_PREFIX(Cache::close);
INFO("close with {}({}B) dirty, dirty_from={}, alloc_from={}, "
"{}({}B) lru, totally {}({}B) indexed extents",
dirty.size(),
stats.dirty_bytes,
get_oldest_dirty_from().value_or(JOURNAL_SEQ_NULL),
get_oldest_backref_dirty_from().value_or(JOURNAL_SEQ_NULL),
lru.get_current_contents_extents(),
lru.get_current_contents_bytes(),
extents.size(),
extents.get_bytes());
root.reset();
for (auto i = dirty.begin(); i != dirty.end(); ) {
auto ptr = &*i;
stats.dirty_bytes -= ptr->get_length();
dirty.erase(i++);
intrusive_ptr_release(ptr);
}
backref_extents.clear();
backref_entryrefs_by_seq.clear();
assert(stats.dirty_bytes == 0);
lru.clear();
return close_ertr::now();
}
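// Replay a single journal delta during mount; returns whether the delta was
// applied. Deltas that are obsolete (behind the tails or belonging to
// since-released segments) are skipped.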
Cache::replay_delta_ret
Cache::replay_delta(
journal_seq_t journal_seq,
paddr_t record_base,
const delta_info_t &delta,
const journal_seq_t &dirty_tail,
const journal_seq_t &alloc_tail,
sea_time_point modify_time)
{
LOG_PREFIX(Cache::replay_delta);
assert(dirty_tail != JOURNAL_SEQ_NULL);
assert(alloc_tail != JOURNAL_SEQ_NULL);
ceph_assert(modify_time != NULL_TIME);
// FIXME: This is specific to the segmented implementation
  /* The journal may validly contain deltas for extents in
   * since-released segments. We can detect those cases by
   * checking whether the segment in question currently has a
   * sequence number > the current journal segment seq. We can
   * safely skip these deltas because the extent must already
   * have been rewritten.
*/
if (delta.paddr != P_ADDR_NULL &&
delta.paddr.get_addr_type() == paddr_types_t::SEGMENT) {
auto& seg_addr = delta.paddr.as_seg_paddr();
auto seg_info = get_segment_info(seg_addr.get_segment_id());
if (seg_info) {
auto delta_paddr_segment_seq = seg_info->seq;
auto delta_paddr_segment_type = seg_info->type;
if (delta_paddr_segment_seq != delta.ext_seq ||
delta_paddr_segment_type != delta.seg_type) {
DEBUG("delta is obsolete, delta_paddr_segment_seq={},"
" delta_paddr_segment_type={} -- {}",
segment_seq_printer_t{delta_paddr_segment_seq},
delta_paddr_segment_type,
delta);
return replay_delta_ertr::make_ready_future<bool>(false);
}
}
}
if (delta.type == extent_types_t::JOURNAL_TAIL) {
// this delta should have been dealt with during segment cleaner mounting
return replay_delta_ertr::make_ready_future<bool>(false);
}
// replay alloc
if (delta.type == extent_types_t::ALLOC_INFO) {
if (journal_seq < alloc_tail) {
DEBUG("journal_seq {} < alloc_tail {}, don't replay {}",
journal_seq, alloc_tail, delta);
return replay_delta_ertr::make_ready_future<bool>(false);
}
alloc_delta_t alloc_delta;
decode(alloc_delta, delta.bl);
std::vector<backref_entry_ref> backref_list;
for (auto &alloc_blk : alloc_delta.alloc_blk_ranges) {
if (alloc_blk.paddr.is_relative()) {
assert(alloc_blk.paddr.is_record_relative());
alloc_blk.paddr = record_base.add_relative(alloc_blk.paddr);
}
DEBUG("replay alloc_blk {}~{} {}, journal_seq: {}",
alloc_blk.paddr, alloc_blk.len, alloc_blk.laddr, journal_seq);
backref_list.emplace_back(
std::make_unique<backref_entry_t>(
alloc_blk.paddr,
alloc_blk.laddr,
alloc_blk.len,
alloc_blk.type,
journal_seq));
}
if (!backref_list.empty()) {
backref_batch_update(std::move(backref_list), journal_seq);
}
return replay_delta_ertr::make_ready_future<bool>(true);
}
// replay dirty
if (journal_seq < dirty_tail) {
DEBUG("journal_seq {} < dirty_tail {}, don't replay {}",
journal_seq, dirty_tail, delta);
return replay_delta_ertr::make_ready_future<bool>(false);
}
if (delta.type == extent_types_t::ROOT) {
TRACE("replay root delta at {} {}, remove extent ... -- {}, prv_root={}",
journal_seq, record_base, delta, *root);
remove_extent(root);
root->apply_delta_and_adjust_crc(record_base, delta.bl);
root->dirty_from_or_retired_at = journal_seq;
root->state = CachedExtent::extent_state_t::DIRTY;
DEBUG("replayed root delta at {} {}, add extent -- {}, root={}",
journal_seq, record_base, delta, *root);
root->set_modify_time(modify_time);
add_extent(root);
return replay_delta_ertr::make_ready_future<bool>(true);
} else {
auto _get_extent_if_cached = [this](paddr_t addr)
-> get_extent_ertr::future<CachedExtentRef> {
    // replay is not included in the cache hit metrics
auto ret = query_cache(addr, nullptr);
if (ret) {
      // no retired-placeholder should exist yet because no transaction
      // has been created.
assert(ret->get_type() != extent_types_t::RETIRED_PLACEHOLDER);
return ret->wait_io().then([ret] {
return ret;
});
} else {
return seastar::make_ready_future<CachedExtentRef>();
}
};
auto extent_fut = (delta.pversion == 0 ?
    // replay is not included in the cache hit metrics
_get_extent_by_type(
delta.type,
delta.paddr,
delta.laddr,
delta.length,
nullptr,
[](CachedExtent &) {},
[](CachedExtent &) {}) :
_get_extent_if_cached(
delta.paddr)
).handle_error(
replay_delta_ertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error in Cache::replay_delta"
}
);
return extent_fut.safe_then([=, this, &delta](auto extent) {
if (!extent) {
DEBUG("replay extent is not present, so delta is obsolete at {} {} -- {}",
journal_seq, record_base, delta);
assert(delta.pversion > 0);
return replay_delta_ertr::make_ready_future<bool>(true);
}
DEBUG("replay extent delta at {} {} ... -- {}, prv_extent={}",
journal_seq, record_base, delta, *extent);
assert(extent->last_committed_crc == delta.prev_crc);
assert(extent->version == delta.pversion);
extent->apply_delta_and_adjust_crc(record_base, delta.bl);
extent->set_modify_time(modify_time);
assert(extent->last_committed_crc == delta.final_crc);
extent->version++;
if (extent->version == 1) {
extent->dirty_from_or_retired_at = journal_seq;
DEBUG("replayed extent delta at {} {}, become dirty -- {}, extent={}" ,
journal_seq, record_base, delta, *extent);
} else {
DEBUG("replayed extent delta at {} {} -- {}, extent={}" ,
journal_seq, record_base, delta, *extent);
}
mark_dirty(extent);
return replay_delta_ertr::make_ready_future<bool>(true);
});
}
}
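// Collect up to max_bytes of dirty extents with dirty_from < seq, in dirty_from
// order, waiting for in-flight IO and resolving each against the transaction.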
Cache::get_next_dirty_extents_ret Cache::get_next_dirty_extents(
Transaction &t,
journal_seq_t seq,
size_t max_bytes)
{
LOG_PREFIX(Cache::get_next_dirty_extents);
if (dirty.empty()) {
DEBUGT("max_bytes={}B, seq={}, dirty is empty",
t, max_bytes, seq);
} else {
DEBUGT("max_bytes={}B, seq={}, dirty_from={}",
t, max_bytes, seq, dirty.begin()->get_dirty_from());
}
std::vector<CachedExtentRef> cand;
size_t bytes_so_far = 0;
for (auto i = dirty.begin();
i != dirty.end() && bytes_so_far < max_bytes;
++i) {
auto dirty_from = i->get_dirty_from();
    // dirty extents must be fully loaded
assert(i->is_fully_loaded());
if (unlikely(dirty_from == JOURNAL_SEQ_NULL)) {
ERRORT("got dirty extent with JOURNAL_SEQ_NULL -- {}", t, *i);
ceph_abort();
}
if (dirty_from < seq) {
TRACET("next extent -- {}", t, *i);
if (!cand.empty() && cand.back()->get_dirty_from() > dirty_from) {
ERRORT("dirty extents are not ordered by dirty_from -- last={}, next={}",
t, *cand.back(), *i);
ceph_abort();
}
bytes_so_far += i->get_length();
cand.push_back(&*i);
} else {
break;
}
}
return seastar::do_with(
std::move(cand),
decltype(cand)(),
[FNAME, this, &t](auto &cand, auto &ret) {
return trans_intr::do_for_each(
cand,
[FNAME, this, &t, &ret](auto &ext) {
TRACET("waiting on extent -- {}", t, *ext);
return trans_intr::make_interruptible(
ext->wait_io()
).then_interruptible([FNAME, this, ext, &t, &ret] {
if (!ext->is_valid()) {
++(get_by_src(stats.trans_conflicts_by_unknown, t.get_src()));
mark_transaction_conflicted(t, *ext);
return;
}
CachedExtentRef on_transaction;
auto result = t.get_extent(ext->get_paddr(), &on_transaction);
if (result == Transaction::get_extent_ret::ABSENT) {
DEBUGT("extent is absent on t -- {}", t, *ext);
t.add_to_read_set(ext);
if (ext->get_type() == extent_types_t::ROOT) {
if (t.root) {
assert(&*t.root == &*ext);
ceph_assert(0 == "t.root would have to already be in the read set");
} else {
assert(&*ext == &*root);
t.root = root;
}
}
ret.push_back(ext);
} else if (result == Transaction::get_extent_ret::PRESENT) {
DEBUGT("extent is present on t -- {}, on t {}", t, *ext, *on_transaction);
ret.push_back(on_transaction);
} else {
assert(result == Transaction::get_extent_ret::RETIRED);
DEBUGT("extent is retired on t -- {}", t, *ext);
}
});
}).then_interruptible([&ret] {
return std::move(ret);
});
});
}
Cache::get_root_ret Cache::get_root(Transaction &t)
{
LOG_PREFIX(Cache::get_root);
if (t.root) {
TRACET("root already on t -- {}", t, *t.root);
return t.root->wait_io().then([&t] {
return get_root_iertr::make_ready_future<RootBlockRef>(
t.root);
});
} else {
DEBUGT("root not on t -- {}", t, *root);
t.root = root;
t.add_to_read_set(root);
return root->wait_io().then([root=root] {
return get_root_iertr::make_ready_future<RootBlockRef>(
root);
});
}
}
Cache::get_extent_ertr::future<CachedExtentRef> Cache::_get_extent_by_type(
extent_types_t type,
paddr_t offset,
laddr_t laddr,
extent_len_t length,
const Transaction::src_t* p_src,
extent_init_func_t &&extent_init_func,
extent_init_func_t &&on_cache)
{
return [=, this, extent_init_func=std::move(extent_init_func)]() mutable {
src_ext_t* p_metric_key = nullptr;
src_ext_t metric_key;
if (p_src) {
metric_key = std::make_pair(*p_src, type);
p_metric_key = &metric_key;
}
switch (type) {
case extent_types_t::ROOT:
ceph_assert(0 == "ROOT is never directly read");
return get_extent_ertr::make_ready_future<CachedExtentRef>();
case extent_types_t::BACKREF_INTERNAL:
return get_extent<backref::BackrefInternalNode>(
offset, length, p_metric_key, std::move(extent_init_func), std::move(on_cache)
).safe_then([](auto extent) {
return CachedExtentRef(extent.detach(), false /* add_ref */);
});
case extent_types_t::BACKREF_LEAF:
return get_extent<backref::BackrefLeafNode>(
offset, length, p_metric_key, std::move(extent_init_func), std::move(on_cache)
).safe_then([](auto extent) {
return CachedExtentRef(extent.detach(), false /* add_ref */);
});
case extent_types_t::LADDR_INTERNAL:
return get_extent<lba_manager::btree::LBAInternalNode>(
offset, length, p_metric_key, std::move(extent_init_func), std::move(on_cache)
).safe_then([](auto extent) {
return CachedExtentRef(extent.detach(), false /* add_ref */);
});
case extent_types_t::LADDR_LEAF:
return get_extent<lba_manager::btree::LBALeafNode>(
offset, length, p_metric_key, std::move(extent_init_func), std::move(on_cache)
).safe_then([](auto extent) {
return CachedExtentRef(extent.detach(), false /* add_ref */);
});
case extent_types_t::OMAP_INNER:
return get_extent<omap_manager::OMapInnerNode>(
offset, length, p_metric_key, std::move(extent_init_func), std::move(on_cache)
).safe_then([](auto extent) {
return CachedExtentRef(extent.detach(), false /* add_ref */);
});
case extent_types_t::OMAP_LEAF:
return get_extent<omap_manager::OMapLeafNode>(
offset, length, p_metric_key, std::move(extent_init_func), std::move(on_cache)
).safe_then([](auto extent) {
return CachedExtentRef(extent.detach(), false /* add_ref */);
});
case extent_types_t::COLL_BLOCK:
return get_extent<collection_manager::CollectionNode>(
offset, length, p_metric_key, std::move(extent_init_func), std::move(on_cache)
).safe_then([](auto extent) {
return CachedExtentRef(extent.detach(), false /* add_ref */);
});
case extent_types_t::ONODE_BLOCK_STAGED:
return get_extent<onode::SeastoreNodeExtent>(
offset, length, p_metric_key, std::move(extent_init_func), std::move(on_cache)
).safe_then([](auto extent) {
return CachedExtentRef(extent.detach(), false /* add_ref */);
});
case extent_types_t::OBJECT_DATA_BLOCK:
return get_extent<ObjectDataBlock>(
offset, length, p_metric_key, std::move(extent_init_func), std::move(on_cache)
).safe_then([](auto extent) {
return CachedExtentRef(extent.detach(), false /* add_ref */);
});
case extent_types_t::RETIRED_PLACEHOLDER:
ceph_assert(0 == "impossible");
return get_extent_ertr::make_ready_future<CachedExtentRef>();
case extent_types_t::TEST_BLOCK:
return get_extent<TestBlock>(
offset, length, p_metric_key, std::move(extent_init_func), std::move(on_cache)
).safe_then([](auto extent) {
return CachedExtentRef(extent.detach(), false /* add_ref */);
});
case extent_types_t::TEST_BLOCK_PHYSICAL:
return get_extent<TestBlockPhysical>(
offset, length, p_metric_key, std::move(extent_init_func), std::move(on_cache)
).safe_then([](auto extent) {
return CachedExtentRef(extent.detach(), false /* add_ref */);
});
case extent_types_t::NONE: {
ceph_assert(0 == "NONE is an invalid extent type");
return get_extent_ertr::make_ready_future<CachedExtentRef>();
}
default:
ceph_assert(0 == "impossible");
return get_extent_ertr::make_ready_future<CachedExtentRef>();
}
}().safe_then([laddr](CachedExtentRef e) {
assert(e->is_logical() == (laddr != L_ADDR_NULL));
if (e->is_logical()) {
e->cast<LogicalCachedExtent>()->set_laddr(laddr);
}
return get_extent_ertr::make_ready_future<CachedExtentRef>(e);
});
}
}
| 66,429 | 31.420693 | 92 | cc |
null | ceph-main/src/crimson/os/seastore/cache.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iostream>
#include "seastar/core/shared_future.hh"
#include "include/buffer.h"
#include "crimson/common/errorator.h"
#include "crimson/os/seastore/cached_extent.h"
#include "crimson/os/seastore/extent_placement_manager.h"
#include "crimson/os/seastore/logging.h"
#include "crimson/os/seastore/random_block_manager.h"
#include "crimson/os/seastore/root_block.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/segment_manager.h"
#include "crimson/os/seastore/transaction.h"
namespace crimson::os::seastore::backref {
class BtreeBackrefManager;
}
namespace crimson::os::seastore {
template <
typename node_key_t,
typename node_val_t,
typename internal_node_t,
typename leaf_node_t,
typename pin_t,
size_t node_size,
bool leaf_has_children>
class FixedKVBtree;
class BackrefManager;
class SegmentProvider;
struct backref_entry_t {
backref_entry_t(
const paddr_t paddr,
const laddr_t laddr,
const extent_len_t len,
const extent_types_t type,
const journal_seq_t seq)
: paddr(paddr),
laddr(laddr),
len(len),
type(type),
seq(seq)
{}
backref_entry_t(alloc_blk_t alloc_blk)
: paddr(alloc_blk.paddr),
laddr(alloc_blk.laddr),
len(alloc_blk.len),
type(alloc_blk.type)
{}
paddr_t paddr = P_ADDR_NULL;
laddr_t laddr = L_ADDR_NULL;
extent_len_t len = 0;
extent_types_t type =
extent_types_t::ROOT;
journal_seq_t seq;
friend bool operator< (
const backref_entry_t &l,
const backref_entry_t &r) {
return l.paddr < r.paddr;
}
friend bool operator> (
const backref_entry_t &l,
const backref_entry_t &r) {
return l.paddr > r.paddr;
}
friend bool operator== (
const backref_entry_t &l,
const backref_entry_t &r) {
return l.paddr == r.paddr;
}
using set_hook_t =
boost::intrusive::set_member_hook<
boost::intrusive::link_mode<
boost::intrusive::auto_unlink>>;
set_hook_t backref_set_hook;
using backref_set_member_options = boost::intrusive::member_hook<
backref_entry_t,
set_hook_t,
&backref_entry_t::backref_set_hook>;
using multiset_t = boost::intrusive::multiset<
backref_entry_t,
backref_set_member_options,
boost::intrusive::constant_time_size<false>>;
struct cmp_t {
using is_transparent = paddr_t;
bool operator()(
const backref_entry_t &l,
const backref_entry_t &r) const {
return l.paddr < r.paddr;
}
bool operator()(const paddr_t l, const backref_entry_t &r) const {
return l < r.paddr;
}
bool operator()(const backref_entry_t &l, const paddr_t r) const {
return l.paddr < r;
}
};
};
std::ostream &operator<<(std::ostream &out, const backref_entry_t &ent);
using backref_entry_ref = std::unique_ptr<backref_entry_t>;
using backref_entry_mset_t = backref_entry_t::multiset_t;
using backref_entry_refs_t = std::vector<backref_entry_ref>;
using backref_entryrefs_by_seq_t = std::map<journal_seq_t, backref_entry_refs_t>;
using backref_entry_query_set_t = std::set<
backref_entry_t, backref_entry_t::cmp_t>;
/**
* Cache
*
* This component is responsible for buffer management, including
* transaction lifecycle.
*
* Seastore transactions are expressed as an atomic combination of
* 1) newly written blocks
* 2) logical mutations to existing physical blocks
*
* See record_t
*
* As such, any transaction has 3 components:
* 1) read_set: references to extents read during the transaction
* See Transaction::read_set
* 2) write_set: references to extents to be written as:
* a) new physical blocks, see Transaction::fresh_block_list
* b) mutations to existing physical blocks,
* see Transaction::mutated_block_list
* 3) retired_set: extent refs to be retired either due to 2b or
* due to releasing the extent generally.
* In the case of 2b, the CachedExtent will have been copied into
* a fresh CachedExtentRef such that the source extent ref is present
* in the read set and the newly allocated extent is present in the
* write_set.
*
* A transaction has 3 phases:
 * 1) construction: user calls Cache::create_transaction() and populates
* the returned transaction by calling Cache methods
* 2) submission: user calls Cache::try_start_transaction(). If
 *     successful, the user may construct a record and submit the
* transaction to the journal.
* 3) completion: once the transaction is durable, the user must call
* Cache::complete_commit() with the block offset to complete
* the transaction.
*
* Internally, in phase 1, the fields in Transaction are filled in.
* - reads may block if the referenced extent is being written
* - once a read obtains a particular CachedExtentRef for a paddr_t,
* it'll always get the same one until overwritten
* - once a paddr_t is overwritten or written, subsequent reads of
* that addr will get the new ref
*
* In phase 2, if all extents in the read set are valid (not expired),
* we can commit (otherwise, we fail and the user must retry).
* - Expire all extents in the retired_set (they must all be valid)
* - Remove all extents in the retired_set from Cache::extents
* - Mark all extents in the write_set wait_io(), add promises to
* transaction
* - Merge Transaction::write_set into Cache::extents
*
* After phase 2, the user will submit the record to the journal.
* Once complete, we perform phase 3:
* - For each CachedExtent in block_list, call
* CachedExtent::complete_initial_write(paddr_t) with the block's
* final offset (inferred from the extent's position in the block_list
* and extent lengths).
* - For each block in mutation_list, call
* CachedExtent::delta_written(paddr_t) with the address of the start
* of the record
* - Complete all promises with the final record start paddr_t
*
*
* Cache logs
*
* levels:
* - INFO: major initiation, closing operations
* - DEBUG: major extent related operations, INFO details
* - TRACE: DEBUG details
* - seastore_t logs
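 *
 * A minimal usage sketch of the lifecycle above (hypothetical caller code; the
 * src value and the journal-side variables journal_head, journal_dirty_tail,
 * final_block_start and start_seq are illustrative and come from elsewhere):
 *
 *   auto t = cache.create_transaction(
 *       Transaction::src_t::MUTATE, "example", false);
 *   return cache.get_root(*t
 *   ).si_then([&](RootBlockRef croot) {
 *     // phase 1: populate the transaction through Cache methods
 *     auto mroot = cache.duplicate_for_write(*t, croot);
 *     // ... mutate mroot ...
 *     // phase 2: build the record once the transaction is ready to submit
 *     auto record = cache.prepare_record(*t, journal_head, journal_dirty_tail);
 *     // ... submit the record to the journal ...
 *     // phase 3: once durable, complete the commit
 *     cache.complete_commit(*t, final_block_start, start_seq);
 *   });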
*/
class Cache {
public:
using base_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
using base_iertr = trans_iertr<base_ertr>;
Cache(ExtentPlacementManager &epm);
~Cache();
/// Creates empty transaction by source
TransactionRef create_transaction(
Transaction::src_t src,
const char* name,
bool is_weak) {
LOG_PREFIX(Cache::create_transaction);
++(get_by_src(stats.trans_created_by_src, src));
auto ret = std::make_unique<Transaction>(
get_dummy_ordering_handle(),
is_weak,
src,
last_commit,
[this](Transaction& t) {
return on_transaction_destruct(t);
},
++next_id
);
SUBDEBUGT(seastore_t, "created name={}, source={}, is_weak={}",
*ret, name, src, is_weak);
assert(!is_weak || src == Transaction::src_t::READ);
return ret;
}
  /// Resets the transaction while preserving its ordering handle
void reset_transaction_preserve_handle(Transaction &t) {
LOG_PREFIX(Cache::reset_transaction_preserve_handle);
if (t.did_reset()) {
SUBTRACET(seastore_t, "reset", t);
++(get_by_src(stats.trans_created_by_src, t.get_src()));
}
t.reset_preserve_handle(last_commit);
}
/// Declare ref retired in t
void retire_extent(Transaction &t, CachedExtentRef ref) {
LOG_PREFIX(Cache::retire_extent);
SUBDEBUGT(seastore_cache, "retire extent -- {}", t, *ref);
t.add_to_retired_set(ref);
}
/// Declare paddr retired in t
using retire_extent_iertr = base_iertr;
using retire_extent_ret = base_iertr::future<>;
retire_extent_ret retire_extent_addr(
Transaction &t, paddr_t addr, extent_len_t length);
/**
* get_root
*
* returns ref to current root or t.root if modified in t
*/
using get_root_iertr = base_iertr;
using get_root_ret = get_root_iertr::future<RootBlockRef>;
get_root_ret get_root(Transaction &t);
/**
* get_root_fast
*
* returns t.root and assume it is already present/read in t
*/
RootBlockRef get_root_fast(Transaction &t) {
LOG_PREFIX(Cache::get_root_fast);
SUBTRACET(seastore_cache, "root already on t -- {}", t, *t.root);
assert(t.root);
return t.root;
}
/**
* get_extent
*
* returns ref to extent at offset~length of type T either from
* - extent_set if already in cache
* - disk
*/
using src_ext_t = std::pair<Transaction::src_t, extent_types_t>;
using get_extent_ertr = base_ertr;
template <typename T>
using get_extent_ret = get_extent_ertr::future<TCachedExtentRef<T>>;
template <typename T, typename Func, typename OnCache>
get_extent_ret<T> get_extent(
paddr_t offset, ///< [in] starting addr
extent_len_t length, ///< [in] length
const src_ext_t* p_src_ext, ///< [in] cache query metric key
Func &&extent_init_func, ///< [in] init func for extent
OnCache &&on_cache
) {
LOG_PREFIX(Cache::get_extent);
auto cached = query_cache(offset, p_src_ext);
if (!cached) {
auto ret = CachedExtent::make_cached_extent_ref<T>(
alloc_cache_buf(length));
ret->init(CachedExtent::extent_state_t::CLEAN_PENDING,
offset,
PLACEMENT_HINT_NULL,
NULL_GENERATION,
TRANS_ID_NULL);
SUBDEBUG(seastore_cache,
"{} {}~{} is absent, add extent and reading ... -- {}",
T::TYPE, offset, length, *ret);
const auto p_src = p_src_ext ? &p_src_ext->first : nullptr;
add_extent(ret, p_src);
on_cache(*ret);
extent_init_func(*ret);
return read_extent<T>(
std::move(ret));
}
// extent PRESENT in cache
if (cached->get_type() == extent_types_t::RETIRED_PLACEHOLDER) {
auto ret = CachedExtent::make_cached_extent_ref<T>(
alloc_cache_buf(length));
ret->init(CachedExtent::extent_state_t::CLEAN_PENDING,
offset,
PLACEMENT_HINT_NULL,
NULL_GENERATION,
TRANS_ID_NULL);
SUBDEBUG(seastore_cache,
"{} {}~{} is absent(placeholder), reading ... -- {}",
T::TYPE, offset, length, *ret);
extents.replace(*ret, *cached);
on_cache(*ret);
// replace placeholder in transactions
while (!cached->transactions.empty()) {
auto t = cached->transactions.begin()->t;
t->replace_placeholder(*cached, *ret);
}
cached->state = CachedExtent::extent_state_t::INVALID;
extent_init_func(*ret);
return read_extent<T>(
std::move(ret));
} else if (!cached->is_fully_loaded()) {
auto ret = TCachedExtentRef<T>(static_cast<T*>(cached.get()));
on_cache(*ret);
SUBDEBUG(seastore_cache,
"{} {}~{} is present without been fully loaded, reading ... -- {}",
T::TYPE, offset, length, *ret);
auto bp = alloc_cache_buf(length);
ret->set_bptr(std::move(bp));
return read_extent<T>(
std::move(ret));
} else {
SUBTRACE(seastore_cache,
"{} {}~{} is present in cache -- {}",
T::TYPE, offset, length, *cached);
auto ret = TCachedExtentRef<T>(static_cast<T*>(cached.get()));
on_cache(*ret);
return ret->wait_io(
).then([ret=std::move(ret)]() mutable
-> get_extent_ret<T> {
// ret may be invalid, caller must check
return get_extent_ret<T>(
get_extent_ertr::ready_future_marker{},
std::move(ret));
});
}
}
template <typename T>
get_extent_ret<T> get_extent(
paddr_t offset, ///< [in] starting addr
extent_len_t length, ///< [in] length
const src_ext_t* p_metric_key ///< [in] cache query metric key
) {
return get_extent<T>(
offset, length, p_metric_key,
[](T &){}, [](T &) {});
}
/**
* get_extent_if_cached
*
* Returns extent at offset if in cache
*/
using get_extent_if_cached_iertr = base_iertr;
using get_extent_if_cached_ret =
get_extent_if_cached_iertr::future<CachedExtentRef>;
get_extent_if_cached_ret get_extent_if_cached(
Transaction &t,
paddr_t offset,
extent_types_t type) {
CachedExtentRef ret;
LOG_PREFIX(Cache::get_extent_if_cached);
auto result = t.get_extent(offset, &ret);
if (result == Transaction::get_extent_ret::RETIRED) {
SUBDEBUGT(seastore_cache, "{} {} is retired on t -- {}",
t, type, offset, *ret);
return get_extent_if_cached_iertr::make_ready_future<
CachedExtentRef>(ret);
} else if (result == Transaction::get_extent_ret::PRESENT) {
if (ret->is_fully_loaded()) {
SUBTRACET(seastore_cache, "{} {} is present on t -- {}",
t, type, offset, *ret);
return ret->wait_io().then([ret] {
return get_extent_if_cached_iertr::make_ready_future<
CachedExtentRef>(ret);
});
} else {
SUBDEBUGT(seastore_cache, "{} {} is present on t -- {}"
" without being fully loaded", t, type, offset, *ret);
return get_extent_if_cached_iertr::make_ready_future<
CachedExtentRef>();
}
}
// get_extent_ret::ABSENT from transaction
auto metric_key = std::make_pair(t.get_src(), type);
ret = query_cache(offset, &metric_key);
if (!ret) {
SUBDEBUGT(seastore_cache, "{} {} is absent", t, type, offset);
return get_extent_if_cached_iertr::make_ready_future<CachedExtentRef>();
} else if (ret->get_type() == extent_types_t::RETIRED_PLACEHOLDER) {
// retired_placeholder is not really cached yet
SUBDEBUGT(seastore_cache, "{} {} is absent(placeholder)",
t, type, offset);
return get_extent_if_cached_iertr::make_ready_future<CachedExtentRef>();
} else if (!ret->is_fully_loaded()) {
SUBDEBUGT(seastore_cache, "{} {} is present without "
"being fully loaded", t, type, offset);
return get_extent_if_cached_iertr::make_ready_future<CachedExtentRef>();
}
    // present in cache (fully loaded) and is not a retired_placeholder
SUBDEBUGT(seastore_cache, "{} {} is present in cache -- {}",
t, type, offset, *ret);
t.add_to_read_set(ret);
touch_extent(*ret);
return ret->wait_io().then([ret] {
return get_extent_if_cached_iertr::make_ready_future<
CachedExtentRef>(ret);
});
}
/**
* get_extent
*
* returns ref to extent at offset~length of type T either from
* - t if modified by t
* - extent_set if already in cache
* - disk
*
* t *must not* have retired offset
*/
using get_extent_iertr = base_iertr;
template <typename T, typename Func>
get_extent_iertr::future<TCachedExtentRef<T>> get_extent(
Transaction &t,
paddr_t offset,
extent_len_t length,
Func &&extent_init_func) {
CachedExtentRef ret;
LOG_PREFIX(Cache::get_extent);
auto result = t.get_extent(offset, &ret);
if (result == Transaction::get_extent_ret::RETIRED) {
SUBERRORT(seastore_cache, "{} {}~{} is retired on t -- {}",
t, T::TYPE, offset, length, *ret);
ceph_abort("impossible");
} else if (result == Transaction::get_extent_ret::PRESENT) {
if (ret->is_fully_loaded()) {
SUBTRACET(seastore_cache, "{} {}~{} is present on t -- {}",
t, T::TYPE, offset, length, *ret);
return ret->wait_io().then([ret] {
return seastar::make_ready_future<TCachedExtentRef<T>>(
ret->cast<T>());
});
} else {
touch_extent(*ret);
        SUBDEBUGT(seastore_cache, "{} {}~{} is present on t without being \
fully loaded, reading ...", t, T::TYPE, offset, length);
auto bp = alloc_cache_buf(ret->get_length());
ret->set_bptr(std::move(bp));
return read_extent<T>(
ret->cast<T>());
}
} else {
SUBTRACET(seastore_cache, "{} {}~{} is absent on t, query cache ...",
t, T::TYPE, offset, length);
auto f = [&t, this](CachedExtent &ext) {
t.add_to_read_set(CachedExtentRef(&ext));
touch_extent(ext);
};
auto metric_key = std::make_pair(t.get_src(), T::TYPE);
return trans_intr::make_interruptible(
get_extent<T>(
offset, length, &metric_key,
std::forward<Func>(extent_init_func), std::move(f))
);
}
}
/*
* get_absent_extent
*
* Mostly the same as Cache::get_extent(), with the only difference
* that get_absent_extent won't search the transaction's context for
* the specific CachedExtent
*/
template <typename T, typename Func>
get_extent_iertr::future<TCachedExtentRef<T>> get_absent_extent(
Transaction &t,
paddr_t offset,
extent_len_t length,
Func &&extent_init_func) {
CachedExtentRef ret;
LOG_PREFIX(Cache::get_extent);
#ifndef NDEBUG
auto r = t.get_extent(offset, &ret);
if (r != Transaction::get_extent_ret::ABSENT) {
SUBERRORT(seastore_cache, "unexpected non-absent extent {}", t, *ret);
ceph_abort();
}
#endif
SUBTRACET(seastore_cache, "{} {}~{} is absent on t, query cache ...",
t, T::TYPE, offset, length);
auto f = [&t, this](CachedExtent &ext) {
t.add_to_read_set(CachedExtentRef(&ext));
touch_extent(ext);
};
auto metric_key = std::make_pair(t.get_src(), T::TYPE);
return trans_intr::make_interruptible(
get_extent<T>(
offset, length, &metric_key,
std::forward<Func>(extent_init_func), std::move(f))
);
}
template <typename T>
get_extent_iertr::future<TCachedExtentRef<T>> get_extent(
Transaction &t,
paddr_t offset,
extent_len_t length) {
return get_extent<T>(t, offset, length, [](T &){});
}
/*
* get_absent_extent
*
* Mostly the same as Cache::get_extent(), with the only difference
* that get_absent_extent won't search the transaction's context for
* the specific CachedExtent
*/
template <typename T>
get_extent_iertr::future<TCachedExtentRef<T>> get_absent_extent(
Transaction &t,
paddr_t offset,
extent_len_t length) {
return get_absent_extent<T>(t, offset, length, [](T &){});
}
get_extent_ertr::future<CachedExtentRef> get_extent_viewable_by_trans(
Transaction &t,
CachedExtentRef extent)
{
auto p_extent = extent->get_transactional_view(t);
if (!p_extent->is_pending_in_trans(t.get_trans_id())) {
t.add_to_read_set(p_extent);
if (!p_extent->is_mutation_pending()) {
touch_extent(*p_extent);
}
}
// user should not see RETIRED_PLACEHOLDER extents
ceph_assert(p_extent->get_type() != extent_types_t::RETIRED_PLACEHOLDER);
if (!p_extent->is_fully_loaded()) {
touch_extent(*p_extent);
LOG_PREFIX(Cache::get_extent_viewable_by_trans);
SUBDEBUG(seastore_cache,
"{} {}~{} is present without been fully loaded, reading ... -- {}",
p_extent->get_type(), p_extent->get_paddr(),p_extent->get_length(),
*p_extent);
auto bp = alloc_cache_buf(p_extent->get_length());
p_extent->set_bptr(std::move(bp));
return read_extent<CachedExtent>(CachedExtentRef(p_extent));
}
return p_extent->wait_io(
).then([p_extent] {
return get_extent_ertr::make_ready_future<CachedExtentRef>(
CachedExtentRef(p_extent));
});
}
template <typename T>
get_extent_ertr::future<TCachedExtentRef<T>> get_extent_viewable_by_trans(
Transaction &t,
TCachedExtentRef<T> extent)
{
return get_extent_viewable_by_trans(t, CachedExtentRef(extent.get())
).safe_then([](auto p_extent) {
return p_extent->template cast<T>();
});
}
extent_len_t get_block_size() const {
return epm.get_block_size();
}
private:
  // This is a workaround for std::move_only_function not being available,
// not really worth generalizing at this time.
class extent_init_func_t {
struct callable_i {
virtual void operator()(CachedExtent &extent) = 0;
virtual ~callable_i() = default;
};
template <typename Func>
struct callable_wrapper final : callable_i {
Func func;
callable_wrapper(Func &&func) : func(std::forward<Func>(func)) {}
void operator()(CachedExtent &extent) final {
return func(extent);
}
~callable_wrapper() final = default;
};
public:
std::unique_ptr<callable_i> wrapped;
template <typename Func>
extent_init_func_t(Func &&func) : wrapped(
std::make_unique<callable_wrapper<Func>>(std::forward<Func>(func)))
{}
void operator()(CachedExtent &extent) {
return (*wrapped)(extent);
}
};
get_extent_ertr::future<CachedExtentRef> _get_extent_by_type(
extent_types_t type,
paddr_t offset,
laddr_t laddr,
extent_len_t length,
const Transaction::src_t* p_src,
extent_init_func_t &&extent_init_func,
extent_init_func_t &&on_cache
);
using get_extent_by_type_iertr = get_extent_iertr;
using get_extent_by_type_ret = get_extent_by_type_iertr::future<
CachedExtentRef>;
get_extent_by_type_ret _get_extent_by_type(
Transaction &t,
extent_types_t type,
paddr_t offset,
laddr_t laddr,
extent_len_t length,
extent_init_func_t &&extent_init_func
) {
LOG_PREFIX(Cache::get_extent_by_type);
CachedExtentRef ret;
auto status = t.get_extent(offset, &ret);
if (status == Transaction::get_extent_ret::RETIRED) {
SUBERRORT(seastore_cache, "{} {}~{} {} is retired on t -- {}",
t, type, offset, length, laddr, *ret);
ceph_abort("impossible");
} else if (status == Transaction::get_extent_ret::PRESENT) {
if (ret->is_fully_loaded()) {
SUBTRACET(seastore_cache, "{} {}~{} {} is present on t -- {}",
t, type, offset, length, laddr, *ret);
return ret->wait_io().then([ret] {
return seastar::make_ready_future<CachedExtentRef>(ret);
});
} else {
touch_extent(*ret);
        SUBDEBUGT(seastore_cache, "{} {}~{} {} is present on t without being \
fully loaded, reading ...", t, type, offset, length, laddr);
auto bp = alloc_cache_buf(ret->get_length());
ret->set_bptr(std::move(bp));
return read_extent<CachedExtent>(
std::move(ret));
}
} else {
SUBTRACET(seastore_cache, "{} {}~{} {} is absent on t, query cache ...",
t, type, offset, length, laddr);
auto f = [&t, this](CachedExtent &ext) {
t.add_to_read_set(CachedExtentRef(&ext));
touch_extent(ext);
};
auto src = t.get_src();
return trans_intr::make_interruptible(
_get_extent_by_type(
type, offset, laddr, length, &src,
std::move(extent_init_func), std::move(f))
);
}
}
get_extent_by_type_ret _get_absent_extent_by_type(
Transaction &t,
extent_types_t type,
paddr_t offset,
laddr_t laddr,
extent_len_t length,
extent_init_func_t &&extent_init_func
) {
LOG_PREFIX(Cache::_get_absent_extent_by_type);
#ifndef NDEBUG
CachedExtentRef ret;
auto r = t.get_extent(offset, &ret);
if (r != Transaction::get_extent_ret::ABSENT) {
SUBERRORT(seastore_cache, "unexpected non-absent extent {}", t, *ret);
ceph_abort();
}
#endif
SUBTRACET(seastore_cache, "{} {}~{} {} is absent on t, query cache ...",
t, type, offset, length, laddr);
auto f = [&t, this](CachedExtent &ext) {
t.add_to_read_set(CachedExtentRef(&ext));
touch_extent(ext);
};
auto src = t.get_src();
return trans_intr::make_interruptible(
_get_extent_by_type(
type, offset, laddr, length, &src,
std::move(extent_init_func), std::move(f))
);
}
backref_entryrefs_by_seq_t backref_entryrefs_by_seq;
backref_entry_mset_t backref_entry_mset;
using backref_entry_query_mset_t = std::multiset<
backref_entry_t, backref_entry_t::cmp_t>;
backref_entry_query_mset_t get_backref_entries_in_range(
paddr_t start,
paddr_t end) {
auto start_iter = backref_entry_mset.lower_bound(
start,
backref_entry_t::cmp_t());
auto end_iter = backref_entry_mset.lower_bound(
end,
backref_entry_t::cmp_t());
backref_entry_query_mset_t res;
for (auto it = start_iter;
it != end_iter;
it++) {
res.emplace(it->paddr, it->laddr, it->len, it->type, it->seq);
}
return res;
}
const backref_entry_mset_t& get_backref_entry_mset() {
return backref_entry_mset;
}
backref_entryrefs_by_seq_t& get_backref_entryrefs_by_seq() {
return backref_entryrefs_by_seq;
}
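  /// Look up segment info through the SegmentProvider registered for the
  /// segment's device; returns nullptr if no provider is registered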
const segment_info_t* get_segment_info(segment_id_t sid) {
auto provider = segment_providers_by_device_id[sid.device_id()];
if (provider) {
return &provider->get_seg_info(sid);
} else {
return nullptr;
}
}
public:
/**
* get_extent_by_type
*
* Based on type, instantiate the correct concrete type
* and read in the extent at location offset~length.
*/
template <typename Func>
get_extent_by_type_ret get_extent_by_type(
Transaction &t, ///< [in] transaction
extent_types_t type, ///< [in] type tag
paddr_t offset, ///< [in] starting addr
laddr_t laddr, ///< [in] logical address if logical
extent_len_t length, ///< [in] length
Func &&extent_init_func ///< [in] extent init func
) {
return _get_extent_by_type(
t,
type,
offset,
laddr,
length,
extent_init_func_t(std::forward<Func>(extent_init_func)));
}
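  //
  // A minimal usage sketch (assuming an open transaction `t` and an extent
  // record described by `type`, `offset`, `laddr` and `length`):
  //
  //   cache.get_extent_by_type(
  //     t, type, offset, laddr, length,
  //     [](CachedExtent &extent) { /* extent_init_func hook */ });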
/*
* get_absent_extent_by_type
*
* Mostly the same as Cache::get_extent_by_type(), with the only difference
* that get_absent_extent_by_type won't search the transaction's context for
* the specific CachedExtent
*/
template <typename Func>
get_extent_by_type_ret get_absent_extent_by_type(
Transaction &t, ///< [in] transaction
extent_types_t type, ///< [in] type tag
paddr_t offset, ///< [in] starting addr
laddr_t laddr, ///< [in] logical address if logical
extent_len_t length, ///< [in] length
Func &&extent_init_func ///< [in] extent init func
) {
return _get_absent_extent_by_type(
t,
type,
offset,
laddr,
length,
extent_init_func_t(std::forward<Func>(extent_init_func)));
}
get_extent_by_type_ret get_extent_by_type(
Transaction &t,
extent_types_t type,
paddr_t offset,
laddr_t laddr,
extent_len_t length
) {
return get_extent_by_type(
t, type, offset, laddr, length, [](CachedExtent &) {});
}
/*
* get_absent_extent_by_type
*
* Mostly the same as Cache::get_extent_by_type(), with the only difference
* that get_absent_extent_by_type won't search the transaction's context for
* the specific CachedExtent
*/
get_extent_by_type_ret get_absent_extent_by_type(
Transaction &t,
extent_types_t type,
paddr_t offset,
laddr_t laddr,
extent_len_t length
) {
return get_absent_extent_by_type(
t, type, offset, laddr, length, [](CachedExtent &) {});
}
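  /// Drop buffered backref entries with journal seq <= trim_to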
void trim_backref_bufs(const journal_seq_t &trim_to) {
LOG_PREFIX(Cache::trim_backref_bufs);
SUBDEBUG(seastore_cache, "trimming to {}", trim_to);
if (!backref_entryrefs_by_seq.empty()) {
SUBDEBUG(seastore_cache, "backref_entryrefs_by_seq {} ~ {}, size={}",
backref_entryrefs_by_seq.rbegin()->first,
backref_entryrefs_by_seq.begin()->first,
backref_entryrefs_by_seq.size());
assert(backref_entryrefs_by_seq.rbegin()->first >= trim_to);
auto iter = backref_entryrefs_by_seq.upper_bound(trim_to);
backref_entryrefs_by_seq.erase(backref_entryrefs_by_seq.begin(), iter);
}
if (backref_entryrefs_by_seq.empty()) {
SUBDEBUG(seastore_cache, "backref_entryrefs_by_seq all trimmed");
}
}
/**
* alloc_new_extent
*
   * Allocates a fresh extent. If delayed is true, addr will be alloc'd later.
* Note that epaddr can only be fed by the btree lba unittest for now
*/
template <typename T>
TCachedExtentRef<T> alloc_new_extent(
Transaction &t, ///< [in, out] current transaction
extent_len_t length, ///< [in] length
placement_hint_t hint, ///< [in] user hint
#ifdef UNIT_TESTS_BUILT
rewrite_gen_t gen, ///< [in] rewrite generation
std::optional<paddr_t> epaddr = std::nullopt ///< [in] paddr fed by callers
#else
rewrite_gen_t gen
#endif
) {
LOG_PREFIX(Cache::alloc_new_extent);
SUBTRACET(seastore_cache, "allocate {} {}B, hint={}, gen={}",
t, T::TYPE, length, hint, rewrite_gen_printer_t{gen});
#ifdef UNIT_TESTS_BUILT
auto result = epm.alloc_new_extent(t, T::TYPE, length, hint, gen, epaddr);
#else
auto result = epm.alloc_new_extent(t, T::TYPE, length, hint, gen);
#endif
auto ret = CachedExtent::make_cached_extent_ref<T>(std::move(result.bp));
ret->init(CachedExtent::extent_state_t::INITIAL_WRITE_PENDING,
result.paddr,
hint,
result.gen,
t.get_trans_id());
t.add_fresh_extent(ret);
SUBDEBUGT(seastore_cache,
"allocated {} {}B extent at {}, hint={}, gen={} -- {}",
t, T::TYPE, length, result.paddr,
hint, rewrite_gen_printer_t{result.gen}, *ret);
return ret;
}
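  //
  // A minimal usage sketch (assuming a concrete extent type T and hint/gen
  // values chosen by the caller's placement policy):
  //
  //   auto extent = cache.alloc_new_extent<T>(t, length, hint, gen);
  //   // extent is INITIAL_WRITE_PENDING and already added to t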
/**
* alloc_remapped_extent
*
* Allocates an EXIST_CLEAN extent. Use the buffer to fill the new extent
* if buffer exists.
*/
template <typename T>
TCachedExtentRef<T> alloc_remapped_extent(
Transaction &t,
laddr_t remap_laddr,
paddr_t remap_paddr,
extent_len_t remap_length,
laddr_t original_laddr,
std::optional<ceph::bufferptr> &&original_bptr) {
LOG_PREFIX(Cache::alloc_remapped_extent);
SUBTRACET(seastore_cache, "allocate {} {}B, hint={}",
t, T::TYPE, remap_length, remap_laddr);
assert(remap_laddr >= original_laddr);
TCachedExtentRef<T> ext;
if (original_bptr.has_value()) {
// shallow copy the buffer from original extent
auto nbp = ceph::bufferptr(
*original_bptr,
remap_laddr - original_laddr,
remap_length);
// ExtentPlacementManager::alloc_new_extent will make a new
// (relative/temp) paddr, so make extent directly
ext = CachedExtent::make_cached_extent_ref<T>(std::move(nbp));
} else {
ext = CachedExtent::make_placeholder_cached_extent_ref<T>(remap_length);
}
ext->init(CachedExtent::extent_state_t::EXIST_CLEAN,
remap_paddr,
PLACEMENT_HINT_NULL,
NULL_GENERATION,
t.get_trans_id());
t.add_fresh_extent(ext);
return ext;
}
/**
   * alloc_new_extent_by_type
*
* Allocates a fresh extent. addr will be relative until commit.
*/
CachedExtentRef alloc_new_extent_by_type(
Transaction &t, ///< [in, out] current transaction
extent_types_t type, ///< [in] type tag
extent_len_t length, ///< [in] length
placement_hint_t hint, ///< [in] user hint
rewrite_gen_t gen ///< [in] rewrite generation
);
/**
* Allocates mutable buffer from extent_set on offset~len
*
* TODO: Note, currently all implementations literally copy the
   * buffer.  This needn't be true; CachedExtent implementations could
* choose to refer to the same buffer unmodified until commit and just
* buffer the mutations in an ancillary data structure.
*
* @param current transaction
* @param extent to duplicate
* @return mutable extent
*/
CachedExtentRef duplicate_for_write(
Transaction &t, ///< [in, out] current transaction
CachedExtentRef i ///< [in] ref to existing extent
);
/**
   * set_segment_providers
*
* Set to provide segment information to help identify out-dated delta.
*
* FIXME: This is specific to the segmented implementation
*/
void set_segment_providers(std::vector<SegmentProvider*> &&providers) {
segment_providers_by_device_id = std::move(providers);
}
/**
* prepare_record
*
* Construct the record for Journal from transaction.
*/
record_t prepare_record(
Transaction &t, ///< [in, out] current transaction
const journal_seq_t &journal_head,
const journal_seq_t &journal_dirty_tail
);
/**
* complete_commit
*
* Must be called upon completion of write. Releases blocks on mutating
* extents, fills in addresses, and calls relevant callbacks on fresh
   * and mutated extents.
*/
void complete_commit(
Transaction &t, ///< [in, out] current transaction
paddr_t final_block_start, ///< [in] offset of initial block
journal_seq_t seq ///< [in] journal commit seq
);
/**
* init
*/
void init();
/**
* mkfs
*
* Alloc initial root node and add to t. The intention is for other
* components to use t to adjust the resulting root ref prior to commit.
*/
using mkfs_iertr = base_iertr;
mkfs_iertr::future<> mkfs(Transaction &t);
/**
* close
*
* TODO: should flush dirty blocks
*/
using close_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
close_ertr::future<> close();
/**
* replay_delta
*
* Intended for use in Journal::delta. For each delta, should decode delta,
* read relevant block from disk or cache (using correct type), and call
* CachedExtent::apply_delta marking the extent dirty.
*
* Returns whether the delta is applied.
*/
using replay_delta_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
using replay_delta_ret = replay_delta_ertr::future<bool>;
replay_delta_ret replay_delta(
journal_seq_t seq,
paddr_t record_block_base,
const delta_info_t &delta,
const journal_seq_t &dirty_tail,
const journal_seq_t &alloc_tail,
sea_time_point modify_time);
/**
* init_cached_extents
*
* Calls passed lambda for each dirty cached block. Intended for use
   * after replay to allow lba_manager (or similar) to read in any ancestor
* blocks.
*/
using init_cached_extents_iertr = base_iertr;
using init_cached_extents_ret = init_cached_extents_iertr::future<>;
template <typename F>
init_cached_extents_ret init_cached_extents(
Transaction &t,
F &&f)
{
LOG_PREFIX(Cache::init_cached_extents);
SUBINFOT(seastore_cache,
"start with {}({}B) extents, {} dirty, dirty_from={}, alloc_from={}",
t,
extents.size(),
extents.get_bytes(),
dirty.size(),
get_oldest_dirty_from().value_or(JOURNAL_SEQ_NULL),
get_oldest_backref_dirty_from().value_or(JOURNAL_SEQ_NULL));
    // journal replay should have finished at this point,
    // Cache::root should have been inserted into the dirty list
assert(root->is_dirty());
std::vector<CachedExtentRef> _dirty;
for (auto &e : extents) {
_dirty.push_back(CachedExtentRef(&e));
}
return seastar::do_with(
std::forward<F>(f),
std::move(_dirty),
[this, FNAME, &t](auto &f, auto &refs) mutable
{
return trans_intr::do_for_each(
refs,
[this, FNAME, &t, &f](auto &e)
{
SUBTRACET(seastore_cache, "inspecting extent ... -- {}", t, *e);
return f(t, e
).si_then([this, FNAME, &t, e](bool is_alive) {
if (!is_alive) {
SUBDEBUGT(seastore_cache, "extent is not alive, remove extent -- {}", t, *e);
remove_extent(e);
e->set_invalid(t);
} else {
SUBDEBUGT(seastore_cache, "extent is alive -- {}", t, *e);
}
});
});
}).handle_error_interruptible(
init_cached_extents_iertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error in Cache::init_cached_extents"
}
).si_then([this, FNAME, &t] {
SUBINFOT(seastore_cache,
"finish with {}({}B) extents, {} dirty, dirty_from={}, alloc_from={}",
t,
extents.size(),
extents.get_bytes(),
dirty.size(),
get_oldest_dirty_from().value_or(JOURNAL_SEQ_NULL),
get_oldest_backref_dirty_from().value_or(JOURNAL_SEQ_NULL));
});
}
/**
* update_extent_from_transaction
*
* Updates passed extent based on t. If extent has been retired,
* a null result will be returned.
*/
CachedExtentRef update_extent_from_transaction(
Transaction &t,
CachedExtentRef extent) {
if (extent->get_type() == extent_types_t::ROOT) {
if (t.root) {
return t.root;
} else {
t.add_to_read_set(extent);
t.root = extent->cast<RootBlock>();
return extent;
}
} else {
auto result = t.get_extent(extent->get_paddr(), &extent);
if (result == Transaction::get_extent_ret::RETIRED) {
return CachedExtentRef();
} else {
if (result == Transaction::get_extent_ret::ABSENT) {
t.add_to_read_set(extent);
}
return extent;
}
}
}
/**
* print
*
* Dump summary of contents (TODO)
*/
std::ostream &print(
std::ostream &out) const {
return out;
}
/**
* get_next_dirty_extents
*
* Returns extents with get_dirty_from() < seq and adds to read set of
* t.
*/
using get_next_dirty_extents_iertr = base_iertr;
using get_next_dirty_extents_ret = get_next_dirty_extents_iertr::future<
std::vector<CachedExtentRef>>;
get_next_dirty_extents_ret get_next_dirty_extents(
Transaction &t,
journal_seq_t seq,
size_t max_bytes);
/// returns std::nullopt if no pending alloc-infos
std::optional<journal_seq_t> get_oldest_backref_dirty_from() const {
LOG_PREFIX(Cache::get_oldest_backref_dirty_from);
if (backref_entryrefs_by_seq.empty()) {
SUBDEBUG(seastore_cache, "backref_oldest: null");
return std::nullopt;
}
auto oldest = backref_entryrefs_by_seq.begin()->first;
SUBDEBUG(seastore_cache, "backref_oldest: {}", oldest);
ceph_assert(oldest != JOURNAL_SEQ_NULL);
return oldest;
}
/// returns std::nullopt if no dirty extents
/// returns JOURNAL_SEQ_NULL if the oldest dirty extent is still pending
std::optional<journal_seq_t> get_oldest_dirty_from() const {
LOG_PREFIX(Cache::get_oldest_dirty_from);
if (dirty.empty()) {
SUBDEBUG(seastore_cache, "dirty_oldest: null");
return std::nullopt;
} else {
auto oldest = dirty.begin()->get_dirty_from();
if (oldest == JOURNAL_SEQ_NULL) {
SUBDEBUG(seastore_cache, "dirty_oldest: pending");
} else {
SUBDEBUG(seastore_cache, "dirty_oldest: {}", oldest);
}
return oldest;
}
}
/// Dump live extents
void dump_contents();
/**
* backref_extent_entry_t
*
* All the backref extent entries have to be indexed by paddr in memory,
   * so they can be retrieved by range during cleaning.
*
* See BtreeBackrefManager::retrieve_backref_extents_in_range()
*/
struct backref_extent_entry_t {
backref_extent_entry_t(
paddr_t paddr,
paddr_t key,
extent_types_t type)
: paddr(paddr), key(key), type(type) {}
paddr_t paddr = P_ADDR_NULL;
paddr_t key = P_ADDR_NULL;
extent_types_t type = extent_types_t::ROOT;
struct cmp_t {
using is_transparent = paddr_t;
bool operator()(
const backref_extent_entry_t &l,
const backref_extent_entry_t &r) const {
return l.paddr < r.paddr;
}
bool operator()(
const paddr_t &l,
const backref_extent_entry_t &r) const {
return l < r.paddr;
}
bool operator()(
const backref_extent_entry_t &l,
const paddr_t &r) const {
return l.paddr < r;
}
};
};
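  /// Adjust the per-tree extent counters (lba/omap/onode/backref) by delta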
void update_tree_extents_num(extent_types_t type, int64_t delta) {
switch (type) {
case extent_types_t::LADDR_INTERNAL:
[[fallthrough]];
case extent_types_t::DINK_LADDR_LEAF:
[[fallthrough]];
case extent_types_t::LADDR_LEAF:
stats.lba_tree_extents_num += delta;
ceph_assert(stats.lba_tree_extents_num >= 0);
return;
case extent_types_t::OMAP_INNER:
[[fallthrough]];
case extent_types_t::OMAP_LEAF:
stats.omap_tree_extents_num += delta;
      ceph_assert(stats.omap_tree_extents_num >= 0);
return;
case extent_types_t::ONODE_BLOCK_STAGED:
stats.onode_tree_extents_num += delta;
ceph_assert(stats.onode_tree_extents_num >= 0);
return;
case extent_types_t::BACKREF_INTERNAL:
[[fallthrough]];
case extent_types_t::BACKREF_LEAF:
stats.backref_tree_extents_num += delta;
ceph_assert(stats.backref_tree_extents_num >= 0);
return;
default:
return;
}
}
uint64_t get_omap_tree_depth() {
return stats.omap_tree_depth;
}
/// Update lru for access to ref
void touch_extent(
CachedExtent &ext,
const Transaction::src_t* p_src=nullptr)
{
if (p_src && is_background_transaction(*p_src))
return;
if (ext.is_clean() && !ext.is_placeholder()) {
lru.move_to_top(ext);
}
}
private:
ExtentPlacementManager& epm;
RootBlockRef root; ///< ref to current root
ExtentIndex extents; ///< set of live extents
journal_seq_t last_commit = JOURNAL_SEQ_MIN;
// FIXME: This is specific to the segmented implementation
std::vector<SegmentProvider*> segment_providers_by_device_id;
transaction_id_t next_id = 0;
/**
* dirty
*
* holds refs to dirty extents. Ordered by CachedExtent::get_dirty_from().
*/
CachedExtent::list dirty;
using backref_extent_entry_query_set_t =
std::set<
backref_extent_entry_t,
backref_extent_entry_t::cmp_t>;
backref_extent_entry_query_set_t backref_extents;
void add_backref_extent(
paddr_t paddr,
paddr_t key,
extent_types_t type) {
assert(!paddr.is_relative());
auto [iter, inserted] = backref_extents.emplace(paddr, key, type);
boost::ignore_unused(inserted);
assert(inserted);
}
void remove_backref_extent(paddr_t paddr) {
auto iter = backref_extents.find(paddr);
if (iter != backref_extents.end())
backref_extents.erase(iter);
}
backref_extent_entry_query_set_t get_backref_extents_in_range(
paddr_t start,
paddr_t end) {
auto start_iter = backref_extents.lower_bound(start);
auto end_iter = backref_extents.upper_bound(end);
backref_extent_entry_query_set_t res;
res.insert(start_iter, end_iter);
return res;
}
friend class crimson::os::seastore::backref::BtreeBackrefManager;
friend class crimson::os::seastore::BackrefManager;
/**
* lru
*
* holds references to recently used extents
*/
class LRU {
// max size (bytes)
const size_t capacity = 0;
// current size (bytes)
size_t contents = 0;
CachedExtent::list lru;
void trim_to_capacity() {
while (contents > capacity) {
assert(lru.size() > 0);
remove_from_lru(lru.front());
}
}
void add_to_lru(CachedExtent &extent) {
assert(extent.is_clean() && !extent.is_placeholder());
if (!extent.primary_ref_list_hook.is_linked()) {
contents += extent.get_length();
intrusive_ptr_add_ref(&extent);
lru.push_back(extent);
}
trim_to_capacity();
}
public:
LRU(size_t capacity) : capacity(capacity) {}
size_t get_capacity() const {
return capacity;
}
size_t get_current_contents_bytes() const {
return contents;
}
size_t get_current_contents_extents() const {
return lru.size();
}
void remove_from_lru(CachedExtent &extent) {
assert(extent.is_clean() && !extent.is_placeholder());
if (extent.primary_ref_list_hook.is_linked()) {
lru.erase(lru.s_iterator_to(extent));
assert(contents >= extent.get_length());
contents -= extent.get_length();
intrusive_ptr_release(&extent);
}
}
void move_to_top(CachedExtent &extent) {
assert(extent.is_clean() && !extent.is_placeholder());
if (extent.primary_ref_list_hook.is_linked()) {
lru.erase(lru.s_iterator_to(extent));
intrusive_ptr_release(&extent);
assert(contents >= extent.get_length());
contents -= extent.get_length();
}
add_to_lru(extent);
}
void clear() {
LOG_PREFIX(Cache::LRU::clear);
for (auto iter = lru.begin(); iter != lru.end();) {
SUBDEBUG(seastore_cache, "clearing {}", *iter);
remove_from_lru(*(iter++));
}
}
~LRU() {
clear();
}
} lru;
struct query_counters_t {
uint64_t access = 0;
uint64_t hit = 0;
};
template <typename CounterT>
using counter_by_extent_t = std::array<CounterT, EXTENT_TYPES_MAX>;
struct invalid_trans_efforts_t {
io_stat_t read;
io_stat_t mutate;
uint64_t mutate_delta_bytes = 0;
io_stat_t retire;
io_stat_t fresh;
io_stat_t fresh_ool_written;
counter_by_extent_t<uint64_t> num_trans_invalidated;
uint64_t total_trans_invalidated = 0;
uint64_t num_ool_records = 0;
uint64_t ool_record_bytes = 0;
};
struct commit_trans_efforts_t {
counter_by_extent_t<io_stat_t> read_by_ext;
counter_by_extent_t<io_stat_t> mutate_by_ext;
counter_by_extent_t<uint64_t> delta_bytes_by_ext;
counter_by_extent_t<io_stat_t> retire_by_ext;
counter_by_extent_t<io_stat_t> fresh_invalid_by_ext; // inline but is already invalid (retired)
counter_by_extent_t<io_stat_t> fresh_inline_by_ext;
counter_by_extent_t<io_stat_t> fresh_ool_by_ext;
uint64_t num_trans = 0; // the number of inline records
uint64_t num_ool_records = 0;
uint64_t ool_record_metadata_bytes = 0;
uint64_t ool_record_data_bytes = 0;
    uint64_t inline_record_metadata_bytes = 0; // metadata excluding the delta bytes
};
struct success_read_trans_efforts_t {
io_stat_t read;
uint64_t num_trans = 0;
};
struct tree_efforts_t {
uint64_t num_inserts = 0;
uint64_t num_erases = 0;
uint64_t num_updates = 0;
void increment(const Transaction::tree_stats_t& incremental) {
num_inserts += incremental.num_inserts;
num_erases += incremental.num_erases;
num_updates += incremental.num_updates;
}
};
template <typename CounterT>
using counter_by_src_t = std::array<CounterT, TRANSACTION_TYPE_MAX>;
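  // number of distinct unordered (src1, src2) pairs, src1 <= src2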
static constexpr std::size_t NUM_SRC_COMB =
TRANSACTION_TYPE_MAX * (TRANSACTION_TYPE_MAX + 1) / 2;
struct {
counter_by_src_t<uint64_t> trans_created_by_src;
counter_by_src_t<commit_trans_efforts_t> committed_efforts_by_src;
counter_by_src_t<invalid_trans_efforts_t> invalidated_efforts_by_src;
counter_by_src_t<query_counters_t> cache_query_by_src;
success_read_trans_efforts_t success_read_efforts;
uint64_t dirty_bytes = 0;
uint64_t onode_tree_depth = 0;
int64_t onode_tree_extents_num = 0;
counter_by_src_t<tree_efforts_t> committed_onode_tree_efforts;
counter_by_src_t<tree_efforts_t> invalidated_onode_tree_efforts;
uint64_t omap_tree_depth = 0;
int64_t omap_tree_extents_num = 0;
counter_by_src_t<tree_efforts_t> committed_omap_tree_efforts;
counter_by_src_t<tree_efforts_t> invalidated_omap_tree_efforts;
uint64_t lba_tree_depth = 0;
int64_t lba_tree_extents_num = 0;
counter_by_src_t<tree_efforts_t> committed_lba_tree_efforts;
counter_by_src_t<tree_efforts_t> invalidated_lba_tree_efforts;
uint64_t backref_tree_depth = 0;
int64_t backref_tree_extents_num = 0;
counter_by_src_t<tree_efforts_t> committed_backref_tree_efforts;
counter_by_src_t<tree_efforts_t> invalidated_backref_tree_efforts;
std::array<uint64_t, NUM_SRC_COMB> trans_conflicts_by_srcs;
counter_by_src_t<uint64_t> trans_conflicts_by_unknown;
version_stat_t committed_dirty_version;
version_stat_t committed_reclaim_version;
} stats;
template <typename CounterT>
CounterT& get_by_src(
counter_by_src_t<CounterT>& counters_by_src,
Transaction::src_t src) {
assert(static_cast<std::size_t>(src) < counters_by_src.size());
return counters_by_src[static_cast<std::size_t>(src)];
}
template <typename CounterT>
CounterT& get_by_ext(
counter_by_extent_t<CounterT>& counters_by_ext,
extent_types_t ext) {
auto index = static_cast<uint8_t>(ext);
assert(index < EXTENT_TYPES_MAX);
return counters_by_ext[index];
}
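  /// Count a conflict between two transaction sources. The unordered pair
  /// (src1 <= src2) is flattened into stats.trans_conflicts_by_srcs using
  /// triangular indexing:
  ///   index = num_srcs * src1 + src2 - src1 * (src1 + 1) / 2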
void account_conflict(Transaction::src_t src1, Transaction::src_t src2) {
assert(src1 < Transaction::src_t::MAX);
assert(src2 < Transaction::src_t::MAX);
if (src1 > src2) {
std::swap(src1, src2);
}
// impossible combinations
// should be consistent with trans_srcs_invalidated in register_metrics()
assert(!(src1 == Transaction::src_t::READ &&
src2 == Transaction::src_t::READ));
assert(!(src1 == Transaction::src_t::TRIM_DIRTY &&
src2 == Transaction::src_t::TRIM_DIRTY));
assert(!(src1 == Transaction::src_t::CLEANER_MAIN &&
src2 == Transaction::src_t::CLEANER_MAIN));
assert(!(src1 == Transaction::src_t::CLEANER_COLD &&
src2 == Transaction::src_t::CLEANER_COLD));
assert(!(src1 == Transaction::src_t::TRIM_ALLOC &&
src2 == Transaction::src_t::TRIM_ALLOC));
auto src1_value = static_cast<std::size_t>(src1);
auto src2_value = static_cast<std::size_t>(src2);
auto num_srcs = static_cast<std::size_t>(Transaction::src_t::MAX);
auto conflict_index = num_srcs * src1_value + src2_value -
src1_value * (src1_value + 1) / 2;
assert(conflict_index < NUM_SRC_COMB);
++stats.trans_conflicts_by_srcs[conflict_index];
}
seastar::metrics::metric_group metrics;
void register_metrics();
/// alloc buffer for cached extent
bufferptr alloc_cache_buf(size_t size) {
// TODO: memory pooling etc
auto bp = ceph::bufferptr(
buffer::create_page_aligned(size));
bp.zero();
return bp;
}
void backref_batch_update(
std::vector<backref_entry_ref> &&,
const journal_seq_t &);
/// Add extent to extents handling dirty and refcounting
void add_extent(CachedExtentRef ref, const Transaction::src_t* t_src);
  /// Mark existing extent ref dirty -- mainly for replay
void mark_dirty(CachedExtentRef ref);
/// Add dirty extent to dirty list
void add_to_dirty(CachedExtentRef ref);
/// Remove from dirty list
void remove_from_dirty(CachedExtentRef ref);
/// Remove extent from extents handling dirty and refcounting
void remove_extent(CachedExtentRef ref);
/// Retire extent
void commit_retire_extent(Transaction& t, CachedExtentRef ref);
/// Replace prev with next
void commit_replace_extent(Transaction& t, CachedExtentRef next, CachedExtentRef prev);
/// Invalidate extent and mark affected transactions
void invalidate_extent(Transaction& t, CachedExtent& extent);
/// Mark a valid transaction as conflicted
void mark_transaction_conflicted(
Transaction& t, CachedExtent& conflicting_extent);
/// Introspect transaction when it is being destructed
void on_transaction_destruct(Transaction& t);
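  /**
   * read_extent
   *
   * Read the extent's buffer from disk via the ExtentPlacementManager and
   * wake any waiters once the IO completes. The extent must already have
   * its bptr allocated and be CLEAN_PENDING, EXIST_CLEAN or CLEAN.
   */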
template <typename T>
get_extent_ret<T> read_extent(
TCachedExtentRef<T>&& extent
) {
assert(extent->state == CachedExtent::extent_state_t::CLEAN_PENDING ||
extent->state == CachedExtent::extent_state_t::EXIST_CLEAN ||
extent->state == CachedExtent::extent_state_t::CLEAN);
extent->set_io_wait();
return epm.read(
extent->get_paddr(),
extent->get_length(),
extent->get_bptr()
).safe_then(
[extent=std::move(extent)]() mutable {
LOG_PREFIX(Cache::read_extent);
if (likely(extent->state == CachedExtent::extent_state_t::CLEAN_PENDING)) {
extent->state = CachedExtent::extent_state_t::CLEAN;
/* TODO: crc should be checked against LBA manager */
extent->last_committed_crc = extent->get_crc32c();
extent->on_clean_read();
} else if (extent->state == CachedExtent::extent_state_t::EXIST_CLEAN ||
extent->state == CachedExtent::extent_state_t::CLEAN) {
/* TODO: crc should be checked against LBA manager */
extent->last_committed_crc = extent->get_crc32c();
} else {
ceph_assert(!extent->is_valid());
}
extent->complete_io();
SUBDEBUG(seastore_cache, "read extent done -- {}", *extent);
return get_extent_ertr::make_ready_future<TCachedExtentRef<T>>(
std::move(extent));
},
get_extent_ertr::pass_further{},
crimson::ct_error::assert_all{
"Cache::get_extent: invalid error"
}
);
}
// Extents in cache may contain placeholders
CachedExtentRef query_cache(
paddr_t offset,
const src_ext_t* p_metric_key) {
query_counters_t* p_counters = nullptr;
if (p_metric_key) {
p_counters = &get_by_src(stats.cache_query_by_src, p_metric_key->first);
++p_counters->access;
}
if (auto iter = extents.find_offset(offset);
iter != extents.end()) {
if (p_metric_key &&
// retired_placeholder is not really cached yet
iter->get_type() != extent_types_t::RETIRED_PLACEHOLDER) {
++p_counters->hit;
}
return CachedExtentRef(&*iter);
} else {
return CachedExtentRef();
}
}
template <
typename node_key_t,
typename node_val_t,
typename internal_node_t,
typename leaf_node_t,
typename pin_t,
size_t node_size,
bool leaf_has_children>
friend class FixedKVBtree;
};
using CacheRef = std::unique_ptr<Cache>;
}
| 53,387 | 30.66548 | 99 | h |
null | ceph-main/src/crimson/os/seastore/cached_extent.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "crimson/os/seastore/cached_extent.h"
#include "crimson/os/seastore/transaction.h"
#include "crimson/common/log.h"
#include "crimson/os/seastore/btree/fixed_kv_node.h"
namespace {
[[maybe_unused]] seastar::logger& logger() {
return crimson::get_logger(ceph_subsys_seastore_tm);
}
}
namespace crimson::os::seastore {
#ifdef DEBUG_CACHED_EXTENT_REF
void intrusive_ptr_add_ref(CachedExtent *ptr)
{
intrusive_ptr_add_ref(
static_cast<boost::intrusive_ref_counter<
CachedExtent,
boost::thread_unsafe_counter>*>(ptr));
logger().debug("intrusive_ptr_add_ref: {}", *ptr);
}
void intrusive_ptr_release(CachedExtent *ptr)
{
logger().debug("intrusive_ptr_release: {}", *ptr);
intrusive_ptr_release(
static_cast<boost::intrusive_ref_counter<
CachedExtent,
boost::thread_unsafe_counter>*>(ptr));
}
#endif
bool is_backref_mapped_extent_node(const CachedExtentRef &extent) {
return extent->is_logical()
|| is_lba_node(extent->get_type())
|| extent->get_type() == extent_types_t::TEST_BLOCK_PHYSICAL;
}
std::ostream &operator<<(std::ostream &out, CachedExtent::extent_state_t state)
{
switch (state) {
case CachedExtent::extent_state_t::INITIAL_WRITE_PENDING:
return out << "INITIAL_WRITE_PENDING";
case CachedExtent::extent_state_t::MUTATION_PENDING:
return out << "MUTATION_PENDING";
case CachedExtent::extent_state_t::CLEAN_PENDING:
return out << "CLEAN_PENDING";
case CachedExtent::extent_state_t::CLEAN:
return out << "CLEAN";
case CachedExtent::extent_state_t::DIRTY:
return out << "DIRTY";
case CachedExtent::extent_state_t::EXIST_CLEAN:
return out << "EXIST_CLEAN";
case CachedExtent::extent_state_t::EXIST_MUTATION_PENDING:
return out << "EXIST_MUTATION_PENDING";
case CachedExtent::extent_state_t::INVALID:
return out << "INVALID";
default:
return out << "UNKNOWN";
}
}
std::ostream &operator<<(std::ostream &out, const CachedExtent &ext)
{
return ext.print(out);
}
CachedExtent::~CachedExtent()
{
if (parent_index) {
assert(is_linked());
parent_index->erase(*this);
}
}
CachedExtent* CachedExtent::get_transactional_view(Transaction &t) {
return get_transactional_view(t.get_trans_id());
}
CachedExtent* CachedExtent::get_transactional_view(transaction_id_t tid) {
auto it = mutation_pendings.find(tid, trans_spec_view_t::cmp_t());
if (it != mutation_pendings.end()) {
return (CachedExtent*)&(*it);
} else {
return this;
}
}
std::ostream &operator<<(std::ostream &out, const parent_tracker_t &tracker) {
return out << "parent_tracker=" << (void*)&tracker
<< ", parent=" << (void*)tracker.get_parent().get();
}
std::ostream &ChildableCachedExtent::print_detail(std::ostream &out) const {
if (parent_tracker) {
out << *parent_tracker;
} else {
out << ", parent_tracker=" << (void*)nullptr;
}
_print_detail(out);
return out;
}
std::ostream &LogicalCachedExtent::_print_detail(std::ostream &out) const
{
out << ", laddr=" << laddr;
return print_detail_l(out);
}
void child_pos_t::link_child(ChildableCachedExtent *c) {
get_parent<FixedKVNode<laddr_t>>()->link_child(c, pos);
}
void CachedExtent::set_invalid(Transaction &t) {
state = extent_state_t::INVALID;
if (trans_view_hook.is_linked()) {
trans_view_hook.unlink();
}
on_invalidated(t);
}
LogicalCachedExtent::~LogicalCachedExtent() {
if (has_parent_tracker() && is_valid() && !is_pending()) {
assert(get_parent_node());
auto parent = get_parent_node<FixedKVNode<laddr_t>>();
auto off = parent->lower_bound_offset(laddr);
assert(parent->get_key_from_idx(off) == laddr);
assert(parent->children[off] == this);
parent->children[off] = nullptr;
}
}
void LogicalCachedExtent::on_replace_prior(Transaction &t) {
assert(is_mutation_pending());
take_prior_parent_tracker();
assert(get_parent_node());
auto parent = get_parent_node<FixedKVNode<laddr_t>>();
//TODO: can this search be avoided?
auto off = parent->lower_bound_offset(laddr);
assert(parent->get_key_from_idx(off) == laddr);
parent->children[off] = this;
}
parent_tracker_t::~parent_tracker_t() {
// this is parent's tracker, reset it
auto &p = (FixedKVNode<laddr_t>&)*parent;
if (p.my_tracker == this) {
p.my_tracker = nullptr;
}
}
std::ostream &operator<<(std::ostream &out, const LBAMapping &rhs)
{
return out << "LBAMapping(" << rhs.get_key() << "~" << rhs.get_length()
<< "->" << rhs.get_val();
}
std::ostream &operator<<(std::ostream &out, const lba_pin_list_t &rhs)
{
bool first = true;
out << '[';
for (const auto &i: rhs) {
out << (first ? "" : ",") << *i;
first = false;
}
return out << ']';
}
}
| 4,805 | 26.152542 | 79 | cc |
null | ceph-main/src/crimson/os/seastore/cached_extent.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iostream>
#include <boost/intrusive/list.hpp>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include "seastar/core/shared_future.hh"
#include "include/buffer.h"
#include "crimson/common/errorator.h"
#include "crimson/common/interruptible_future.h"
#include "crimson/os/seastore/seastore_types.h"
struct btree_lba_manager_test;
namespace crimson::os::seastore {
class Transaction;
class CachedExtent;
using CachedExtentRef = boost::intrusive_ptr<CachedExtent>;
class SegmentedAllocator;
class TransactionManager;
class ExtentPlacementManager;
template <
typename node_key_t,
typename node_val_t,
typename internal_node_t,
typename leaf_node_t,
typename pin_t,
size_t node_size,
bool leaf_has_children>
class FixedKVBtree;
template <typename, typename>
class BtreeNodeMapping;
// #define DEBUG_CACHED_EXTENT_REF
#ifdef DEBUG_CACHED_EXTENT_REF
void intrusive_ptr_add_ref(CachedExtent *);
void intrusive_ptr_release(CachedExtent *);
#endif
template <typename T>
using TCachedExtentRef = boost::intrusive_ptr<T>;
/**
* CachedExtent
*/
namespace onode {
class DummyNodeExtent;
class TestReplayExtent;
}
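/**
 * read_set_item_t
 *
 * Links a transaction to an extent in its read set; comparable both by
 * extent paddr (cmp_t) and by owning transaction (trans_cmp_t).
 */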
template <typename T>
class read_set_item_t {
using set_hook_t = boost::intrusive::set_member_hook<
boost::intrusive::link_mode<
boost::intrusive::auto_unlink>>;
set_hook_t trans_hook;
using set_hook_options = boost::intrusive::member_hook<
read_set_item_t,
set_hook_t,
&read_set_item_t::trans_hook>;
public:
struct cmp_t {
using is_transparent = paddr_t;
bool operator()(const read_set_item_t<T> &lhs, const read_set_item_t &rhs) const;
bool operator()(const paddr_t &lhs, const read_set_item_t<T> &rhs) const;
bool operator()(const read_set_item_t<T> &lhs, const paddr_t &rhs) const;
};
struct trans_cmp_t {
bool operator()(
const read_set_item_t<Transaction> &lhs,
const read_set_item_t<Transaction> &rhs) const {
return lhs.t < rhs.t;
}
bool operator()(
const Transaction *lhs,
const read_set_item_t<Transaction> &rhs) const {
return lhs < rhs.t;
}
bool operator()(
const read_set_item_t<Transaction> &lhs,
const Transaction *rhs) const {
return lhs.t < rhs;
}
};
using trans_set_t = boost::intrusive::set<
read_set_item_t,
set_hook_options,
boost::intrusive::constant_time_size<false>,
boost::intrusive::compare<trans_cmp_t>>;
T *t = nullptr;
CachedExtentRef ref;
read_set_item_t(T *t, CachedExtentRef ref);
read_set_item_t(const read_set_item_t &) = delete;
read_set_item_t(read_set_item_t &&) = default;
~read_set_item_t() = default;
};
template <typename T>
using read_set_t = std::set<
read_set_item_t<T>,
typename read_set_item_t<T>::cmp_t>;
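/**
 * trans_spec_view_t
 *
 * Mixin recording which transaction (if any) owns a pending extent, with an
 * intrusive hook so transaction-specific views can be indexed by
 * transaction id.
 */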
struct trans_spec_view_t {
// if the extent is pending, contains the id of the owning transaction;
// TRANS_ID_NULL otherwise
transaction_id_t pending_for_transaction = TRANS_ID_NULL;
struct cmp_t {
bool operator()(
const trans_spec_view_t &lhs,
const trans_spec_view_t &rhs) const
{
return lhs.pending_for_transaction < rhs.pending_for_transaction;
}
bool operator()(
const transaction_id_t &lhs,
const trans_spec_view_t &rhs) const
{
return lhs < rhs.pending_for_transaction;
}
bool operator()(
const trans_spec_view_t &lhs,
const transaction_id_t &rhs) const
{
return lhs.pending_for_transaction < rhs;
}
};
using trans_view_hook_t =
boost::intrusive::set_member_hook<
boost::intrusive::link_mode<
boost::intrusive::auto_unlink>>;
trans_view_hook_t trans_view_hook;
using trans_view_member_options =
boost::intrusive::member_hook<
trans_spec_view_t,
trans_view_hook_t,
&trans_spec_view_t::trans_view_hook>;
using trans_view_set_t = boost::intrusive::set<
trans_spec_view_t,
trans_view_member_options,
boost::intrusive::constant_time_size<false>,
boost::intrusive::compare<cmp_t>>;
};
class ExtentIndex;
class CachedExtent
: public boost::intrusive_ref_counter<
CachedExtent, boost::thread_unsafe_counter>,
public trans_spec_view_t {
enum class extent_state_t : uint8_t {
INITIAL_WRITE_PENDING, // In Transaction::write_set and fresh_block_list
MUTATION_PENDING, // In Transaction::write_set and mutated_block_list
CLEAN_PENDING, // CLEAN, but not yet read out
CLEAN, // In Cache::extent_index, Transaction::read_set
// during write, contents match disk, version == 0
DIRTY, // Same as CLEAN, but contents do not match disk,
// version > 0
EXIST_CLEAN, // Similar to CLEAN, but its metadata not yet
// persisted to disk.
// In Transaction::write_set and existing_block_list.
// After transaction commits, state becomes CLEAN
                           // and the extent is added to Cache. Modifying such
                           // extents will cause the state to turn to
                           // EXIST_MUTATION_PENDING.
EXIST_MUTATION_PENDING,// Similar to MUTATION_PENDING, but its prior_instance
// is empty.
// In Transaction::write_set, existing_block_list and
// mutated_block_list. State becomes DIRTY and it is
// added to Cache after transaction commits.
INVALID // Part of no ExtentIndex set
} state = extent_state_t::INVALID;
friend std::ostream &operator<<(std::ostream &, extent_state_t);
  // allow a dummy extent to pretend it is in a specific state
friend class onode::DummyNodeExtent;
friend class onode::TestReplayExtent;
template <
typename node_key_t,
typename node_val_t,
typename internal_node_t,
typename leaf_node_t,
typename pin_t,
size_t node_size,
bool leaf_has_children>
friend class FixedKVBtree;
uint32_t last_committed_crc = 0;
// Points at current version while in state MUTATION_PENDING
CachedExtentRef prior_instance;
// time of the last modification
sea_time_point modify_time = NULL_TIME;
public:
void init(extent_state_t _state,
paddr_t paddr,
placement_hint_t hint,
rewrite_gen_t gen,
transaction_id_t trans_id) {
assert(gen == NULL_GENERATION || is_rewrite_generation(gen));
state = _state;
set_paddr(paddr);
user_hint = hint;
rewrite_generation = gen;
pending_for_transaction = trans_id;
}
void set_modify_time(sea_time_point t) {
modify_time = t;
}
sea_time_point get_modify_time() const {
return modify_time;
}
/**
* duplicate_for_write
*
* Implementation should return a fresh CachedExtentRef
* which represents a copy of *this until on_delta_write()
* is complete, at which point the user may assume *this
* will be in state INVALID. As such, the implementation
* may involve a copy of get_bptr(), or an ancillary
* structure which defers updating the actual buffer until
* on_delta_write().
*/
virtual CachedExtentRef duplicate_for_write(Transaction &t) = 0;
/**
* prepare_write
*
* Called prior to reading buffer.
   * Implementation may use this callback to fully write out
* updates to the buffer.
*/
virtual void prepare_write() {}
/**
* prepare_commit
*
* Called prior to committing the transaction in which this extent
* is living.
*/
virtual void prepare_commit() {}
/**
* on_initial_write
*
* Called after commit of extent. State will be CLEAN.
   * Implementation may use this call to fixup the buffer
* with the newly available absolute get_paddr().
*/
virtual void on_initial_write() {}
/**
* on_clean_read
*
* Called after read of initially written extent.
   * State will be CLEAN. Implementation may use this
* call to fixup the buffer with the newly available
* absolute get_paddr().
*/
virtual void on_clean_read() {}
/**
* on_delta_write
*
* Called after commit of delta. State will be DIRTY.
   * Implementation may use this call to fixup any relative
   * references in the buffer with the passed
* record_block_offset record location.
*/
virtual void on_delta_write(paddr_t record_block_offset) {}
/**
* on_replace_prior
*
* Called after the extent has replaced a previous one. State
* of the extent must be MUTATION_PENDING. Implementation
* may use this call to synchronize states that must be synchronized
* with the states of Cache and can't wait till transaction
* completes.
*/
virtual void on_replace_prior(Transaction &t) {}
/**
* on_invalidated
*
* Called after the extent is invalidated, either by Cache::invalidate_extent
* or Transaction::add_to_retired_set. Implementation may use this
* call to adjust states that must be changed immediately once
* invalidated.
*/
virtual void on_invalidated(Transaction &t) {}
/**
* get_type
*
* Returns concrete type.
*/
virtual extent_types_t get_type() const = 0;
virtual bool is_logical() const {
return false;
}
virtual bool may_conflict() const {
return true;
}
friend std::ostream &operator<<(std::ostream &, extent_state_t);
virtual std::ostream &print_detail(std::ostream &out) const { return out; }
std::ostream &print(std::ostream &out) const {
std::string prior_poffset_str = prior_poffset
? fmt::format("{}", *prior_poffset)
: "nullopt";
out << "CachedExtent(addr=" << this
<< ", type=" << get_type()
<< ", version=" << version
<< ", dirty_from_or_retired_at=" << dirty_from_or_retired_at
<< ", modify_time=" << sea_time_point_printer_t{modify_time}
<< ", paddr=" << get_paddr()
<< ", prior_paddr=" << prior_poffset_str
<< ", length=" << get_length()
<< ", state=" << state
<< ", last_committed_crc=" << last_committed_crc
<< ", refcount=" << use_count()
<< ", user_hint=" << user_hint
<< ", rewrite_gen=" << rewrite_gen_printer_t{rewrite_generation};
if (state != extent_state_t::INVALID &&
state != extent_state_t::CLEAN_PENDING) {
print_detail(out);
}
return out << ")";
}
/**
* get_delta
*
* Must return a valid delta usable in apply_delta() in submit_transaction
* if state == MUTATION_PENDING.
*/
virtual ceph::bufferlist get_delta() = 0;
/**
* apply_delta
*
* bl is a delta obtained previously from get_delta. The versions will
* match. Implementation should mutate buffer based on bl. base matches
* the address passed on_delta_write.
*
* Implementation *must* use set_last_committed_crc to update the crc to
* what the crc of the buffer would have been at submission. For physical
* extents that use base to adjust internal record-relative deltas, this
* means that the crc should be of the buffer after applying the delta,
* but before that adjustment. We do it this way because the crc in the
* commit path does not yet know the record base address.
*
* LogicalCachedExtent overrides this method and provides a simpler
* apply_delta override for LogicalCachedExtent implementers.
*/
virtual void apply_delta_and_adjust_crc(
paddr_t base, const ceph::bufferlist &bl) = 0;
/**
* Called on dirty CachedExtent implementation after replay.
* Implementation should perform any reads/in-memory-setup
* necessary. (for instance, the lba implementation will use this
* to load in lba_manager blocks)
*/
using complete_load_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
virtual complete_load_ertr::future<> complete_load() {
return complete_load_ertr::now();
}
/**
* cast
*
* Returns a TCachedExtentRef of the specified type.
* TODO: add dynamic check that the requested type is actually correct.
*/
template <typename T>
TCachedExtentRef<T> cast() {
return TCachedExtentRef<T>(static_cast<T*>(this));
}
template <typename T>
TCachedExtentRef<const T> cast() const {
return TCachedExtentRef<const T>(static_cast<const T*>(this));
}
/// Returns true if extent can be mutated in an open transaction
bool is_mutable() const {
return state == extent_state_t::INITIAL_WRITE_PENDING ||
state == extent_state_t::MUTATION_PENDING ||
state == extent_state_t::EXIST_MUTATION_PENDING;
}
/// Returns true if extent is part of an open transaction
bool is_pending() const {
return is_mutable() || state == extent_state_t::EXIST_CLEAN;
}
/// Returns true if extent is stable and shared among transactions
bool is_stable() const {
return state == extent_state_t::CLEAN_PENDING ||
state == extent_state_t::CLEAN ||
state == extent_state_t::DIRTY;
}
/// Returns true if extent has a pending delta
bool is_mutation_pending() const {
return state == extent_state_t::MUTATION_PENDING;
}
/// Returns true if extent is a fresh extent
bool is_initial_pending() const {
return state == extent_state_t::INITIAL_WRITE_PENDING;
}
/// Returns true if extent is clean (does not have deltas on disk)
bool is_clean() const {
ceph_assert(is_valid());
return state == extent_state_t::INITIAL_WRITE_PENDING ||
state == extent_state_t::CLEAN ||
state == extent_state_t::CLEAN_PENDING ||
state == extent_state_t::EXIST_CLEAN;
}
  /// Returns true if data is persisted while metadata isn't
bool is_exist_clean() const {
return state == extent_state_t::EXIST_CLEAN;
}
  /// Returns true if the extent with EXIST_CLEAN is modified
bool is_exist_mutation_pending() const {
return state == extent_state_t::EXIST_MUTATION_PENDING;
}
/// Returns true if extent is dirty (has deltas on disk)
bool is_dirty() const {
ceph_assert(is_valid());
return !is_clean();
}
  /// Returns true if extent has not been superseded or retired
bool is_valid() const {
return state != extent_state_t::INVALID;
}
/// Returns true if extent or prior_instance has been invalidated
bool has_been_invalidated() const {
return !is_valid() || (is_mutation_pending() && !prior_instance->is_valid());
}
  /// Returns true if extent is a placeholder
bool is_placeholder() const {
return get_type() == extent_types_t::RETIRED_PLACEHOLDER;
}
bool is_pending_io() const {
return !!io_wait_promise;
}
/// Return journal location of oldest relevant delta, only valid while DIRTY
auto get_dirty_from() const {
ceph_assert(is_dirty());
return dirty_from_or_retired_at;
}
/// Return journal location of oldest relevant delta, only valid while RETIRED
auto get_retired_at() const {
ceph_assert(!is_valid());
return dirty_from_or_retired_at;
}
/// Return true if extent is fully loaded or is about to be fully loaded (call
/// wait_io() in this case)
bool is_fully_loaded() const {
return ptr.has_value();
}
/**
* get_paddr
*
* Returns current address of extent. If is_initial_pending(), address will
* be relative, otherwise address will be absolute.
*/
paddr_t get_paddr() const { return poffset; }
  /// Returns length of extent data on disk
extent_len_t get_length() const {
return length;
}
extent_len_t get_loaded_length() const {
if (ptr.has_value()) {
return ptr->length();
} else {
return 0;
}
}
/// Returns version, get_version() == 0 iff is_clean()
extent_version_t get_version() const {
return version;
}
/// Returns crc32c of buffer
uint32_t get_crc32c() {
return ceph_crc32c(
1,
reinterpret_cast<const unsigned char *>(get_bptr().c_str()),
get_length());
}
/// Get ref to raw buffer
bufferptr &get_bptr() {
assert(ptr.has_value());
return *ptr;
}
const bufferptr &get_bptr() const {
assert(ptr.has_value());
return *ptr;
}
/// Compare by paddr
friend bool operator< (const CachedExtent &a, const CachedExtent &b) {
return a.poffset < b.poffset;
}
friend bool operator> (const CachedExtent &a, const CachedExtent &b) {
return a.poffset > b.poffset;
}
friend bool operator== (const CachedExtent &a, const CachedExtent &b) {
return a.poffset == b.poffset;
}
virtual ~CachedExtent();
placement_hint_t get_user_hint() const {
return user_hint;
}
rewrite_gen_t get_rewrite_generation() const {
return rewrite_generation;
}
void invalidate_hints() {
user_hint = PLACEMENT_HINT_NULL;
rewrite_generation = NULL_GENERATION;
}
/// assign the target rewrite generation for the followup rewrite
void set_target_rewrite_generation(rewrite_gen_t gen) {
assert(is_target_rewrite_generation(gen));
user_hint = placement_hint_t::REWRITE;
rewrite_generation = gen;
}
bool is_inline() const {
return poffset.is_relative();
}
paddr_t get_prior_paddr_and_reset() {
assert(prior_poffset);
auto ret = *prior_poffset;
prior_poffset.reset();
return ret;
}
void set_invalid(Transaction &t);
// a rewrite extent has an invalid prior_instance,
// and a mutation_pending extent has a valid prior_instance
CachedExtentRef get_prior_instance() {
return prior_instance;
}
private:
template <typename T>
friend class read_set_item_t;
friend struct paddr_cmp;
friend struct ref_paddr_cmp;
friend class ExtentIndex;
/// Pointer to containing index (or null)
ExtentIndex *parent_index = nullptr;
/// hook for intrusive extent_index
boost::intrusive::set_member_hook<> extent_index_hook;
using index_member_options = boost::intrusive::member_hook<
CachedExtent,
boost::intrusive::set_member_hook<>,
&CachedExtent::extent_index_hook>;
using index = boost::intrusive::set<CachedExtent, index_member_options>;
friend class ExtentIndex;
friend class Transaction;
bool is_linked() {
return extent_index_hook.is_linked();
}
/// set bufferptr
void set_bptr(ceph::bufferptr &&nptr) {
ptr = nptr;
}
  /// Returns true if the extent is part of the open transaction
bool is_pending_in_trans(transaction_id_t id) const {
return is_pending() && pending_for_transaction == id;
}
/// hook for intrusive ref list (mainly dirty or lru list)
boost::intrusive::list_member_hook<> primary_ref_list_hook;
using primary_ref_list_member_options = boost::intrusive::member_hook<
CachedExtent,
boost::intrusive::list_member_hook<>,
&CachedExtent::primary_ref_list_hook>;
using list = boost::intrusive::list<
CachedExtent,
primary_ref_list_member_options>;
/**
* dirty_from_or_retired_at
*
* Encodes ordering token for primary_ref_list -- dirty_from when
* dirty or retired_at if retired.
*/
journal_seq_t dirty_from_or_retired_at;
/// cache data contents, std::nullopt if no data in cache
std::optional<ceph::bufferptr> ptr;
/// disk data length
extent_len_t length;
/// number of deltas since initial write
extent_version_t version = 0;
/// address of original block -- record relative iff is_initial_pending()
paddr_t poffset;
/// relative address before ool write, used to update mapping
std::optional<paddr_t> prior_poffset = std::nullopt;
/// used to wait while in-progress commit completes
std::optional<seastar::shared_promise<>> io_wait_promise;
void set_io_wait() {
ceph_assert(!io_wait_promise);
io_wait_promise = seastar::shared_promise<>();
}
void complete_io() {
ceph_assert(io_wait_promise);
io_wait_promise->set_value();
io_wait_promise = std::nullopt;
}
seastar::future<> wait_io() {
if (!io_wait_promise) {
return seastar::now();
} else {
return io_wait_promise->get_shared_future();
}
}
CachedExtent* get_transactional_view(Transaction &t);
CachedExtent* get_transactional_view(transaction_id_t tid);
read_set_item_t<Transaction>::trans_set_t transactions;
placement_hint_t user_hint = PLACEMENT_HINT_NULL;
// the target rewrite generation for the followup rewrite
// or the rewrite generation for the fresh write
rewrite_gen_t rewrite_generation = NULL_GENERATION;
protected:
trans_view_set_t mutation_pendings;
CachedExtent(CachedExtent &&other) = delete;
CachedExtent(ceph::bufferptr &&_ptr) : ptr(std::move(_ptr)) {
length = ptr->length();
assert(length > 0);
}
/// construct new CachedExtent, will deep copy the buffer
CachedExtent(const CachedExtent &other)
: state(other.state),
dirty_from_or_retired_at(other.dirty_from_or_retired_at),
length(other.get_length()),
version(other.version),
poffset(other.poffset) {
if (other.is_fully_loaded()) {
ptr = std::make_optional<ceph::bufferptr>
(other.ptr->c_str(), other.ptr->length());
} else {
// the extent must be fully loaded before CoW
assert(length == 0); // in case of root
}
}
struct share_buffer_t {};
/// construct new CachedExtent, will shallow copy the buffer
CachedExtent(const CachedExtent &other, share_buffer_t)
: state(other.state),
dirty_from_or_retired_at(other.dirty_from_or_retired_at),
ptr(other.ptr),
length(other.get_length()),
version(other.version),
poffset(other.poffset) {}
// 0 length is only possible for the RootBlock
struct zero_length_t {};
CachedExtent(zero_length_t) : ptr(ceph::bufferptr(0)), length(0) {};
struct retired_placeholder_t{};
CachedExtent(retired_placeholder_t, extent_len_t _length)
: state(extent_state_t::INVALID),
length(_length) {
assert(length > 0);
}
/// no buffer extent, for lazy read
CachedExtent(extent_len_t _length) : length(_length) {
assert(length > 0);
}
friend class Cache;
template <typename T, typename... Args>
static TCachedExtentRef<T> make_cached_extent_ref(
Args&&... args) {
return new T(std::forward<Args>(args)...);
}
template <typename T>
static TCachedExtentRef<T> make_placeholder_cached_extent_ref(
extent_len_t length) {
return new T(length);
}
void reset_prior_instance() {
prior_instance.reset();
}
/// Sets last_committed_crc
void set_last_committed_crc(uint32_t crc) {
last_committed_crc = crc;
}
void set_paddr(paddr_t offset, bool need_update_mapping = false) {
if (need_update_mapping) {
assert(!prior_poffset);
prior_poffset = poffset;
}
poffset = offset;
}
/**
* maybe_generate_relative
*
* There are three kinds of addresses one might want to
* store within an extent:
* - addr for a block within the same transaction relative to the
* physical location of this extent in the
* event that we will read it in the initial read of the extent
* - addr relative to the physical location of the next record to a
* block within that record to contain a delta for this extent in
* the event that we'll read it from a delta and overlay it onto a
* dirty representation of the extent.
* - absolute addr to a block already written outside of the current
* transaction.
*
* This helper checks addr and the current state to create the correct
* reference.
*/
paddr_t maybe_generate_relative(paddr_t addr) {
if (is_initial_pending() && addr.is_record_relative()) {
return addr.block_relative_to(get_paddr());
} else {
ceph_assert(!addr.is_record_relative() || is_mutation_pending());
return addr;
}
}
friend class crimson::os::seastore::SegmentedAllocator;
friend class crimson::os::seastore::TransactionManager;
friend class crimson::os::seastore::ExtentPlacementManager;
template <typename, typename>
friend class BtreeNodeMapping;
friend class ::btree_lba_manager_test;
};
std::ostream &operator<<(std::ostream &, CachedExtent::extent_state_t);
std::ostream &operator<<(std::ostream &, const CachedExtent&);
bool is_backref_mapped_extent_node(const CachedExtentRef &extent);
/// Compare extents by paddr
struct paddr_cmp {
bool operator()(paddr_t lhs, const CachedExtent &rhs) const {
return lhs < rhs.poffset;
}
bool operator()(const CachedExtent &lhs, paddr_t rhs) const {
return lhs.poffset < rhs;
}
};
/// Compare extent refs by paddr
struct ref_paddr_cmp {
using is_transparent = paddr_t;
bool operator()(const CachedExtentRef &lhs, const CachedExtentRef &rhs) const {
return lhs->poffset < rhs->poffset;
}
bool operator()(const paddr_t &lhs, const CachedExtentRef &rhs) const {
return lhs < rhs->poffset;
}
bool operator()(const CachedExtentRef &lhs, const paddr_t &rhs) const {
return lhs->poffset < rhs;
}
};
template <typename T, typename C>
class addr_extent_list_base_t
: public std::list<std::pair<T, C>> {};
using pextent_list_t = addr_extent_list_base_t<paddr_t, CachedExtentRef>;
template <typename T, typename C, typename Cmp>
class addr_extent_set_base_t
: public std::set<C, Cmp> {};
using pextent_set_t = addr_extent_set_base_t<
paddr_t,
CachedExtentRef,
ref_paddr_cmp
>;
template <typename T>
using t_pextent_list_t = addr_extent_list_base_t<paddr_t, TCachedExtentRef<T>>;
/**
* ExtentIndex
*
 * Intrusive index of CachedExtent by poffset; does not hold a reference, so
 * the user must ensure each extent is removed prior to deletion
*/
class ExtentIndex {
friend class Cache;
CachedExtent::index extent_index;
public:
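  /// Return a [begin, end) iterator pair over extents overlapping addr~len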
auto get_overlap(paddr_t addr, extent_len_t len) {
auto bottom = extent_index.upper_bound(addr, paddr_cmp());
if (bottom != extent_index.begin())
--bottom;
if (bottom != extent_index.end() &&
bottom->get_paddr().add_offset(bottom->get_length()) <= addr)
++bottom;
auto top = extent_index.lower_bound(addr.add_offset(len), paddr_cmp());
return std::make_pair(
bottom,
top
);
}
void clear() {
struct cached_extent_disposer {
void operator() (CachedExtent* extent) {
extent->parent_index = nullptr;
}
};
extent_index.clear_and_dispose(cached_extent_disposer());
bytes = 0;
}
void insert(CachedExtent &extent) {
// sanity check
ceph_assert(!extent.parent_index);
auto [a, b] = get_overlap(
extent.get_paddr(),
extent.get_length());
ceph_assert(a == b);
[[maybe_unused]] auto [iter, inserted] = extent_index.insert(extent);
assert(inserted);
extent.parent_index = this;
bytes += extent.get_length();
}
void erase(CachedExtent &extent) {
assert(extent.parent_index);
assert(extent.is_linked());
[[maybe_unused]] auto erased = extent_index.erase(
extent_index.s_iterator_to(extent));
extent.parent_index = nullptr;
assert(erased);
bytes -= extent.get_length();
}
void replace(CachedExtent &to, CachedExtent &from) {
assert(to.get_length() == from.get_length());
extent_index.replace_node(extent_index.s_iterator_to(from), to);
from.parent_index = nullptr;
to.parent_index = this;
}
bool empty() const {
return extent_index.empty();
}
auto find_offset(paddr_t offset) {
return extent_index.find(offset, paddr_cmp());
}
auto begin() {
return extent_index.begin();
}
auto end() {
return extent_index.end();
}
auto size() const {
return extent_index.size();
}
auto get_bytes() const {
return bytes;
}
~ExtentIndex() {
assert(extent_index.empty());
assert(bytes == 0);
}
private:
uint64_t bytes = 0;
};
class ChildableCachedExtent;
class LogicalCachedExtent;
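/**
 * child_pos_t
 *
 * Records the position of a child within its stable parent node so that the
 * child can later be linked back into that parent (see link_child()).
 */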
class child_pos_t {
public:
child_pos_t(CachedExtentRef stable_parent, uint16_t pos)
: stable_parent(stable_parent), pos(pos) {}
template <typename parent_t>
TCachedExtentRef<parent_t> get_parent() {
ceph_assert(stable_parent);
return stable_parent->template cast<parent_t>();
}
uint16_t get_pos() {
return pos;
}
void link_child(ChildableCachedExtent *c);
private:
CachedExtentRef stable_parent;
uint16_t pos = std::numeric_limits<uint16_t>::max();
};
using get_child_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
template <typename T>
struct get_child_ret_t {
std::variant<child_pos_t, get_child_ertr::future<TCachedExtentRef<T>>> ret;
get_child_ret_t(child_pos_t pos)
: ret(std::move(pos)) {}
get_child_ret_t(get_child_ertr::future<TCachedExtentRef<T>> child)
: ret(std::move(child)) {}
bool has_child() const {
return ret.index() == 1;
}
child_pos_t &get_child_pos() {
ceph_assert(ret.index() == 0);
return std::get<0>(ret);
}
get_child_ertr::future<TCachedExtentRef<T>> &get_child_fut() {
ceph_assert(ret.index() == 1);
return std::get<1>(ret);
}
};
template <typename key_t, typename>
class PhysicalNodeMapping;
template <typename key_t, typename val_t>
using PhysicalNodeMappingRef = std::unique_ptr<PhysicalNodeMapping<key_t, val_t>>;
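/**
 * PhysicalNodeMapping
 *
 * Abstract key -> value mapping view onto a physical node; instantiated
 * below as LBAMapping (laddr_t -> paddr_t) and BackrefMapping
 * (paddr_t -> laddr_t).
 */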
template <typename key_t, typename val_t>
class PhysicalNodeMapping {
public:
virtual extent_len_t get_length() const = 0;
virtual extent_types_t get_type() const = 0;
virtual val_t get_val() const = 0;
virtual key_t get_key() const = 0;
virtual PhysicalNodeMappingRef<key_t, val_t> duplicate() const = 0;
virtual bool has_been_invalidated() const = 0;
virtual CachedExtentRef get_parent() const = 0;
virtual uint16_t get_pos() const = 0;
virtual get_child_ret_t<LogicalCachedExtent>
get_logical_extent(Transaction &t) = 0;
void link_child(ChildableCachedExtent *c) {
ceph_assert(child_pos);
child_pos->link_child(c);
}
virtual ~PhysicalNodeMapping() {}
protected:
std::optional<child_pos_t> child_pos = std::nullopt;
};
using LBAMapping = PhysicalNodeMapping<laddr_t, paddr_t>;
using LBAMappingRef = PhysicalNodeMappingRef<laddr_t, paddr_t>;
std::ostream &operator<<(std::ostream &out, const LBAMapping &rhs);
using lba_pin_list_t = std::list<LBAMappingRef>;
std::ostream &operator<<(std::ostream &out, const lba_pin_list_t &rhs);
using BackrefMapping = PhysicalNodeMapping<paddr_t, laddr_t>;
using BackrefMappingRef = PhysicalNodeMappingRef<paddr_t, laddr_t>;
using backref_pin_list_t = std::list<BackrefMappingRef>;
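/**
 * Illustrative sketch (hypothetical caller, not part of this header): code
 * consuming a mapping's get_logical_extent() branches on whether the child
 * extent is already reachable from the parent node.
 *
 *   auto ret = mapping->get_logical_extent(t);
 *   if (ret.has_child()) {
 *     return std::move(ret.get_child_fut());
 *   } else {
 *     auto &pos = ret.get_child_pos();
 *     // read the extent by some other means (e.g. from Cache), then link it
 *     // back to its parent node:
 *     //   pos.link_child(extent.get());
 *   }
 */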
/**
* RetiredExtentPlaceholder
*
* Cache::retire_extent_addr(Transaction&, paddr_t, extent_len_t) can retire an
* extent not currently in cache. In that case, in order to detect transaction
* invalidation, we need to add a placeholder to the cache to create the
 * mapping back to the transaction. Whenever a transaction tries to read the
 * placeholder extent, Cache is responsible for replacing the placeholder with
 * the real extent. In any case, no placeholder extents should escape the
 * Cache interface boundary.
*/
class RetiredExtentPlaceholder : public CachedExtent {
public:
RetiredExtentPlaceholder(extent_len_t length)
: CachedExtent(CachedExtent::retired_placeholder_t{}, length) {}
CachedExtentRef duplicate_for_write(Transaction&) final {
ceph_assert(0 == "Should never happen for a placeholder");
return CachedExtentRef();
}
ceph::bufferlist get_delta() final {
ceph_assert(0 == "Should never happen for a placeholder");
return ceph::bufferlist();
}
static constexpr extent_types_t TYPE = extent_types_t::RETIRED_PLACEHOLDER;
extent_types_t get_type() const final {
return TYPE;
}
void apply_delta_and_adjust_crc(
paddr_t base, const ceph::bufferlist &bl) final {
ceph_assert(0 == "Should never happen for a placeholder");
}
bool is_logical() const final {
return false;
}
std::ostream &print_detail(std::ostream &out) const final {
return out << ", RetiredExtentPlaceholder";
}
void on_delta_write(paddr_t record_block_offset) final {
ceph_assert(0 == "Should never happen for a placeholder");
}
};
class parent_tracker_t
: public boost::intrusive_ref_counter<
parent_tracker_t, boost::thread_unsafe_counter> {
public:
parent_tracker_t(CachedExtentRef parent)
: parent(parent) {}
parent_tracker_t(CachedExtent* parent)
: parent(parent) {}
~parent_tracker_t();
template <typename T = CachedExtent>
TCachedExtentRef<T> get_parent() const {
ceph_assert(parent);
if constexpr (std::is_same_v<T, CachedExtent>) {
return parent;
} else {
return parent->template cast<T>();
}
}
void reset_parent(CachedExtentRef p) {
parent = p;
}
bool is_valid() const {
return parent && parent->is_valid();
}
private:
CachedExtentRef parent;
};
std::ostream &operator<<(std::ostream &, const parent_tracker_t &);
using parent_tracker_ref = boost::intrusive_ptr<parent_tracker_t>;
class ChildableCachedExtent : public CachedExtent {
public:
template <typename... T>
ChildableCachedExtent(T&&... t) : CachedExtent(std::forward<T>(t)...) {}
bool has_parent_tracker() const {
return (bool)parent_tracker;
}
void reset_parent_tracker(parent_tracker_t *p = nullptr) {
parent_tracker.reset(p);
}
bool is_parent_valid() const {
return parent_tracker && parent_tracker->is_valid();
}
template <typename T = CachedExtent>
TCachedExtentRef<T> get_parent_node() const {
assert(parent_tracker);
return parent_tracker->template get_parent<T>();
}
void take_prior_parent_tracker() {
auto &prior = (ChildableCachedExtent&)(*get_prior_instance());
parent_tracker = prior.parent_tracker;
}
std::ostream &print_detail(std::ostream &out) const final;
private:
parent_tracker_ref parent_tracker;
virtual std::ostream &_print_detail(std::ostream &out) const {
return out;
}
};
/**
* LogicalCachedExtent
*
* CachedExtent with associated lba mapping.
*
* Users of TransactionManager should be using extents derived from
* LogicalCachedExtent.
*/
class LogicalCachedExtent : public ChildableCachedExtent {
public:
template <typename... T>
LogicalCachedExtent(T&&... t)
: ChildableCachedExtent(std::forward<T>(t)...)
{}
bool has_laddr() const {
return laddr != L_ADDR_NULL;
}
laddr_t get_laddr() const {
assert(laddr != L_ADDR_NULL);
return laddr;
}
void set_laddr(laddr_t nladdr) {
laddr = nladdr;
}
void apply_delta_and_adjust_crc(
paddr_t base, const ceph::bufferlist &bl) final {
apply_delta(bl);
set_last_committed_crc(get_crc32c());
}
bool is_logical() const final {
return true;
}
std::ostream &_print_detail(std::ostream &out) const final;
void on_replace_prior(Transaction &t) final;
virtual ~LogicalCachedExtent();
protected:
virtual void apply_delta(const ceph::bufferlist &bl) = 0;
virtual std::ostream &print_detail_l(std::ostream &out) const {
return out;
}
virtual void logical_on_delta_write() {}
void on_delta_write(paddr_t record_block_offset) final {
assert(is_exist_mutation_pending() ||
get_prior_instance());
logical_on_delta_write();
}
private:
laddr_t laddr = L_ADDR_NULL;
};
using LogicalCachedExtentRef = TCachedExtentRef<LogicalCachedExtent>;
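/**
 * Illustrative sketch (hypothetical user extent, bodies elided): a concrete
 * extent type derives from LogicalCachedExtent and provides the type/delta
 * hooks declared above, e.g.
 *
 *   struct MyExtent : LogicalCachedExtent {
 *     using LogicalCachedExtent::LogicalCachedExtent;
 *     static constexpr extent_types_t TYPE = ...; // some registered type
 *     extent_types_t get_type() const final { return TYPE; }
 *     CachedExtentRef duplicate_for_write(Transaction &t) final { ... }
 *     ceph::bufferlist get_delta() final { ... }
 *     void apply_delta(const ceph::bufferlist &bl) final { ... }
 *   };
 */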
struct ref_laddr_cmp {
using is_transparent = laddr_t;
bool operator()(const LogicalCachedExtentRef &lhs,
const LogicalCachedExtentRef &rhs) const {
return lhs->get_laddr() < rhs->get_laddr();
}
bool operator()(const laddr_t &lhs,
const LogicalCachedExtentRef &rhs) const {
return lhs < rhs->get_laddr();
}
bool operator()(const LogicalCachedExtentRef &lhs,
const laddr_t &rhs) const {
return lhs->get_laddr() < rhs;
}
};
template <typename T>
read_set_item_t<T>::read_set_item_t(T *t, CachedExtentRef ref)
: t(t), ref(ref)
{}
template <typename T>
inline bool read_set_item_t<T>::cmp_t::operator()(
const read_set_item_t<T> &lhs, const read_set_item_t<T> &rhs) const {
return lhs.ref->poffset < rhs.ref->poffset;
}
template <typename T>
inline bool read_set_item_t<T>::cmp_t::operator()(
const paddr_t &lhs, const read_set_item_t<T> &rhs) const {
return lhs < rhs.ref->poffset;
}
template <typename T>
inline bool read_set_item_t<T>::cmp_t::operator()(
const read_set_item_t<T> &lhs, const paddr_t &rhs) const {
return lhs.ref->poffset < rhs;
}
using lextent_set_t = addr_extent_set_base_t<
laddr_t,
LogicalCachedExtentRef,
ref_laddr_cmp
>;
template <typename T>
using lextent_list_t = addr_extent_list_base_t<
laddr_t, TCachedExtentRef<T>>;
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::lba_pin_list_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::CachedExtent> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::LogicalCachedExtent> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::LBAMapping> : fmt::ostream_formatter {};
#endif
| 36,763 | 27.744332 | 106 | h |
null | ceph-main/src/crimson/os/seastore/collection_manager.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
//
#include "crimson/os/seastore/collection_manager.h"
#include "crimson/os/seastore/transaction_manager.h"
#include "crimson/os/seastore/collection_manager/flat_collection_manager.h"
namespace crimson::os::seastore::collection_manager {
CollectionManagerRef create_coll_manager(TransactionManager &trans_manager) {
return CollectionManagerRef(new FlatCollectionManager(trans_manager));
}
}
| 494 | 32 | 77 | cc |
null | ceph-main/src/crimson/os/seastore/collection_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/future.hh>
#include "osd/osd_types.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/transaction_manager.h"
namespace crimson::os::seastore {
struct coll_info_t {
unsigned split_bits;
coll_info_t(unsigned bits)
: split_bits(bits) {}
bool operator==(const coll_info_t &rhs) const {
return split_bits == rhs.split_bits;
}
};
/// Interface for maintaining set of collections
class CollectionManager {
public:
using base_iertr = TransactionManager::read_extent_iertr;
/// Initialize collection manager instance for an empty store
using mkfs_iertr = TransactionManager::alloc_extent_iertr;
using mkfs_ret = mkfs_iertr::future<coll_root_t>;
virtual mkfs_ret mkfs(
Transaction &t) = 0;
/// Create collection
using create_iertr = base_iertr;
using create_ret = create_iertr::future<>;
virtual create_ret create(
coll_root_t &root,
Transaction &t,
coll_t cid,
coll_info_t info
) = 0;
/// List collections with info
using list_iertr = base_iertr;
using list_ret_bare = std::vector<std::pair<coll_t, coll_info_t>>;
using list_ret = list_iertr::future<list_ret_bare>;
virtual list_ret list(
const coll_root_t &root,
Transaction &t) = 0;
/// Remove cid
using remove_iertr = base_iertr;
using remove_ret = remove_iertr::future<>;
virtual remove_ret remove(
const coll_root_t &coll_root,
Transaction &t,
coll_t cid) = 0;
/// Update info for cid
using update_iertr = base_iertr;
using update_ret = base_iertr::future<>;
virtual update_ret update(
const coll_root_t &coll_root,
Transaction &t,
coll_t cid,
coll_info_t info
) = 0;
virtual ~CollectionManager() {}
};
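/**
 * Illustrative usage sketch (hypothetical caller, assuming an open
 * Transaction `t` and an existing coll_root_t):
 *
 *   return cm->create(coll_root, t, cid, coll_info_t(split_bits)
 *   ).si_then([&] {
 *     return cm->list(coll_root, t);
 *   }).si_then([](auto colls) {
 *     // colls is a vector of (coll_t, coll_info_t) pairs
 *   });
 */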
using CollectionManagerRef = std::unique_ptr<CollectionManager>;
namespace collection_manager {
/* create CollectionManager for Collection */
CollectionManagerRef create_coll_manager(
TransactionManager &trans_manager);
}
}
| 2,088 | 23.576471 | 70 | h |
null | ceph-main/src/crimson/os/seastore/device.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include "device.h"
#include "segment_manager.h"
#include "random_block_manager.h"
#include "random_block_manager/rbm_device.h"
namespace crimson::os::seastore {
std::ostream& operator<<(std::ostream& out, const device_spec_t& ds)
{
return out << "device_spec("
<< "magic=" << ds.magic
<< ", dtype=" << ds.dtype
<< ", " << device_id_printer_t{ds.id}
<< ")";
}
std::ostream& operator<<(std::ostream& out, const device_config_t& conf)
{
out << "device_config_t("
<< "major_dev=" << conf.major_dev
<< ", spec=" << conf.spec
<< ", meta=" << conf.meta
<< ", secondary(";
for (const auto& [k, v] : conf.secondary_devices) {
out << device_id_printer_t{k}
<< ": " << v << ", ";
}
return out << "))";
}
seastar::future<DeviceRef>
Device::make_device(const std::string& device, device_type_t dtype)
{
if (get_default_backend_of_device(dtype) == backend_type_t::SEGMENTED) {
return SegmentManager::get_segment_manager(device, dtype
).then([](DeviceRef ret) {
return ret;
});
}
assert(get_default_backend_of_device(dtype) == backend_type_t::RANDOM_BLOCK);
return get_rb_device(device
).then([](DeviceRef ret) {
return ret;
});
}
}
| 1,359 | 25.153846 | 79 | cc |
null | ceph-main/src/crimson/os/seastore/device.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <memory>
#include "include/buffer_fwd.h"
#include "crimson/common/errorator.h"
#include "crimson/os/seastore/seastore_types.h"
namespace crimson::os::seastore {
using magic_t = uint64_t;
struct device_spec_t {
magic_t magic = 0;
device_type_t dtype = device_type_t::NONE;
device_id_t id = DEVICE_ID_NULL;
DENC(device_spec_t, v, p) {
DENC_START(1, 1, p);
denc(v.magic, p);
denc(v.dtype, p);
denc(v.id, p);
DENC_FINISH(p);
}
};
std::ostream& operator<<(std::ostream&, const device_spec_t&);
using secondary_device_set_t =
std::map<device_id_t, device_spec_t>;
struct device_config_t {
bool major_dev = false;
device_spec_t spec;
seastore_meta_t meta;
secondary_device_set_t secondary_devices;
DENC(device_config_t, v, p) {
DENC_START(1, 1, p);
denc(v.major_dev, p);
denc(v.spec, p);
denc(v.meta, p);
denc(v.secondary_devices, p);
DENC_FINISH(p);
}
static device_config_t create_primary(
uuid_d new_osd_fsid,
device_id_t id,
device_type_t d_type,
secondary_device_set_t sds) {
return device_config_t{
true,
device_spec_t{
(magic_t)std::rand(),
d_type,
id},
seastore_meta_t{new_osd_fsid},
sds};
}
static device_config_t create_secondary(
uuid_d new_osd_fsid,
device_id_t id,
device_type_t d_type,
magic_t magic) {
return device_config_t{
false,
device_spec_t{
magic,
d_type,
id},
seastore_meta_t{new_osd_fsid},
secondary_device_set_t()};
}
};
std::ostream& operator<<(std::ostream&, const device_config_t&);
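/**
 * Illustrative sketch (hypothetical values): a mkfs path typically builds the
 * primary device's config via create_primary, e.g.
 *
 *   auto config = device_config_t::create_primary(
 *     new_osd_fsid,                   // uuid of the new OSD
 *     static_cast<device_id_t>(0),    // id assigned to this device
 *     device_type_t::NONE,            // placeholder; use the real type
 *     secondary_device_set_t{});      // no secondary devices here
 *   // device->mkfs(std::move(config)) ...
 */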
class Device;
using DeviceRef = std::unique_ptr<Device>;
/**
* Device
*
* Represents a general device regardless of the underlying medium.
*/
class Device {
// interfaces used by device
public:
virtual ~Device() {}
virtual seastar::future<> start() {
return seastar::now();
}
virtual seastar::future<> stop() {
return seastar::now();
}
  // called on the shard to get this shard's device
virtual Device& get_sharded_device() {
return *this;
}
using access_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::permission_denied,
crimson::ct_error::enoent>;
using mkfs_ertr = access_ertr;
using mkfs_ret = mkfs_ertr::future<>;
virtual mkfs_ret mkfs(device_config_t) = 0;
using mount_ertr = access_ertr;
using mount_ret = access_ertr::future<>;
virtual mount_ret mount() = 0;
static seastar::future<DeviceRef> make_device(
const std::string &device,
device_type_t dtype);
// interfaces used by each device shard
public:
virtual device_id_t get_device_id() const = 0;
virtual magic_t get_magic() const = 0;
virtual device_type_t get_device_type() const = 0;
virtual backend_type_t get_backend_type() const = 0;
virtual const seastore_meta_t &get_meta() const = 0;
virtual extent_len_t get_block_size() const = 0;
virtual std::size_t get_available_size() const = 0;
virtual secondary_device_set_t& get_secondary_devices() = 0;
using close_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
virtual close_ertr::future<> close() = 0;
using read_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::enoent,
crimson::ct_error::erange>;
virtual read_ertr::future<> read(
paddr_t addr,
size_t len,
ceph::bufferptr &out) = 0;
read_ertr::future<ceph::bufferptr> read(
paddr_t addr,
size_t len
) {
auto ptrref = std::make_unique<ceph::bufferptr>(
buffer::create_page_aligned(len));
return read(addr, len, *ptrref
).safe_then([ptrref=std::move(ptrref)]() mutable {
return read_ertr::make_ready_future<bufferptr>(std::move(*ptrref));
});
}
};
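/**
 * Illustrative sketch (hypothetical path and device type): opening an
 * existing device typically pairs make_device() with mount(), e.g.
 *
 *   Device::make_device("/path/to/device", dtype
 *   ).then([](DeviceRef dev) {
 *     return dev->mount(
 *     ).safe_then([dev=std::move(dev)]() mutable {
 *       return std::move(dev);
 *     });
 *   });
 */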
}
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::device_spec_t)
WRITE_CLASS_DENC(crimson::os::seastore::device_config_t)
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::device_config_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::device_spec_t> : fmt::ostream_formatter {};
#endif
| 4,399 | 24 | 102 | h |
null | ceph-main/src/crimson/os/seastore/extent_placement_manager.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#include "crimson/os/seastore/extent_placement_manager.h"
#include "crimson/common/config_proxy.h"
#include "crimson/os/seastore/logging.h"
SET_SUBSYS(seastore_epm);
namespace crimson::os::seastore {
SegmentedOolWriter::SegmentedOolWriter(
data_category_t category,
rewrite_gen_t gen,
SegmentProvider& sp,
SegmentSeqAllocator &ssa)
: segment_allocator(nullptr, category, gen, sp, ssa),
record_submitter(crimson::common::get_conf<uint64_t>(
"seastore_journal_iodepth_limit"),
crimson::common::get_conf<uint64_t>(
"seastore_journal_batch_capacity"),
crimson::common::get_conf<Option::size_t>(
"seastore_journal_batch_flush_size"),
crimson::common::get_conf<double>(
"seastore_journal_batch_preferred_fullness"),
segment_allocator)
{
}
SegmentedOolWriter::alloc_write_ertr::future<>
SegmentedOolWriter::write_record(
Transaction& t,
record_t&& record,
std::list<LogicalCachedExtentRef>&& extents,
bool with_atomic_roll_segment)
{
LOG_PREFIX(SegmentedOolWriter::write_record);
assert(extents.size());
assert(extents.size() == record.extents.size());
assert(!record.deltas.size());
// account transactional ool writes before write()
auto& stats = t.get_ool_write_stats();
stats.extents.num += extents.size();
stats.extents.bytes += record.size.dlength;
stats.md_bytes += record.size.get_raw_mdlength();
stats.num_records += 1;
return record_submitter.submit(
std::move(record),
with_atomic_roll_segment
).safe_then([this, FNAME, &t, extents=std::move(extents)
](record_locator_t ret) mutable {
DEBUGT("{} finish with {} and {} extents",
t, segment_allocator.get_name(),
ret, extents.size());
paddr_t extent_addr = ret.record_block_base;
for (auto& extent : extents) {
TRACET("{} ool extent written at {} -- {}",
t, segment_allocator.get_name(),
extent_addr, *extent);
t.update_delayed_ool_extent_addr(extent, extent_addr);
extent_addr = extent_addr.as_seg_paddr().add_offset(
extent->get_length());
}
});
}
SegmentedOolWriter::alloc_write_iertr::future<>
SegmentedOolWriter::do_write(
Transaction& t,
std::list<LogicalCachedExtentRef>& extents)
{
LOG_PREFIX(SegmentedOolWriter::do_write);
assert(!extents.empty());
if (!record_submitter.is_available()) {
DEBUGT("{} extents={} wait ...",
t, segment_allocator.get_name(),
extents.size());
return trans_intr::make_interruptible(
record_submitter.wait_available()
).si_then([this, &t, &extents] {
return do_write(t, extents);
});
}
record_t record(TRANSACTION_TYPE_NULL);
std::list<LogicalCachedExtentRef> pending_extents;
auto commit_time = seastar::lowres_system_clock::now();
for (auto it = extents.begin(); it != extents.end();) {
auto& extent = *it;
record_size_t wouldbe_rsize = record.size;
wouldbe_rsize.account_extent(extent->get_bptr().length());
using action_t = journal::RecordSubmitter::action_t;
action_t action = record_submitter.check_action(wouldbe_rsize);
if (action == action_t::ROLL) {
auto num_extents = pending_extents.size();
DEBUGT("{} extents={} submit {} extents and roll, unavailable ...",
t, segment_allocator.get_name(),
extents.size(), num_extents);
auto fut_write = alloc_write_ertr::now();
if (num_extents > 0) {
assert(record_submitter.check_action(record.size) !=
action_t::ROLL);
fut_write = write_record(
t, std::move(record), std::move(pending_extents),
true/* with_atomic_roll_segment */);
}
return trans_intr::make_interruptible(
record_submitter.roll_segment(
).safe_then([fut_write=std::move(fut_write)]() mutable {
return std::move(fut_write);
})
).si_then([this, &t, &extents] {
return do_write(t, extents);
});
}
TRACET("{} extents={} add extent to record -- {}",
t, segment_allocator.get_name(),
extents.size(), *extent);
ceph::bufferlist bl;
extent->prepare_write();
bl.append(extent->get_bptr());
assert(bl.length() == extent->get_length());
auto modify_time = extent->get_modify_time();
if (modify_time == NULL_TIME) {
modify_time = commit_time;
}
record.push_back(
extent_t{
extent->get_type(),
extent->get_laddr(),
std::move(bl)},
modify_time);
pending_extents.push_back(extent);
it = extents.erase(it);
assert(record_submitter.check_action(record.size) == action);
if (action == action_t::SUBMIT_FULL) {
DEBUGT("{} extents={} submit {} extents ...",
t, segment_allocator.get_name(),
extents.size(), pending_extents.size());
return trans_intr::make_interruptible(
write_record(t, std::move(record), std::move(pending_extents))
).si_then([this, &t, &extents] {
if (!extents.empty()) {
return do_write(t, extents);
} else {
return alloc_write_iertr::now();
}
});
}
// SUBMIT_NOT_FULL: evaluate the next extent
}
auto num_extents = pending_extents.size();
DEBUGT("{} submit the rest {} extents ...",
t, segment_allocator.get_name(),
num_extents);
assert(num_extents > 0);
return trans_intr::make_interruptible(
write_record(t, std::move(record), std::move(pending_extents)));
}
SegmentedOolWriter::alloc_write_iertr::future<>
SegmentedOolWriter::alloc_write_ool_extents(
Transaction& t,
std::list<LogicalCachedExtentRef>& extents)
{
if (extents.empty()) {
return alloc_write_iertr::now();
}
return seastar::with_gate(write_guard, [this, &t, &extents] {
return do_write(t, extents);
});
}
void ExtentPlacementManager::init(
JournalTrimmerImplRef &&trimmer,
AsyncCleanerRef &&cleaner,
AsyncCleanerRef &&cold_cleaner)
{
writer_refs.clear();
auto cold_segment_cleaner = dynamic_cast<SegmentCleaner*>(cold_cleaner.get());
dynamic_max_rewrite_generation = MIN_COLD_GENERATION - 1;
if (cold_segment_cleaner) {
dynamic_max_rewrite_generation = MAX_REWRITE_GENERATION;
}
if (trimmer->get_journal_type() == journal_type_t::SEGMENTED) {
auto segment_cleaner = dynamic_cast<SegmentCleaner*>(cleaner.get());
ceph_assert(segment_cleaner != nullptr);
auto num_writers = generation_to_writer(dynamic_max_rewrite_generation + 1);
data_writers_by_gen.resize(num_writers, {});
for (rewrite_gen_t gen = OOL_GENERATION; gen < MIN_COLD_GENERATION; ++gen) {
writer_refs.emplace_back(std::make_unique<SegmentedOolWriter>(
data_category_t::DATA, gen, *segment_cleaner,
*ool_segment_seq_allocator));
data_writers_by_gen[generation_to_writer(gen)] = writer_refs.back().get();
}
md_writers_by_gen.resize(num_writers, {});
for (rewrite_gen_t gen = OOL_GENERATION; gen < MIN_COLD_GENERATION; ++gen) {
writer_refs.emplace_back(std::make_unique<SegmentedOolWriter>(
data_category_t::METADATA, gen, *segment_cleaner,
*ool_segment_seq_allocator));
md_writers_by_gen[generation_to_writer(gen)] = writer_refs.back().get();
}
for (auto *device : segment_cleaner->get_segment_manager_group()
->get_segment_managers()) {
add_device(device);
}
} else {
assert(trimmer->get_journal_type() == journal_type_t::RANDOM_BLOCK);
auto rb_cleaner = dynamic_cast<RBMCleaner*>(cleaner.get());
ceph_assert(rb_cleaner != nullptr);
auto num_writers = generation_to_writer(dynamic_max_rewrite_generation + 1);
data_writers_by_gen.resize(num_writers, {});
md_writers_by_gen.resize(num_writers, {});
writer_refs.emplace_back(std::make_unique<RandomBlockOolWriter>(
rb_cleaner));
// TODO: implement eviction in RBCleaner and introduce further writers
data_writers_by_gen[generation_to_writer(OOL_GENERATION)] = writer_refs.back().get();
md_writers_by_gen[generation_to_writer(OOL_GENERATION)] = writer_refs.back().get();
for (auto *rb : rb_cleaner->get_rb_group()->get_rb_managers()) {
add_device(rb->get_device());
}
}
if (cold_segment_cleaner) {
for (rewrite_gen_t gen = MIN_COLD_GENERATION; gen < REWRITE_GENERATIONS; ++gen) {
writer_refs.emplace_back(std::make_unique<SegmentedOolWriter>(
data_category_t::DATA, gen, *cold_segment_cleaner,
*ool_segment_seq_allocator));
data_writers_by_gen[generation_to_writer(gen)] = writer_refs.back().get();
}
for (rewrite_gen_t gen = MIN_COLD_GENERATION; gen < REWRITE_GENERATIONS; ++gen) {
writer_refs.emplace_back(std::make_unique<SegmentedOolWriter>(
data_category_t::METADATA, gen, *cold_segment_cleaner,
*ool_segment_seq_allocator));
md_writers_by_gen[generation_to_writer(gen)] = writer_refs.back().get();
}
for (auto *device : cold_segment_cleaner->get_segment_manager_group()
->get_segment_managers()) {
add_device(device);
}
}
background_process.init(std::move(trimmer),
std::move(cleaner),
std::move(cold_cleaner));
if (cold_segment_cleaner) {
ceph_assert(get_main_backend_type() == backend_type_t::SEGMENTED);
ceph_assert(background_process.has_cold_tier());
} else {
ceph_assert(!background_process.has_cold_tier());
}
}
void ExtentPlacementManager::set_primary_device(Device *device)
{
ceph_assert(primary_device == nullptr);
primary_device = device;
ceph_assert(devices_by_id[device->get_device_id()] == device);
}
ExtentPlacementManager::open_ertr::future<>
ExtentPlacementManager::open_for_write()
{
LOG_PREFIX(ExtentPlacementManager::open_for_write);
INFO("started with {} devices", num_devices);
ceph_assert(primary_device != nullptr);
return crimson::do_for_each(data_writers_by_gen, [](auto &writer) {
if (writer) {
return writer->open();
}
return open_ertr::now();
}).safe_then([this] {
return crimson::do_for_each(md_writers_by_gen, [](auto &writer) {
if (writer) {
return writer->open();
}
return open_ertr::now();
});
});
}
ExtentPlacementManager::dispatch_result_t
ExtentPlacementManager::dispatch_delayed_extents(Transaction &t)
{
dispatch_result_t res;
res.delayed_extents = t.get_delayed_alloc_list();
// init projected usage
for (auto &extent : t.get_inline_block_list()) {
if (extent->is_valid()) {
res.usage.inline_usage += extent->get_length();
res.usage.cleaner_usage.main_usage += extent->get_length();
}
}
for (auto &extent : res.delayed_extents) {
if (dispatch_delayed_extent(extent)) {
res.usage.inline_usage += extent->get_length();
res.usage.cleaner_usage.main_usage += extent->get_length();
t.mark_delayed_extent_inline(extent);
} else {
if (extent->get_rewrite_generation() < MIN_COLD_GENERATION) {
res.usage.cleaner_usage.main_usage += extent->get_length();
} else {
assert(background_process.has_cold_tier());
res.usage.cleaner_usage.cold_ool_usage += extent->get_length();
}
t.mark_delayed_extent_ool(extent);
auto writer_ptr = get_writer(
extent->get_user_hint(),
get_extent_category(extent->get_type()),
extent->get_rewrite_generation());
res.alloc_map[writer_ptr].emplace_back(extent);
}
}
return res;
}
ExtentPlacementManager::alloc_paddr_iertr::future<>
ExtentPlacementManager::write_delayed_ool_extents(
Transaction& t,
extents_by_writer_t& alloc_map) {
return trans_intr::do_for_each(alloc_map, [&t](auto& p) {
auto writer = p.first;
auto& extents = p.second;
return writer->alloc_write_ool_extents(t, extents);
});
}
ExtentPlacementManager::alloc_paddr_iertr::future<>
ExtentPlacementManager::write_preallocated_ool_extents(
Transaction &t,
std::list<LogicalCachedExtentRef> extents)
{
LOG_PREFIX(ExtentPlacementManager::write_preallocated_ool_extents);
DEBUGT("start with {} allocated extents",
t, extents.size());
assert(writer_refs.size());
return seastar::do_with(
std::map<ExtentOolWriter*, std::list<LogicalCachedExtentRef>>(),
[this, &t, extents=std::move(extents)](auto& alloc_map) {
for (auto& extent : extents) {
auto writer_ptr = get_writer(
extent->get_user_hint(),
get_extent_category(extent->get_type()),
extent->get_rewrite_generation());
alloc_map[writer_ptr].emplace_back(extent);
}
return trans_intr::do_for_each(alloc_map, [&t](auto& p) {
auto writer = p.first;
auto& extents = p.second;
return writer->alloc_write_ool_extents(t, extents);
});
});
}
ExtentPlacementManager::close_ertr::future<>
ExtentPlacementManager::close()
{
LOG_PREFIX(ExtentPlacementManager::close);
INFO("started");
return crimson::do_for_each(data_writers_by_gen, [](auto &writer) {
if (writer) {
return writer->close();
}
return close_ertr::now();
}).safe_then([this] {
return crimson::do_for_each(md_writers_by_gen, [](auto &writer) {
if (writer) {
return writer->close();
}
return close_ertr::now();
});
});
}
void ExtentPlacementManager::BackgroundProcess::log_state(const char *caller) const
{
LOG_PREFIX(BackgroundProcess::log_state);
DEBUG("caller {}, {}, {}",
caller,
JournalTrimmerImpl::stat_printer_t{*trimmer, true},
AsyncCleaner::stat_printer_t{*main_cleaner, true});
if (has_cold_tier()) {
DEBUG("caller {}, cold_cleaner: {}",
caller,
AsyncCleaner::stat_printer_t{*cold_cleaner, true});
}
}
void ExtentPlacementManager::BackgroundProcess::start_background()
{
LOG_PREFIX(BackgroundProcess::start_background);
INFO("{}, {}",
JournalTrimmerImpl::stat_printer_t{*trimmer, true},
AsyncCleaner::stat_printer_t{*main_cleaner, true});
if (has_cold_tier()) {
INFO("cold_cleaner: {}",
AsyncCleaner::stat_printer_t{*cold_cleaner, true});
}
ceph_assert(trimmer->check_is_ready());
ceph_assert(state == state_t::SCAN_SPACE);
assert(!is_running());
process_join = seastar::now();
state = state_t::RUNNING;
assert(is_running());
process_join = run();
}
seastar::future<>
ExtentPlacementManager::BackgroundProcess::stop_background()
{
return seastar::futurize_invoke([this] {
if (!is_running()) {
if (state != state_t::HALT) {
state = state_t::STOP;
}
return seastar::now();
}
auto ret = std::move(*process_join);
process_join.reset();
state = state_t::HALT;
assert(!is_running());
do_wake_background();
return ret;
}).then([this] {
LOG_PREFIX(BackgroundProcess::stop_background);
INFO("done, {}, {}",
JournalTrimmerImpl::stat_printer_t{*trimmer, true},
AsyncCleaner::stat_printer_t{*main_cleaner, true});
if (has_cold_tier()) {
INFO("done, cold_cleaner: {}",
AsyncCleaner::stat_printer_t{*cold_cleaner, true});
}
// run_until_halt() can be called at HALT
});
}
seastar::future<>
ExtentPlacementManager::BackgroundProcess::run_until_halt()
{
ceph_assert(state == state_t::HALT);
assert(!is_running());
if (is_running_until_halt) {
return seastar::now();
}
is_running_until_halt = true;
return seastar::do_until(
[this] {
log_state("run_until_halt");
assert(is_running_until_halt);
if (background_should_run()) {
return false;
} else {
is_running_until_halt = false;
return true;
}
},
[this] {
return do_background_cycle();
}
);
}
seastar::future<>
ExtentPlacementManager::BackgroundProcess::reserve_projected_usage(
io_usage_t usage)
{
if (!is_ready()) {
return seastar::now();
}
ceph_assert(!blocking_io);
// The pipeline configuration prevents another IO from entering
// prepare until the prior one exits and clears this.
++stats.io_count;
auto res = try_reserve_io(usage);
if (res.is_successful()) {
return seastar::now();
} else {
abort_io_usage(usage, res);
if (!res.reserve_inline_success) {
++stats.io_blocked_count_trim;
}
if (!res.cleaner_result.is_successful()) {
++stats.io_blocked_count_clean;
}
++stats.io_blocking_num;
++stats.io_blocked_count;
stats.io_blocked_sum += stats.io_blocking_num;
return seastar::repeat([this, usage] {
blocking_io = seastar::promise<>();
return blocking_io->get_future(
).then([this, usage] {
ceph_assert(!blocking_io);
auto res = try_reserve_io(usage);
if (res.is_successful()) {
assert(stats.io_blocking_num == 1);
--stats.io_blocking_num;
return seastar::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::yes);
} else {
abort_io_usage(usage, res);
return seastar::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::no);
}
});
});
}
}
seastar::future<>
ExtentPlacementManager::BackgroundProcess::run()
{
assert(is_running());
return seastar::repeat([this] {
if (!is_running()) {
log_state("run(exit)");
return seastar::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::yes);
}
return seastar::futurize_invoke([this] {
if (background_should_run()) {
log_state("run(background)");
return do_background_cycle();
} else {
log_state("run(block)");
ceph_assert(!blocking_background);
blocking_background = seastar::promise<>();
return blocking_background->get_future();
}
}).then([] {
return seastar::stop_iteration::no;
});
});
}
/**
* Reservation Process
*
 * Most transactions need to reserve their space usage before performing the
 * ool writes and committing. If the space reservation is unsuccessful, the
 * current transaction is blocked and waits for background transactions to
 * finish.
*
* The following are the reservation requirements for each transaction type:
* 1. MUTATE transaction:
 *    (1) inline usage on the trimmer,
 *    (2) inline usage with OOL usage on the main cleaner,
 *    (3) cold OOL usage on the cold cleaner (if it exists).
 * 2. TRIM_DIRTY/TRIM_ALLOC transaction:
 *    (1) all extents usage on the main cleaner,
 *    (2) usage on the cold cleaner (if it exists).
 * 3. CLEANER_MAIN:
 *    (1) cleaned extents size on the cold cleaner (if it exists).
 * 4. CLEANER_COLD transaction does not require space reservation.
*
* The reserve implementation should satisfy the following conditions:
 * 1. The reservation should be atomic. If a reservation involves several
 *    sub-reservations, such as the MUTATE transaction that needs to reserve space
 *    on both the trimmer and the cleaner at the same time, it succeeds only if
 *    all of its sub-reservations succeed. If one or more of them fail, the entire
 *    reservation fails, and any successful sub-reservations must be reverted.
* 2. The reserve/block relationship should form a DAG to avoid deadlock. For example,
 *    a TRIM_ALLOC transaction might be blocked by the cleaner due to a failure
 *    to reserve space on the cleaner. In such cases, the cleaner must not
 *    reserve space on the trimmer, since the trimmer itself is already blocked.
*
* Finally the reserve relationship can be represented as follows:
*
* +-------------------------+----------------+
* | | |
* | v v
* MUTATE ---> TRIM_* ---> CLEANER_MAIN ---> CLEANER_COLD
* | ^
* | |
* +--------------------------------+
*/
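// Illustrative sketch (not actual code, mirrors reserve_projected_usage
// above): for a MUTATE transaction the reservation is the composition of the
// inline and cleaner reservations, and a partial failure reverts whatever
// succeeded before retrying, e.g.
//
//   auto res = try_reserve_io(usage);  // trimmer + main/cold cleaners
//   if (!res.is_successful()) {
//     abort_io_usage(usage, res);      // release the successful parts
//     // ... block the IO and retry once background work frees space
//   }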
bool ExtentPlacementManager::BackgroundProcess::try_reserve_cold(std::size_t usage)
{
if (has_cold_tier()) {
return cold_cleaner->try_reserve_projected_usage(usage);
} else {
assert(usage == 0);
return true;
}
}
void ExtentPlacementManager::BackgroundProcess::abort_cold_usage(
std::size_t usage, bool success)
{
if (has_cold_tier() && success) {
cold_cleaner->release_projected_usage(usage);
}
}
reserve_cleaner_result_t
ExtentPlacementManager::BackgroundProcess::try_reserve_cleaner(
const cleaner_usage_t &usage)
{
return {
main_cleaner->try_reserve_projected_usage(usage.main_usage),
try_reserve_cold(usage.cold_ool_usage)
};
}
void ExtentPlacementManager::BackgroundProcess::abort_cleaner_usage(
const cleaner_usage_t &usage,
const reserve_cleaner_result_t &result)
{
if (result.reserve_main_success) {
main_cleaner->release_projected_usage(usage.main_usage);
}
abort_cold_usage(usage.cold_ool_usage, result.reserve_cold_success);
}
reserve_io_result_t
ExtentPlacementManager::BackgroundProcess::try_reserve_io(
const io_usage_t &usage)
{
return {
trimmer->try_reserve_inline_usage(usage.inline_usage),
try_reserve_cleaner(usage.cleaner_usage)
};
}
void ExtentPlacementManager::BackgroundProcess::abort_io_usage(
const io_usage_t &usage,
const reserve_io_result_t &result)
{
if (result.reserve_inline_success) {
trimmer->release_inline_usage(usage.inline_usage);
}
abort_cleaner_usage(usage.cleaner_usage, result.cleaner_result);
}
seastar::future<>
ExtentPlacementManager::BackgroundProcess::do_background_cycle()
{
assert(is_ready());
bool should_trim = trimmer->should_trim();
bool proceed_trim = false;
auto trim_size = trimmer->get_trim_size_per_cycle();
cleaner_usage_t trim_usage{
trim_size,
    // We take a cautious policy here: the trimmer also reserves the max
    // value on the cold cleaner even if no extents will be rewritten to
    // the cold tier. The cleaner follows the same policy.
    // The reason is that we don't know the exact reservation value until the
    // construction of the trimmer transaction completes, after which the
    // reservation might fail and the trimmer could then be invalidated by the
    // cleaner. Reserving the max size up front helps us avoid this trouble.
has_cold_tier() ? trim_size : 0
};
reserve_cleaner_result_t trim_reserve_res;
if (should_trim) {
trim_reserve_res = try_reserve_cleaner(trim_usage);
if (trim_reserve_res.is_successful()) {
proceed_trim = true;
} else {
abort_cleaner_usage(trim_usage, trim_reserve_res);
}
}
if (proceed_trim) {
return trimmer->trim(
).finally([this, trim_usage] {
abort_cleaner_usage(trim_usage, {true, true});
});
} else {
bool should_clean_main =
main_cleaner_should_run() ||
// make sure cleaner will start
// when the trimmer should run but
// failed to reserve space.
(should_trim && !proceed_trim &&
!trim_reserve_res.reserve_main_success);
bool proceed_clean_main = false;
auto main_cold_usage = main_cleaner->get_reclaim_size_per_cycle();
if (should_clean_main) {
if (has_cold_tier()) {
proceed_clean_main = try_reserve_cold(main_cold_usage);
} else {
proceed_clean_main = true;
}
}
bool proceed_clean_cold = false;
if (has_cold_tier() &&
(cold_cleaner->should_clean_space() ||
(should_trim && !proceed_trim &&
!trim_reserve_res.reserve_cold_success) ||
(should_clean_main && !proceed_clean_main))) {
proceed_clean_cold = true;
}
if (!proceed_clean_main && !proceed_clean_cold) {
ceph_abort("no background process will start");
}
return seastar::when_all(
[this, proceed_clean_main, main_cold_usage] {
if (!proceed_clean_main) {
return seastar::now();
}
return main_cleaner->clean_space(
).handle_error(
crimson::ct_error::assert_all{
"do_background_cycle encountered invalid error in main clean_space"
}
).finally([this, main_cold_usage] {
abort_cold_usage(main_cold_usage, true);
});
},
[this, proceed_clean_cold] {
if (!proceed_clean_cold) {
return seastar::now();
}
return cold_cleaner->clean_space(
).handle_error(
crimson::ct_error::assert_all{
"do_background_cycle encountered invalid error in cold clean_space"
}
);
}
).discard_result();
}
}
void ExtentPlacementManager::BackgroundProcess::register_metrics()
{
namespace sm = seastar::metrics;
metrics.add_group("background_process", {
sm::make_counter("io_count", stats.io_count,
sm::description("the sum of IOs")),
sm::make_counter("io_blocked_count", stats.io_blocked_count,
sm::description("IOs that are blocked by gc")),
sm::make_counter("io_blocked_count_trim", stats.io_blocked_count_trim,
sm::description("IOs that are blocked by trimming")),
sm::make_counter("io_blocked_count_clean", stats.io_blocked_count_clean,
sm::description("IOs that are blocked by cleaning")),
sm::make_counter("io_blocked_sum", stats.io_blocked_sum,
sm::description("the sum of blocking IOs"))
});
}
RandomBlockOolWriter::alloc_write_iertr::future<>
RandomBlockOolWriter::alloc_write_ool_extents(
Transaction& t,
std::list<LogicalCachedExtentRef>& extents)
{
if (extents.empty()) {
return alloc_write_iertr::now();
}
return seastar::with_gate(write_guard, [this, &t, &extents] {
return do_write(t, extents);
});
}
RandomBlockOolWriter::alloc_write_iertr::future<>
RandomBlockOolWriter::do_write(
Transaction& t,
std::list<LogicalCachedExtentRef>& extents)
{
LOG_PREFIX(RandomBlockOolWriter::do_write);
assert(!extents.empty());
DEBUGT("start with {} allocated extents",
t, extents.size());
return trans_intr::do_for_each(extents,
[this, &t, FNAME](auto& ex) {
auto paddr = ex->get_paddr();
assert(paddr.is_absolute());
RandomBlockManager * rbm = rb_cleaner->get_rbm(paddr);
assert(rbm);
TRACE("extent {}, allocated addr {}", fmt::ptr(ex.get()), paddr);
auto& stats = t.get_ool_write_stats();
stats.extents.num += 1;
stats.extents.bytes += ex->get_length();
stats.num_records += 1;
ex->prepare_write();
return rbm->write(paddr,
ex->get_bptr()
).handle_error(
alloc_write_iertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error when writing record"}
).safe_then([&t, &ex, paddr, FNAME]() {
TRACET("ool extent written at {} -- {}",
t, paddr, *ex);
t.mark_allocated_extent_ool(ex);
return alloc_write_iertr::now();
});
});
}
}
| 27,189 | 32.609394 | 89 | cc |
null | ceph-main/src/crimson/os/seastore/extent_placement_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include "seastar/core/gate.hh"
#include "crimson/os/seastore/async_cleaner.h"
#include "crimson/os/seastore/cached_extent.h"
#include "crimson/os/seastore/journal/segment_allocator.h"
#include "crimson/os/seastore/journal/record_submitter.h"
#include "crimson/os/seastore/transaction.h"
#include "crimson/os/seastore/random_block_manager.h"
#include "crimson/os/seastore/random_block_manager/block_rb_manager.h"
#include "crimson/os/seastore/randomblock_manager_group.h"
class transaction_manager_test_t;
namespace crimson::os::seastore {
/**
* ExtentOolWriter
*
* Write the extents as out-of-line and allocate the physical addresses.
* Different writers write extents to different locations.
*/
class ExtentOolWriter {
using base_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
public:
virtual ~ExtentOolWriter() {}
using open_ertr = base_ertr;
virtual open_ertr::future<> open() = 0;
virtual paddr_t alloc_paddr(extent_len_t length) = 0;
using alloc_write_ertr = base_ertr;
using alloc_write_iertr = trans_iertr<alloc_write_ertr>;
virtual alloc_write_iertr::future<> alloc_write_ool_extents(
Transaction &t,
std::list<LogicalCachedExtentRef> &extents) = 0;
using close_ertr = base_ertr;
virtual close_ertr::future<> close() = 0;
};
using ExtentOolWriterRef = std::unique_ptr<ExtentOolWriter>;
/**
* SegmentedOolWriter
*
* Different writers write extents to different out-of-line segments provided
* by the SegmentProvider.
*/
class SegmentedOolWriter : public ExtentOolWriter {
public:
SegmentedOolWriter(data_category_t category,
rewrite_gen_t gen,
SegmentProvider &sp,
SegmentSeqAllocator &ssa);
open_ertr::future<> open() final {
return record_submitter.open(false).discard_result();
}
alloc_write_iertr::future<> alloc_write_ool_extents(
Transaction &t,
std::list<LogicalCachedExtentRef> &extents) final;
close_ertr::future<> close() final {
return write_guard.close().then([this] {
return record_submitter.close();
}).safe_then([this] {
write_guard = seastar::gate();
});
}
paddr_t alloc_paddr(extent_len_t length) final {
return make_delayed_temp_paddr(0);
}
private:
alloc_write_iertr::future<> do_write(
Transaction& t,
std::list<LogicalCachedExtentRef> &extent);
alloc_write_ertr::future<> write_record(
Transaction& t,
record_t&& record,
std::list<LogicalCachedExtentRef> &&extents,
bool with_atomic_roll_segment=false);
journal::SegmentAllocator segment_allocator;
journal::RecordSubmitter record_submitter;
seastar::gate write_guard;
};
class RandomBlockOolWriter : public ExtentOolWriter {
public:
RandomBlockOolWriter(RBMCleaner* rb_cleaner) :
rb_cleaner(rb_cleaner) {}
using open_ertr = ExtentOolWriter::open_ertr;
open_ertr::future<> open() final {
return open_ertr::now();
}
alloc_write_iertr::future<> alloc_write_ool_extents(
Transaction &t,
std::list<LogicalCachedExtentRef> &extents) final;
close_ertr::future<> close() final {
return write_guard.close().then([this] {
write_guard = seastar::gate();
return close_ertr::now();
});
}
paddr_t alloc_paddr(extent_len_t length) final {
assert(rb_cleaner);
return rb_cleaner->alloc_paddr(length);
}
private:
alloc_write_iertr::future<> do_write(
Transaction& t,
std::list<LogicalCachedExtentRef> &extent);
RBMCleaner* rb_cleaner;
seastar::gate write_guard;
};
struct cleaner_usage_t {
  // The size of all extents written to the main devices, including inline extents
// and out-of-line extents.
std::size_t main_usage = 0;
  // The size of extents written to the cold devices
std::size_t cold_ool_usage = 0;
};
struct reserve_cleaner_result_t {
bool reserve_main_success = true;
bool reserve_cold_success = true;
bool is_successful() const {
return reserve_main_success &&
reserve_cold_success;
}
};
/**
* io_usage_t
*
* io_usage_t describes the space usage consumed by client IO.
*/
struct io_usage_t {
// The total size of all inlined extents, not including deltas and other metadata
// produced by Cache::prepare_record.
std::size_t inline_usage = 0;
cleaner_usage_t cleaner_usage;
friend std::ostream &operator<<(std::ostream &out, const io_usage_t &usage) {
return out << "io_usage_t("
<< "inline_usage=" << usage.inline_usage
<< ", main_cleaner_usage=" << usage.cleaner_usage.main_usage
<< ", cold_cleaner_usage=" << usage.cleaner_usage.cold_ool_usage
<< ")";
}
};
struct reserve_io_result_t {
bool reserve_inline_success = true;
reserve_cleaner_result_t cleaner_result;
bool is_successful() const {
return reserve_inline_success &&
cleaner_result.is_successful();
}
};
class ExtentPlacementManager {
public:
ExtentPlacementManager()
: ool_segment_seq_allocator(
std::make_unique<SegmentSeqAllocator>(segment_type_t::OOL))
{
devices_by_id.resize(DEVICE_ID_MAX, nullptr);
}
void init(JournalTrimmerImplRef &&, AsyncCleanerRef &&, AsyncCleanerRef &&);
SegmentSeqAllocator &get_ool_segment_seq_allocator() const {
return *ool_segment_seq_allocator;
}
void set_primary_device(Device *device);
void set_extent_callback(ExtentCallbackInterface *cb) {
background_process.set_extent_callback(cb);
}
journal_type_t get_journal_type() const {
return background_process.get_journal_type();
}
extent_len_t get_block_size() const {
assert(primary_device != nullptr);
// assume all the devices have the same block size
return primary_device->get_block_size();
}
Device& get_primary_device() {
assert(primary_device != nullptr);
return *primary_device;
}
store_statfs_t get_stat() const {
return background_process.get_stat();
}
using mount_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
using mount_ret = mount_ertr::future<>;
mount_ret mount() {
return background_process.mount();
}
using open_ertr = ExtentOolWriter::open_ertr;
open_ertr::future<> open_for_write();
void start_scan_space() {
return background_process.start_scan_space();
}
void start_background() {
return background_process.start_background();
}
struct alloc_result_t {
paddr_t paddr;
bufferptr bp;
rewrite_gen_t gen;
};
alloc_result_t alloc_new_extent(
Transaction& t,
extent_types_t type,
extent_len_t length,
placement_hint_t hint,
#ifdef UNIT_TESTS_BUILT
rewrite_gen_t gen,
std::optional<paddr_t> external_paddr = std::nullopt
#else
rewrite_gen_t gen
#endif
) {
assert(hint < placement_hint_t::NUM_HINTS);
assert(is_target_rewrite_generation(gen));
assert(gen == INIT_GENERATION || hint == placement_hint_t::REWRITE);
data_category_t category = get_extent_category(type);
gen = adjust_generation(category, type, hint, gen);
// XXX: bp might be extended to point to different memory (e.g. PMem)
// according to the allocator.
auto bp = ceph::bufferptr(
buffer::create_page_aligned(length));
bp.zero();
paddr_t addr;
#ifdef UNIT_TESTS_BUILT
if (unlikely(external_paddr.has_value())) {
assert(external_paddr->is_fake());
addr = *external_paddr;
} else if (gen == INLINE_GENERATION) {
#else
if (gen == INLINE_GENERATION) {
#endif
addr = make_record_relative_paddr(0);
} else if (category == data_category_t::DATA) {
assert(data_writers_by_gen[generation_to_writer(gen)]);
addr = data_writers_by_gen[
generation_to_writer(gen)]->alloc_paddr(length);
} else {
assert(category == data_category_t::METADATA);
assert(md_writers_by_gen[generation_to_writer(gen)]);
addr = md_writers_by_gen[
generation_to_writer(gen)]->alloc_paddr(length);
}
return {addr, std::move(bp), gen};
}
/**
* dispatch_result_t
*
* ool extents are placed in alloc_map and passed to
   * EPM::write_delayed_ool_extents.
   * delayed_extents is used to update the lba mapping.
   * usage is used to reserve projected space.
*/
using extents_by_writer_t =
std::map<ExtentOolWriter*, std::list<LogicalCachedExtentRef>>;
struct dispatch_result_t {
extents_by_writer_t alloc_map;
std::list<LogicalCachedExtentRef> delayed_extents;
io_usage_t usage;
};
/**
* dispatch_delayed_extents
*
* Performs delayed allocation
*/
dispatch_result_t dispatch_delayed_extents(Transaction& t);
/**
* write_delayed_ool_extents
*
* Do writes for out-of-line extents.
*/
using alloc_paddr_iertr = ExtentOolWriter::alloc_write_iertr;
alloc_paddr_iertr::future<> write_delayed_ool_extents(
Transaction& t,
extents_by_writer_t& alloc_map);
/**
* write_preallocated_ool_extents
*
* Performs ool writes for extents with pre-allocated addresses.
* See Transaction::pre_alloc_list
*/
alloc_paddr_iertr::future<> write_preallocated_ool_extents(
Transaction &t,
std::list<LogicalCachedExtentRef> extents);
seastar::future<> stop_background() {
return background_process.stop_background();
}
using close_ertr = ExtentOolWriter::close_ertr;
close_ertr::future<> close();
using read_ertr = Device::read_ertr;
read_ertr::future<> read(
paddr_t addr,
size_t len,
ceph::bufferptr &out
) {
assert(devices_by_id[addr.get_device_id()] != nullptr);
return devices_by_id[addr.get_device_id()]->read(addr, len, out);
}
void mark_space_used(paddr_t addr, extent_len_t len) {
background_process.mark_space_used(addr, len);
}
void mark_space_free(paddr_t addr, extent_len_t len) {
background_process.mark_space_free(addr, len);
}
void commit_space_used(paddr_t addr, extent_len_t len) {
return background_process.commit_space_used(addr, len);
}
seastar::future<> reserve_projected_usage(io_usage_t usage) {
return background_process.reserve_projected_usage(usage);
}
void release_projected_usage(const io_usage_t &usage) {
background_process.release_projected_usage(usage);
}
backend_type_t get_main_backend_type() const {
if (!background_process.is_no_background()) {
return background_process.get_main_backend_type();
}
// for test
assert(primary_device);
return primary_device->get_backend_type();
}
// Testing interfaces
void test_init_no_background(Device *test_device) {
assert(test_device->get_backend_type() == backend_type_t::SEGMENTED);
add_device(test_device);
set_primary_device(test_device);
}
bool check_usage() {
return background_process.check_usage();
}
seastar::future<> run_background_work_until_halt() {
return background_process.run_until_halt();
}
private:
rewrite_gen_t adjust_generation(
data_category_t category,
extent_types_t type,
placement_hint_t hint,
rewrite_gen_t gen) {
if (type == extent_types_t::ROOT) {
gen = INLINE_GENERATION;
} else if (get_main_backend_type() == backend_type_t::SEGMENTED &&
is_lba_backref_node(type)) {
gen = INLINE_GENERATION;
} else if (hint == placement_hint_t::COLD) {
assert(gen == INIT_GENERATION);
if (background_process.has_cold_tier()) {
gen = MIN_COLD_GENERATION;
} else {
gen = MIN_REWRITE_GENERATION;
}
} else if (gen == INIT_GENERATION) {
if (category == data_category_t::METADATA) {
if (get_main_backend_type() == backend_type_t::SEGMENTED) {
          // with SEGMENTED, default to keeping metadata extents inline to
          // reduce padding overhead.
// TODO: improve padding so we can default to the ool path.
gen = INLINE_GENERATION;
} else {
// with RBM, all extents must be OOL
assert(get_main_backend_type() ==
backend_type_t::RANDOM_BLOCK);
gen = OOL_GENERATION;
}
} else {
assert(category == data_category_t::DATA);
gen = OOL_GENERATION;
}
} else if (background_process.has_cold_tier()) {
gen = background_process.adjust_generation(gen);
}
if (gen > dynamic_max_rewrite_generation) {
gen = dynamic_max_rewrite_generation;
}
return gen;
}
void add_device(Device *device) {
auto device_id = device->get_device_id();
ceph_assert(devices_by_id[device_id] == nullptr);
devices_by_id[device_id] = device;
++num_devices;
}
/**
* dispatch_delayed_extent
*
   * Decide whether the extent should be written inline or ool.
   * Returns true for inline, false for ool.
*/
bool dispatch_delayed_extent(LogicalCachedExtentRef& extent) {
// TODO: all delayed extents are ool currently
boost::ignore_unused(extent);
return false;
}
ExtentOolWriter* get_writer(placement_hint_t hint,
data_category_t category,
rewrite_gen_t gen) {
assert(hint < placement_hint_t::NUM_HINTS);
assert(is_rewrite_generation(gen));
assert(gen != INLINE_GENERATION);
assert(gen <= dynamic_max_rewrite_generation);
if (category == data_category_t::DATA) {
return data_writers_by_gen[generation_to_writer(gen)];
} else {
assert(category == data_category_t::METADATA);
return md_writers_by_gen[generation_to_writer(gen)];
}
}
/**
* BackgroundProcess
*
* Background process to schedule background transactions.
*
* TODO: device tiering
*/
class BackgroundProcess : public BackgroundListener {
public:
BackgroundProcess() = default;
void init(JournalTrimmerImplRef &&_trimmer,
AsyncCleanerRef &&_cleaner,
AsyncCleanerRef &&_cold_cleaner) {
trimmer = std::move(_trimmer);
trimmer->set_background_callback(this);
main_cleaner = std::move(_cleaner);
main_cleaner->set_background_callback(this);
if (_cold_cleaner) {
cold_cleaner = std::move(_cold_cleaner);
cold_cleaner->set_background_callback(this);
cleaners_by_device_id.resize(DEVICE_ID_MAX, nullptr);
for (auto id : main_cleaner->get_device_ids()) {
cleaners_by_device_id[id] = main_cleaner.get();
}
for (auto id : cold_cleaner->get_device_ids()) {
cleaners_by_device_id[id] = cold_cleaner.get();
}
eviction_state.init(
crimson::common::get_conf<double>(
"seastore_multiple_tiers_stop_evict_ratio"),
crimson::common::get_conf<double>(
"seastore_multiple_tiers_default_evict_ratio"),
crimson::common::get_conf<double>(
"seastore_multiple_tiers_fast_evict_ratio"));
}
}
journal_type_t get_journal_type() const {
return trimmer->get_journal_type();
}
bool has_cold_tier() const {
return cold_cleaner.get() != nullptr;
}
void set_extent_callback(ExtentCallbackInterface *cb) {
trimmer->set_extent_callback(cb);
main_cleaner->set_extent_callback(cb);
if (has_cold_tier()) {
cold_cleaner->set_extent_callback(cb);
}
}
store_statfs_t get_stat() const {
auto stat = main_cleaner->get_stat();
if (has_cold_tier()) {
stat.add(cold_cleaner->get_stat());
}
return stat;
}
using mount_ret = ExtentPlacementManager::mount_ret;
mount_ret mount() {
ceph_assert(state == state_t::STOP);
state = state_t::MOUNT;
trimmer->reset();
stats = {};
register_metrics();
return main_cleaner->mount(
).safe_then([this] {
return has_cold_tier() ? cold_cleaner->mount() : mount_ertr::now();
});
}
void start_scan_space() {
ceph_assert(state == state_t::MOUNT);
state = state_t::SCAN_SPACE;
ceph_assert(main_cleaner->check_usage_is_empty());
ceph_assert(!has_cold_tier() ||
cold_cleaner->check_usage_is_empty());
}
void start_background();
void mark_space_used(paddr_t addr, extent_len_t len) {
if (state < state_t::SCAN_SPACE) {
return;
}
if (!has_cold_tier()) {
assert(main_cleaner);
main_cleaner->mark_space_used(addr, len);
} else {
auto id = addr.get_device_id();
assert(id < cleaners_by_device_id.size());
auto cleaner = cleaners_by_device_id[id];
assert(cleaner);
cleaner->mark_space_used(addr, len);
}
}
void mark_space_free(paddr_t addr, extent_len_t len) {
if (state < state_t::SCAN_SPACE) {
return;
}
if (!has_cold_tier()) {
assert(main_cleaner);
main_cleaner->mark_space_free(addr, len);
} else {
auto id = addr.get_device_id();
assert(id < cleaners_by_device_id.size());
auto cleaner = cleaners_by_device_id[id];
assert(cleaner);
cleaner->mark_space_free(addr, len);
}
}
void commit_space_used(paddr_t addr, extent_len_t len) {
if (state < state_t::SCAN_SPACE) {
return;
}
if (!has_cold_tier()) {
assert(main_cleaner);
main_cleaner->commit_space_used(addr, len);
} else {
auto id = addr.get_device_id();
assert(id < cleaners_by_device_id.size());
auto cleaner = cleaners_by_device_id[id];
assert(cleaner);
cleaner->commit_space_used(addr, len);
}
}
rewrite_gen_t adjust_generation(rewrite_gen_t gen) {
if (has_cold_tier()) {
return eviction_state.adjust_generation_with_eviction(gen);
} else {
return gen;
}
}
seastar::future<> reserve_projected_usage(io_usage_t usage);
void release_projected_usage(const io_usage_t &usage) {
if (is_ready()) {
trimmer->release_inline_usage(usage.inline_usage);
main_cleaner->release_projected_usage(usage.cleaner_usage.main_usage);
if (has_cold_tier()) {
cold_cleaner->release_projected_usage(usage.cleaner_usage.cold_ool_usage);
}
}
}
seastar::future<> stop_background();
backend_type_t get_main_backend_type() const {
return get_journal_type();
}
// Testing interfaces
bool check_usage() {
return main_cleaner->check_usage() &&
(!has_cold_tier() || cold_cleaner->check_usage());
}
seastar::future<> run_until_halt();
bool is_no_background() const {
return !trimmer || !main_cleaner;
}
protected:
state_t get_state() const final {
return state;
}
void maybe_wake_background() final {
if (!is_running()) {
return;
}
if (background_should_run()) {
do_wake_background();
}
}
void maybe_wake_blocked_io() final {
if (!is_ready()) {
return;
}
if (!should_block_io() && blocking_io) {
blocking_io->set_value();
blocking_io = std::nullopt;
}
}
private:
// reserve helpers
bool try_reserve_cold(std::size_t usage);
void abort_cold_usage(std::size_t usage, bool success);
reserve_cleaner_result_t try_reserve_cleaner(const cleaner_usage_t &usage);
void abort_cleaner_usage(const cleaner_usage_t &usage,
const reserve_cleaner_result_t &result);
reserve_io_result_t try_reserve_io(const io_usage_t &usage);
void abort_io_usage(const io_usage_t &usage,
const reserve_io_result_t &result);
bool is_running() const {
if (state == state_t::RUNNING) {
assert(process_join);
return true;
} else {
assert(!process_join);
return false;
}
}
void log_state(const char *caller) const;
seastar::future<> run();
void do_wake_background() {
if (blocking_background) {
blocking_background->set_value();
blocking_background = std::nullopt;
}
}
// background_should_run() should be atomic with do_background_cycle()
// to make sure the condition is consistent.
bool background_should_run() {
assert(is_ready());
maybe_update_eviction_mode();
return main_cleaner_should_run()
|| cold_cleaner_should_run()
|| trimmer->should_trim();
}
bool main_cleaner_should_run() const {
assert(is_ready());
return main_cleaner->should_clean_space() ||
(has_cold_tier() &&
main_cleaner->can_clean_space() &&
eviction_state.is_fast_mode());
}
bool cold_cleaner_should_run() const {
assert(is_ready());
return has_cold_tier() &&
cold_cleaner->should_clean_space();
}
bool should_block_io() const {
assert(is_ready());
return trimmer->should_block_io_on_trim() ||
main_cleaner->should_block_io_on_clean() ||
(has_cold_tier() &&
cold_cleaner->should_block_io_on_clean());
}
void maybe_update_eviction_mode() {
if (has_cold_tier()) {
auto main_alive_ratio = main_cleaner->get_stat().get_used_raw_ratio();
eviction_state.maybe_update_eviction_mode(main_alive_ratio);
}
}
struct eviction_state_t {
enum class eviction_mode_t {
        STOP,    // generations greater than or equal to MIN_COLD_GENERATION
// will be set to MIN_COLD_GENERATION - 1, which means
// no extents will be evicted.
DEFAULT, // generation incremented with each rewrite. Extents will
// be evicted when generation reaches MIN_COLD_GENERATION.
FAST, // map all generations located in
               // [MIN_REWRITE_GENERATION, MIN_COLD_GENERATION) to
// MIN_COLD_GENERATION.
};
eviction_mode_t eviction_mode;
double stop_evict_ratio;
double default_evict_ratio;
double fast_evict_ratio;
void init(double stop_ratio,
double default_ratio,
double fast_ratio) {
ceph_assert(0 <= stop_ratio);
ceph_assert(stop_ratio < default_ratio);
ceph_assert(default_ratio < fast_ratio);
ceph_assert(fast_ratio <= 1);
eviction_mode = eviction_mode_t::STOP;
stop_evict_ratio = stop_ratio;
default_evict_ratio = default_ratio;
fast_evict_ratio = fast_ratio;
}
bool is_stop_mode() const {
return eviction_mode == eviction_mode_t::STOP;
}
bool is_default_mode() const {
return eviction_mode == eviction_mode_t::DEFAULT;
}
bool is_fast_mode() const {
return eviction_mode == eviction_mode_t::FAST;
}
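    // Map a nominal rewrite generation onto the cold tier according to
    // the current eviction mode; see the eviction_mode_t comments above.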
rewrite_gen_t adjust_generation_with_eviction(rewrite_gen_t gen) {
rewrite_gen_t ret = gen;
switch(eviction_mode) {
case eviction_mode_t::STOP:
if (gen == MIN_COLD_GENERATION) {
ret = MIN_COLD_GENERATION - 1;
}
break;
case eviction_mode_t::DEFAULT:
break;
case eviction_mode_t::FAST:
if (gen >= MIN_REWRITE_GENERATION && gen < MIN_COLD_GENERATION) {
ret = MIN_COLD_GENERATION;
}
break;
default:
ceph_abort("impossible");
}
return ret;
}
// We change the state of eviction_mode according to the alive ratio
// of the main cleaner.
//
// Use A, B, C, D to represent the state of alive ratio:
// A: alive ratio <= stop_evict_ratio
// B: alive ratio <= default_evict_ratio
// C: alive ratio <= fast_evict_ratio
// D: alive ratio > fast_evict_ratio
//
// and use X, Y, Z to shorten the state of eviction_mode_t:
// X: STOP
// Y: DEFAULT
// Z: FAST
//
// Then we can use a form like (A && X) to describe the current state
// of the main cleaner, which indicates the alive ratio is less than or
// equal to stop_evict_ratio and current eviction mode is STOP.
//
    // all valid state transitions are shown as follows:
// (A && X) => (B && X) => (C && Y) => (D && Z) =>
// (C && Z) => (B && Y) => (A && X)
// `--> (C && Y) => ...
//
// when the system restarts, the init state is (_ && X), the
// transitions should be:
// (_ && X) -> (A && X) => normal transition
// -> (B && X) => normal transition
// -> (C && X) => (C && Y) => normal transition
// -> (D && X) => (D && Z) => normal transition
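    //
    // For example (illustrative thresholds, not taken from any config):
    // with stop/default/fast ratios of 0.1/0.7/0.9, successive calls with
    // alive ratios 0.05, 0.75, 0.95, 0.75 and 0.05 would leave the mode
    // at STOP, DEFAULT, FAST, FAST and finally STOP respectively.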
void maybe_update_eviction_mode(double main_alive_ratio) {
if (main_alive_ratio <= stop_evict_ratio) {
eviction_mode = eviction_mode_t::STOP;
} else if (main_alive_ratio <= default_evict_ratio) {
if (eviction_mode > eviction_mode_t::DEFAULT) {
eviction_mode = eviction_mode_t::DEFAULT;
}
} else if (main_alive_ratio <= fast_evict_ratio) {
if (eviction_mode < eviction_mode_t::DEFAULT) {
eviction_mode = eviction_mode_t::DEFAULT;
}
} else {
assert(main_alive_ratio > fast_evict_ratio);
eviction_mode = eviction_mode_t::FAST;
}
}
};
seastar::future<> do_background_cycle();
void register_metrics();
struct {
uint64_t io_blocking_num = 0;
uint64_t io_count = 0;
uint64_t io_blocked_count = 0;
uint64_t io_blocked_count_trim = 0;
uint64_t io_blocked_count_clean = 0;
uint64_t io_blocked_sum = 0;
} stats;
seastar::metrics::metric_group metrics;
JournalTrimmerImplRef trimmer;
AsyncCleanerRef main_cleaner;
/*
* cold tier (optional, see has_cold_tier())
*/
AsyncCleanerRef cold_cleaner;
std::vector<AsyncCleaner*> cleaners_by_device_id;
std::optional<seastar::future<>> process_join;
std::optional<seastar::promise<>> blocking_background;
std::optional<seastar::promise<>> blocking_io;
bool is_running_until_halt = false;
state_t state = state_t::STOP;
eviction_state_t eviction_state;
friend class ::transaction_manager_test_t;
};
std::vector<ExtentOolWriterRef> writer_refs;
std::vector<ExtentOolWriter*> data_writers_by_gen;
// gen 0 METADATA writer is the journal writer
std::vector<ExtentOolWriter*> md_writers_by_gen;
std::vector<Device*> devices_by_id;
Device* primary_device = nullptr;
std::size_t num_devices = 0;
rewrite_gen_t dynamic_max_rewrite_generation = REWRITE_GENERATIONS;
BackgroundProcess background_process;
// TODO: drop once paddr->journal_seq_t is introduced
SegmentSeqAllocatorRef ool_segment_seq_allocator;
friend class ::transaction_manager_test_t;
};
using ExtentPlacementManagerRef = std::unique_ptr<ExtentPlacementManager>;
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::io_usage_t> : fmt::ostream_formatter {};
#endif
| 27,137 | 28.626638 | 97 | h |
null | ceph-main/src/crimson/os/seastore/extentmap_manager.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <experimental/iterator>
#include <iostream>
#include "crimson/os/seastore/transaction_manager.h"
#include "crimson/os/seastore/extentmap_manager.h"
#include "crimson/os/seastore/extentmap_manager/btree/btree_extentmap_manager.h"
namespace crimson::os::seastore::extentmap_manager {
ExtentMapManagerRef create_extentmap_manager(
TransactionManager &trans_manager) {
return ExtentMapManagerRef(new BtreeExtentMapManager(trans_manager));
}
}
namespace crimson::os::seastore {
std::ostream &operator<<(std::ostream &out, const extent_mapping_t &rhs)
{
return out << "extent_mapping_t (" << rhs.logical_offset << "~" << rhs.length
<< "->" << rhs.laddr << ")";
}
std::ostream &operator<<(std::ostream &out, const extent_map_list_t &rhs)
{
out << '[';
std::copy(std::begin(rhs), std::end(rhs), std::experimental::make_ostream_joiner(out, ", "));
return out << ']';
}
}
| 997 | 28.352941 | 95 | cc |
null | ceph-main/src/crimson/os/seastore/journal.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal.h"
#include "journal/segmented_journal.h"
#include "journal/circular_bounded_journal.h"
namespace crimson::os::seastore::journal {
JournalRef make_segmented(
SegmentProvider &provider,
JournalTrimmer &trimmer)
{
return std::make_unique<SegmentedJournal>(provider, trimmer);
}
JournalRef make_circularbounded(
JournalTrimmer &trimmer,
crimson::os::seastore::random_block_device::RBMDevice* device,
std::string path)
{
return std::make_unique<CircularBoundedJournal>(trimmer, device, path);
}
}
| 628 | 23.192308 | 73 | cc |
null | ceph-main/src/crimson/os/seastore/journal.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <memory>
#include "crimson/os/seastore/ordering_handle.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/segment_seq_allocator.h"
namespace crimson::os::seastore {
namespace random_block_device {
class RBMDevice;
}
class SegmentManagerGroup;
class SegmentProvider;
class JournalTrimmer;
class Journal {
public:
virtual JournalTrimmer &get_trimmer() = 0;
/**
* initializes journal for mkfs writes -- must run prior to calls
* to submit_record.
*/
using open_for_mkfs_ertr = crimson::errorator<
crimson::ct_error::input_output_error
>;
using open_for_mkfs_ret = open_for_mkfs_ertr::future<journal_seq_t>;
virtual open_for_mkfs_ret open_for_mkfs() = 0;
/**
* initializes journal for new writes -- must run prior to calls
* to submit_record. Should be called after replay if not a new
* Journal.
*/
using open_for_mount_ertr = open_for_mkfs_ertr;
using open_for_mount_ret = open_for_mkfs_ret;
virtual open_for_mount_ret open_for_mount() = 0;
/// close journal
using close_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
virtual close_ertr::future<> close() = 0;
/**
* submit_record
*
* write record with the ordering handle
*/
using submit_record_ertr = crimson::errorator<
crimson::ct_error::erange,
crimson::ct_error::input_output_error
>;
using submit_record_ret = submit_record_ertr::future<
record_locator_t
>;
virtual submit_record_ret submit_record(
record_t &&record,
OrderingHandle &handle
) = 0;
/**
* flush
*
* Wait for all outstanding IOs on handle to commit.
* Note, flush() machinery must go through the same pipeline
* stages and locks as submit_record.
*/
virtual seastar::future<> flush(OrderingHandle &handle) = 0;
/// sets write pipeline reference
virtual void set_write_pipeline(WritePipeline *_write_pipeline) = 0;
/**
* Read deltas and pass to delta_handler
*
   * record_block_start (argument to delta_handler) is the start of
   * the first block in the record
*/
using replay_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::enoent,
crimson::ct_error::erange>;
using replay_ret = replay_ertr::future<>;
using delta_handler_t = std::function<
replay_ertr::future<bool>(
const record_locator_t&,
const delta_info_t&,
const journal_seq_t&, // dirty_tail
const journal_seq_t&, // alloc_tail
sea_time_point modify_time)>;
virtual replay_ret replay(
delta_handler_t &&delta_handler) = 0;
virtual seastar::future<> finish_commit(
transaction_type_t type) = 0;
virtual ~Journal() {}
virtual journal_type_t get_type() = 0;
};
using JournalRef = std::unique_ptr<Journal>;
namespace journal {
JournalRef make_segmented(
SegmentProvider &provider,
JournalTrimmer &trimmer);
JournalRef make_circularbounded(
JournalTrimmer &trimmer,
crimson::os::seastore::random_block_device::RBMDevice* device,
std::string path);
}
}
| 3,215 | 25.146341 | 71 | h |
null | ceph-main/src/crimson/os/seastore/lba_manager.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "crimson/os/seastore/lba_manager.h"
#include "crimson/os/seastore/lba_manager/btree/btree_lba_manager.h"
namespace crimson::os::seastore {
LBAManager::update_mappings_ret
LBAManager::update_mappings(
Transaction& t,
const std::list<LogicalCachedExtentRef>& extents)
{
return trans_intr::do_for_each(extents,
[this, &t](auto &extent) {
return update_mapping(
t,
extent->get_laddr(),
extent->get_prior_paddr_and_reset(),
extent->get_paddr(),
nullptr // all the extents should have already been
// added to the fixed_kv_btree
);
});
}
LBAManagerRef lba_manager::create_lba_manager(Cache &cache) {
return LBAManagerRef(new btree::BtreeLBAManager(cache));
}
}
| 822 | 24.71875 | 70 | cc |
null | ceph-main/src/crimson/os/seastore/lba_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iostream>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/future.hh>
#include "include/ceph_assert.h"
#include "include/buffer_fwd.h"
#include "include/interval_set.h"
#include "common/interval_map.h"
#include "crimson/osd/exceptions.h"
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/seastore_types.h"
namespace crimson::os::seastore {
/**
* Abstract interface for managing the logical to physical mapping
*/
class LBAManager {
public:
using base_iertr = Cache::base_iertr;
using mkfs_iertr = base_iertr;
using mkfs_ret = mkfs_iertr::future<>;
virtual mkfs_ret mkfs(
Transaction &t
) = 0;
/**
* Fetches mappings for laddr_t in range [offset, offset + len)
*
* Future will not resolve until all pins have resolved (set_paddr called)
*/
using get_mappings_iertr = base_iertr;
using get_mappings_ret = get_mappings_iertr::future<lba_pin_list_t>;
virtual get_mappings_ret get_mappings(
Transaction &t,
laddr_t offset, extent_len_t length) = 0;
/**
* Fetches mappings for a list of laddr_t in range [offset, offset + len)
*
* Future will not resolve until all pins have resolved (set_paddr called)
*/
virtual get_mappings_ret get_mappings(
Transaction &t,
    laddr_list_t &&extent_list) = 0;
/**
* Fetches the mapping for laddr_t
*
* Future will not resolve until the pin has resolved (set_paddr called)
*/
using get_mapping_iertr = base_iertr::extend<
crimson::ct_error::enoent>;
using get_mapping_ret = get_mapping_iertr::future<LBAMappingRef>;
virtual get_mapping_ret get_mapping(
Transaction &t,
laddr_t offset) = 0;
/**
* Allocates a new mapping referenced by LBARef
*
   * Offset will be relative to the block offset of the record.
* This mapping will block from transaction submission until set_paddr
* is called on the LBAMapping.
*/
using alloc_extent_iertr = base_iertr;
using alloc_extent_ret = alloc_extent_iertr::future<LBAMappingRef>;
virtual alloc_extent_ret alloc_extent(
Transaction &t,
laddr_t hint,
extent_len_t len,
paddr_t addr,
LogicalCachedExtent *nextent) = 0;
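  /**
   * ref_update_result_t
   *
   * Result of incref_extent/decref_extent: the resulting refcount plus
   * the paddr and length of the affected mapping.
   */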
struct ref_update_result_t {
unsigned refcount = 0;
paddr_t addr;
extent_len_t length = 0;
};
using ref_iertr = base_iertr::extend<
crimson::ct_error::enoent>;
using ref_ret = ref_iertr::future<ref_update_result_t>;
/**
* Decrements ref count on extent
*
* @return returns resulting refcount
*/
virtual ref_ret decref_extent(
Transaction &t,
laddr_t addr) = 0;
/**
* Increments ref count on extent
*
* @return returns resulting refcount
*/
virtual ref_ret incref_extent(
Transaction &t,
laddr_t addr) = 0;
/**
* Should be called after replay on each cached extent.
* Implementation must initialize the LBAMapping on any
* LogicalCachedExtent's and may also read in any dependent
* structures, etc.
*
* @return returns whether the extent is alive
*/
using init_cached_extent_iertr = base_iertr;
using init_cached_extent_ret = init_cached_extent_iertr::future<bool>;
virtual init_cached_extent_ret init_cached_extent(
Transaction &t,
CachedExtentRef e) = 0;
using check_child_trackers_ret = base_iertr::future<>;
virtual check_child_trackers_ret check_child_trackers(Transaction &t) = 0;
/**
* Calls f for each mapping in [begin, end)
*/
using scan_mappings_iertr = base_iertr;
using scan_mappings_ret = scan_mappings_iertr::future<>;
using scan_mappings_func_t = std::function<
void(laddr_t, paddr_t, extent_len_t)>;
virtual scan_mappings_ret scan_mappings(
Transaction &t,
laddr_t begin,
laddr_t end,
scan_mappings_func_t &&f) = 0;
/**
* rewrite_extent
*
* rewrite extent into passed transaction
*/
using rewrite_extent_iertr = base_iertr;
using rewrite_extent_ret = rewrite_extent_iertr::future<>;
virtual rewrite_extent_ret rewrite_extent(
Transaction &t,
CachedExtentRef extent) = 0;
/**
* update_mapping
*
* update lba mapping for a delayed allocated extent
*/
using update_mapping_iertr = base_iertr;
using update_mapping_ret = base_iertr::future<>;
virtual update_mapping_ret update_mapping(
Transaction& t,
laddr_t laddr,
paddr_t prev_addr,
paddr_t paddr,
LogicalCachedExtent *nextent) = 0;
/**
* update_mappings
*
* update lba mappings for delayed allocated extents
*/
using update_mappings_iertr = update_mapping_iertr;
using update_mappings_ret = update_mapping_ret;
update_mappings_ret update_mappings(
Transaction& t,
const std::list<LogicalCachedExtentRef>& extents);
/**
* get_physical_extent_if_live
*
* Returns extent at addr/laddr if still live (if laddr
* still points at addr). Extent must be an internal, physical
* extent.
*
* Returns a null CachedExtentRef if extent is not live.
*/
using get_physical_extent_if_live_iertr = base_iertr;
using get_physical_extent_if_live_ret =
get_physical_extent_if_live_iertr::future<CachedExtentRef>;
virtual get_physical_extent_if_live_ret get_physical_extent_if_live(
Transaction &t,
extent_types_t type,
paddr_t addr,
laddr_t laddr,
extent_len_t len) = 0;
virtual ~LBAManager() {}
};
using LBAManagerRef = std::unique_ptr<LBAManager>;
class Cache;
namespace lba_manager {
LBAManagerRef create_lba_manager(Cache &cache);
}
}
| 5,641 | 26.125 | 76 | h |
null | ceph-main/src/crimson/os/seastore/logging.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <fmt/format.h>
#include "crimson/common/log.h"
#define LOGT(level_, MSG, t, ...) \
LOCAL_LOGGER.log(level_, "{} trans.{} {}: " MSG, (void*)&t, \
(t).get_trans_id(), FNAME , ##__VA_ARGS__)
#define SUBLOGT(subname_, level_, MSG, t, ...) \
LOGGER(subname_).log(level_, "{} trans.{} {}: " MSG, (void*)&t, \
(t).get_trans_id(), FNAME , ##__VA_ARGS__)
#define TRACET(...) LOGT(seastar::log_level::trace, __VA_ARGS__)
#define SUBTRACET(subname_, ...) SUBLOGT(subname_, seastar::log_level::trace, __VA_ARGS__)
#define DEBUGT(...) LOGT(seastar::log_level::debug, __VA_ARGS__)
#define SUBDEBUGT(subname_, ...) SUBLOGT(subname_, seastar::log_level::debug, __VA_ARGS__)
#define INFOT(...) LOGT(seastar::log_level::info, __VA_ARGS__)
#define SUBINFOT(subname_, ...) SUBLOGT(subname_, seastar::log_level::info, __VA_ARGS__)
#define WARNT(...) LOGT(seastar::log_level::warn, __VA_ARGS__)
#define SUBWARNT(subname_, ...) SUBLOGT(subname_, seastar::log_level::warn, __VA_ARGS__)
#define ERRORT(...) LOGT(seastar::log_level::error, __VA_ARGS__)
#define SUBERRORT(subname_, ...) SUBLOGT(subname_, seastar::log_level::error, __VA_ARGS__)
| 1,259 | 39.645161 | 90 | h |
null | ceph-main/src/crimson/os/seastore/object_data_handler.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <utility>
#include <functional>
#include "crimson/common/log.h"
#include "crimson/os/seastore/object_data_handler.h"
namespace {
seastar::logger& logger() {
return crimson::get_logger(ceph_subsys_seastore_odata);
}
}
SET_SUBSYS(seastore_odata);
namespace crimson::os::seastore {
#define assert_aligned(x) ceph_assert(((x)%ctx.tm.get_block_size()) == 0)
using context_t = ObjectDataHandler::context_t;
using get_iertr = ObjectDataHandler::write_iertr;
/**
* extent_to_write_t
*
 * Encapsulates the smallest write operations used by overwrite.
 * Indicates a zero/existing extent or a data extent based on whether
 * to_write is populated.
* Should be handled by prepare_ops_list.
*/
struct extent_to_write_t {
enum class type_t {
DATA,
ZERO,
EXISTING,
};
type_t type;
/// pin of original extent, not nullptr if type == EXISTING
LBAMappingRef pin;
laddr_t addr;
extent_len_t len;
/// non-nullopt if and only if type == DATA
std::optional<bufferlist> to_write;
extent_to_write_t(const extent_to_write_t &) = delete;
extent_to_write_t(extent_to_write_t &&) = default;
bool is_data() const {
return type == type_t::DATA;
}
bool is_zero() const {
return type == type_t::ZERO;
}
bool is_existing() const {
return type == type_t::EXISTING;
}
laddr_t get_end_addr() const {
return addr + len;
}
static extent_to_write_t create_data(
laddr_t addr, bufferlist to_write) {
return extent_to_write_t(addr, to_write);
}
static extent_to_write_t create_zero(
laddr_t addr, extent_len_t len) {
return extent_to_write_t(addr, len);
}
static extent_to_write_t create_existing(
LBAMappingRef &&pin, laddr_t addr, extent_len_t len) {
assert(pin);
return extent_to_write_t(std::move(pin), addr, len);
}
private:
extent_to_write_t(laddr_t addr, bufferlist to_write)
: type(type_t::DATA), addr(addr), len(to_write.length()),
to_write(to_write) {}
extent_to_write_t(laddr_t addr, extent_len_t len)
: type(type_t::ZERO), addr(addr), len(len) {}
extent_to_write_t(LBAMappingRef &&pin, laddr_t addr, extent_len_t len)
: type(type_t::EXISTING), pin(std::move(pin)), addr(addr), len(len) {}
};
using extent_to_write_list_t = std::list<extent_to_write_t>;
// Encapsulates extents to be written out using do_remappings.
struct extent_to_remap_t {
enum class type_t {
REMAP,
OVERWRITE
};
type_t type;
/// pin of original extent
LBAMappingRef pin;
/// offset of remapped extent or overwrite part of overwrite extent.
  /// overwrite part of overwrite extent might correspond to multiple
  /// fresh write extents.
extent_len_t new_offset;
/// length of remapped extent or overwrite part of overwrite extent
extent_len_t new_len;
extent_to_remap_t(const extent_to_remap_t &) = delete;
extent_to_remap_t(extent_to_remap_t &&) = default;
bool is_remap() const {
return type == type_t::REMAP;
}
bool is_overwrite() const {
assert((new_offset != 0) && (pin->get_length() != new_offset + new_len));
return type == type_t::OVERWRITE;
}
using remap_entry = TransactionManager::remap_entry;
remap_entry create_remap_entry() {
assert(is_remap());
return remap_entry(
new_offset,
new_len);
}
remap_entry create_left_remap_entry() {
assert(is_overwrite());
return remap_entry(
0,
new_offset);
}
remap_entry create_right_remap_entry() {
assert(is_overwrite());
return remap_entry(
new_offset + new_len,
pin->get_length() - new_offset - new_len);
}
static extent_to_remap_t create_remap(
LBAMappingRef &&pin, extent_len_t new_offset, extent_len_t new_len) {
return extent_to_remap_t(type_t::REMAP,
std::move(pin), new_offset, new_len);
}
static extent_to_remap_t create_overwrite(
LBAMappingRef &&pin, extent_len_t new_offset, extent_len_t new_len) {
return extent_to_remap_t(type_t::OVERWRITE,
std::move(pin), new_offset, new_len);
}
private:
extent_to_remap_t(type_t type,
LBAMappingRef &&pin, extent_len_t new_offset, extent_len_t new_len)
: type(type),
pin(std::move(pin)), new_offset(new_offset), new_len(new_len) {}
};
using extent_to_remap_list_t = std::list<extent_to_remap_t>;
// Encapsulates extents to be written out using do_insertions.
struct extent_to_insert_t {
enum class type_t {
DATA,
ZERO
};
type_t type;
/// laddr of new extent
laddr_t addr;
/// length of new extent
extent_len_t len;
/// non-nullopt if type == DATA
std::optional<bufferlist> bl;
extent_to_insert_t(const extent_to_insert_t &) = default;
extent_to_insert_t(extent_to_insert_t &&) = default;
bool is_data() const {
return type == type_t::DATA;
}
bool is_zero() const {
return type == type_t::ZERO;
}
static extent_to_insert_t create_data(
laddr_t addr, extent_len_t len, std::optional<bufferlist> bl) {
return extent_to_insert_t(addr, len, bl);
}
static extent_to_insert_t create_zero(
laddr_t addr, extent_len_t len) {
return extent_to_insert_t(addr, len);
}
private:
extent_to_insert_t(laddr_t addr, extent_len_t len,
std::optional<bufferlist> bl)
:type(type_t::DATA), addr(addr), len(len), bl(bl) {}
extent_to_insert_t(laddr_t addr, extent_len_t len)
:type(type_t::ZERO), addr(addr), len(len) {}
};
using extent_to_insert_list_t = std::list<extent_to_insert_t>;
// Encapsulates extents to be retired in do_removals.
using extent_to_remove_list_t = std::list<LBAMappingRef>;
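/* overwrite_ops_t groups the outputs of prepare_ops_list:
 * - to_remap: existing pins to be trimmed/split via
 *   TransactionManager::remap_pin (see do_remappings)
 * - to_insert: fresh data/zero extents to allocate or reserve
 *   (see do_insertions)
 * - to_remove: pins whose references are dropped via dec_ref
 *   (see do_removals)
 */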
struct overwrite_ops_t {
extent_to_remap_list_t to_remap;
extent_to_insert_list_t to_insert;
extent_to_remove_list_t to_remove;
};
// prepare the to_remap, to_remove and to_insert lists
overwrite_ops_t prepare_ops_list(
lba_pin_list_t &pins_to_remove,
extent_to_write_list_t &to_write) {
assert(to_write.size() != 0 && pins_to_remove.size() != 0);
overwrite_ops_t ops;
ops.to_remove.swap(pins_to_remove);
auto& front = to_write.front();
auto& back = to_write.back();
long unsigned int visitted = 0;
// prepare overwrite, happens in one original extent.
if (ops.to_remove.size() == 1 &&
front.is_existing() && back.is_existing()) {
visitted += 2;
assert(to_write.size() > 2);
assert(front.addr == front.pin->get_key());
assert(back.addr > back.pin->get_key());
ops.to_remap.push_back(extent_to_remap_t::create_overwrite(
std::move(front.pin),
front.len,
back.addr - front.addr - front.len));
ops.to_remove.pop_front();
} else {
// prepare to_remap, happens in one or multiple extents
if (front.is_existing()) {
visitted++;
assert(to_write.size() > 1);
assert(front.addr == front.pin->get_key());
ops.to_remap.push_back(extent_to_remap_t::create_remap(
std::move(front.pin),
0,
front.len));
ops.to_remove.pop_front();
}
if (back.is_existing()) {
visitted++;
assert(to_write.size() > 1);
assert(back.addr + back.len ==
back.pin->get_key() + back.pin->get_length());
ops.to_remap.push_back(extent_to_remap_t::create_remap(
std::move(back.pin),
back.addr - back.pin->get_key(),
back.len));
ops.to_remove.pop_back();
}
}
// prepare to_insert
for (auto ®ion : to_write) {
if (region.is_data()) {
visitted++;
assert(region.to_write.has_value());
ops.to_insert.push_back(extent_to_insert_t::create_data(
region.addr, region.len, region.to_write));
} else if (region.is_zero()) {
visitted++;
assert(!(region.to_write.has_value()));
ops.to_insert.push_back(extent_to_insert_t::create_zero(
region.addr, region.len));
}
}
logger().debug(
"to_remap list size: {}"
" to_insert list size: {}"
" to_remove list size: {}",
ops.to_remap.size(), ops.to_insert.size(), ops.to_remove.size());
assert(visitted == to_write.size());
return ops;
}
/**
* append_extent_to_write
*
 * Appends the passed extent_to_write_t while maintaining the invariant
 * that the list does not contain consecutive zero (or consecutive
 * existing) elements; adjacent mergeable elements are combined.
*/
void append_extent_to_write(
extent_to_write_list_t &to_write, extent_to_write_t &&to_append)
{
assert(to_write.empty() ||
to_write.back().get_end_addr() == to_append.addr);
if (to_write.empty() ||
to_write.back().is_data() ||
to_append.is_data() ||
to_write.back().type != to_append.type) {
to_write.push_back(std::move(to_append));
} else {
to_write.back().len += to_append.len;
}
}
/**
* splice_extent_to_write
*
 * Splices the passed extent_to_write_list_t while maintaining the
 * invariant that the list does not contain consecutive zero (or
 * consecutive existing) elements; adjacent mergeable elements are combined.
*/
void splice_extent_to_write(
extent_to_write_list_t &to_write, extent_to_write_list_t &&to_splice)
{
if (!to_splice.empty()) {
append_extent_to_write(to_write, std::move(to_splice.front()));
to_splice.pop_front();
to_write.splice(to_write.end(), std::move(to_splice));
}
}
/// Creates remap extents in to_remap
ObjectDataHandler::write_ret do_remappings(
context_t ctx,
extent_to_remap_list_t &to_remap)
{
return trans_intr::do_for_each(
to_remap,
[ctx](auto ®ion) {
if (region.is_remap()) {
return ctx.tm.remap_pin<ObjectDataBlock, 1>(
ctx.t,
std::move(region.pin),
std::array{
region.create_remap_entry()
}
).si_then([®ion](auto pins) {
ceph_assert(pins.size() == 1);
ceph_assert(region.new_len == pins[0]->get_length());
return ObjectDataHandler::write_iertr::now();
});
} else if (region.is_overwrite()) {
return ctx.tm.remap_pin<ObjectDataBlock, 2>(
ctx.t,
std::move(region.pin),
std::array{
region.create_left_remap_entry(),
region.create_right_remap_entry()
}
).si_then([®ion](auto pins) {
ceph_assert(pins.size() == 2);
ceph_assert(region.pin->get_key() == pins[0]->get_key());
ceph_assert(region.pin->get_key() + pins[0]->get_length() +
region.new_len == pins[1]->get_key());
return ObjectDataHandler::write_iertr::now();
});
} else {
ceph_abort("impossible");
return ObjectDataHandler::write_iertr::now();
}
});
}
ObjectDataHandler::write_ret do_removals(
context_t ctx,
lba_pin_list_t &to_remove)
{
return trans_intr::do_for_each(
to_remove,
[ctx](auto &pin) {
LOG_PREFIX(object_data_handler.cc::do_removals);
DEBUGT("decreasing ref: {}",
ctx.t,
pin->get_key());
return ctx.tm.dec_ref(
ctx.t,
pin->get_key()
).si_then(
[](auto){},
ObjectDataHandler::write_iertr::pass_further{},
crimson::ct_error::assert_all{
"object_data_handler::do_removals invalid error"
}
);
});
}
/// Creates zero/data extents in to_insert
ObjectDataHandler::write_ret do_insertions(
context_t ctx,
extent_to_insert_list_t &to_insert)
{
return trans_intr::do_for_each(
to_insert,
[ctx](auto ®ion) {
LOG_PREFIX(object_data_handler.cc::do_insertions);
if (region.is_data()) {
assert_aligned(region.addr);
assert_aligned(region.len);
ceph_assert(region.len == region.bl->length());
DEBUGT("allocating extent: {}~{}",
ctx.t,
region.addr,
region.len);
return ctx.tm.alloc_extent<ObjectDataBlock>(
ctx.t,
region.addr,
region.len
).si_then([®ion](auto extent) {
if (extent->get_laddr() != region.addr) {
logger().debug(
"object_data_handler::do_insertions alloc got addr {},"
" should have been {}",
extent->get_laddr(),
region.addr);
}
ceph_assert(extent->get_laddr() == region.addr);
ceph_assert(extent->get_length() == region.len);
auto iter = region.bl->cbegin();
iter.copy(region.len, extent->get_bptr().c_str());
return ObjectDataHandler::write_iertr::now();
});
} else if (region.is_zero()) {
DEBUGT("reserving: {}~{}",
ctx.t,
region.addr,
region.len);
return ctx.tm.reserve_region(
ctx.t,
region.addr,
region.len
).si_then([FNAME, ctx, ®ion](auto pin) {
ceph_assert(pin->get_length() == region.len);
if (pin->get_key() != region.addr) {
ERRORT(
"inconsistent laddr: pin: {} region {}",
ctx.t,
pin->get_key(),
region.addr);
}
ceph_assert(pin->get_key() == region.addr);
return ObjectDataHandler::write_iertr::now();
});
} else {
ceph_abort("impossible");
return ObjectDataHandler::write_iertr::now();
}
});
}
enum class overwrite_operation_t {
UNKNOWN,
OVERWRITE_ZERO, // fill unaligned data with zero
MERGE_EXISTING, // if present, merge data with the clean/pending extent
SPLIT_EXISTING, // split the existing extent, and fill unaligned data
};
std::ostream& operator<<(
std::ostream &out,
const overwrite_operation_t &operation)
{
switch (operation) {
case overwrite_operation_t::UNKNOWN:
return out << "UNKNOWN";
case overwrite_operation_t::OVERWRITE_ZERO:
return out << "OVERWRITE_ZERO";
case overwrite_operation_t::MERGE_EXISTING:
return out << "MERGE_EXISTING";
case overwrite_operation_t::SPLIT_EXISTING:
return out << "SPLIT_EXISTING";
default:
return out << "!IMPOSSIBLE_OPERATION";
}
}
/**
* overwrite_plan_t
*
* |<--------------------------pins_size---------------------------------------------->|
* pin_begin(aligned) pin_end(aligned)
* |<------aligned_data_size-------------------------->| (aligned-bl)
* aligned_data_begin aligned_data_end
* |<-data_size->| (bl)
* data_begin end
* left(l) right(r)
* |<l_extent_size>|<l_alignment_size>| |<r_alignment_size>|<r_extent_size>|
* |<-----------left_size------------>| |<-----------right_size----------->|
*
* |<-----(existing left extent/pin)----->| |<-----(existing right extent/pin)----->|
* left_paddr right_paddr
*/
struct overwrite_plan_t {
// addresses
laddr_t pin_begin;
laddr_t pin_end;
paddr_t left_paddr;
paddr_t right_paddr;
laddr_t data_begin;
laddr_t data_end;
laddr_t aligned_data_begin;
laddr_t aligned_data_end;
// operations
overwrite_operation_t left_operation;
overwrite_operation_t right_operation;
// helper member
extent_len_t block_size;
public:
extent_len_t get_left_size() const {
return data_begin - pin_begin;
}
extent_len_t get_left_extent_size() const {
return aligned_data_begin - pin_begin;
}
extent_len_t get_left_alignment_size() const {
return data_begin - aligned_data_begin;
}
extent_len_t get_right_size() const {
return pin_end - data_end;
}
extent_len_t get_right_extent_size() const {
return pin_end - aligned_data_end;
}
extent_len_t get_right_alignment_size() const {
return aligned_data_end - data_end;
}
extent_len_t get_aligned_data_size() const {
return aligned_data_end - aligned_data_begin;
}
extent_len_t get_pins_size() const {
return pin_end - pin_begin;
}
friend std::ostream& operator<<(
std::ostream& out,
const overwrite_plan_t& overwrite_plan) {
return out << "overwrite_plan_t("
<< "pin_begin=" << overwrite_plan.pin_begin
<< ", pin_end=" << overwrite_plan.pin_end
<< ", left_paddr=" << overwrite_plan.left_paddr
<< ", right_paddr=" << overwrite_plan.right_paddr
<< ", data_begin=" << overwrite_plan.data_begin
<< ", data_end=" << overwrite_plan.data_end
<< ", aligned_data_begin=" << overwrite_plan.aligned_data_begin
<< ", aligned_data_end=" << overwrite_plan.aligned_data_end
<< ", left_operation=" << overwrite_plan.left_operation
<< ", right_operation=" << overwrite_plan.right_operation
<< ", block_size=" << overwrite_plan.block_size
<< ")";
}
overwrite_plan_t(laddr_t offset,
extent_len_t len,
const lba_pin_list_t& pins,
extent_len_t block_size) :
pin_begin(pins.front()->get_key()),
pin_end(pins.back()->get_key() + pins.back()->get_length()),
left_paddr(pins.front()->get_val()),
right_paddr(pins.back()->get_val()),
data_begin(offset),
data_end(offset + len),
aligned_data_begin(p2align((uint64_t)data_begin, (uint64_t)block_size)),
aligned_data_end(p2roundup((uint64_t)data_end, (uint64_t)block_size)),
left_operation(overwrite_operation_t::UNKNOWN),
right_operation(overwrite_operation_t::UNKNOWN),
block_size(block_size) {
validate();
evaluate_operations();
assert(left_operation != overwrite_operation_t::UNKNOWN);
assert(right_operation != overwrite_operation_t::UNKNOWN);
}
private:
// refer to overwrite_plan_t description
void validate() const {
ceph_assert(pin_begin % block_size == 0);
ceph_assert(pin_end % block_size == 0);
ceph_assert(aligned_data_begin % block_size == 0);
ceph_assert(aligned_data_end % block_size == 0);
ceph_assert(pin_begin <= aligned_data_begin);
ceph_assert(aligned_data_begin <= data_begin);
ceph_assert(data_begin <= data_end);
ceph_assert(data_end <= aligned_data_end);
ceph_assert(aligned_data_end <= pin_end);
}
/*
* When trying to modify a portion of an object data block, follow
* the read-full-extent-then-merge-new-data strategy, if the write
* amplification caused by it is not greater than
* seastore_obj_data_write_amplification; otherwise, split the
* original extent into at most three parts: origin-left, part-to-be-modified
* and origin-right.
*/
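  // Illustrative example (numbers made up; the actual threshold comes
  // from seastore_obj_data_write_amplification): with 4KiB blocks, a
  // 64KiB pin span, an 8KiB aligned write and clean absolute extents on
  // both sides, merging costs 64/8 = 8x amplification. If that exceeds
  // the threshold, the larger side (say a 40KiB left extent) is split
  // first (24/8 = 3x), then the 16KiB right extent (8/8 = 1x), leaving
  // both sides as SPLIT_EXISTING.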
void evaluate_operations() {
auto actual_write_size = get_pins_size();
auto aligned_data_size = get_aligned_data_size();
auto left_ext_size = get_left_extent_size();
auto right_ext_size = get_right_extent_size();
if (left_paddr.is_zero()) {
actual_write_size -= left_ext_size;
left_ext_size = 0;
left_operation = overwrite_operation_t::OVERWRITE_ZERO;
// FIXME: left_paddr can be absolute and pending
} else if (left_paddr.is_relative() ||
left_paddr.is_delayed()) {
aligned_data_size += left_ext_size;
left_ext_size = 0;
left_operation = overwrite_operation_t::MERGE_EXISTING;
}
if (right_paddr.is_zero()) {
actual_write_size -= right_ext_size;
right_ext_size = 0;
right_operation = overwrite_operation_t::OVERWRITE_ZERO;
// FIXME: right_paddr can be absolute and pending
} else if (right_paddr.is_relative() ||
right_paddr.is_delayed()) {
aligned_data_size += right_ext_size;
right_ext_size = 0;
right_operation = overwrite_operation_t::MERGE_EXISTING;
}
while (left_operation == overwrite_operation_t::UNKNOWN ||
right_operation == overwrite_operation_t::UNKNOWN) {
if (((double)actual_write_size / (double)aligned_data_size) <=
crimson::common::get_conf<double>("seastore_obj_data_write_amplification")) {
break;
}
if (left_ext_size == 0 && right_ext_size == 0) {
break;
}
if (left_ext_size >= right_ext_size) {
// split left
assert(left_operation == overwrite_operation_t::UNKNOWN);
actual_write_size -= left_ext_size;
left_ext_size = 0;
left_operation = overwrite_operation_t::SPLIT_EXISTING;
} else { // left_ext_size < right_ext_size
// split right
assert(right_operation == overwrite_operation_t::UNKNOWN);
actual_write_size -= right_ext_size;
right_ext_size = 0;
right_operation = overwrite_operation_t::SPLIT_EXISTING;
}
}
if (left_operation == overwrite_operation_t::UNKNOWN) {
// no split left, so merge with left
left_operation = overwrite_operation_t::MERGE_EXISTING;
}
if (right_operation == overwrite_operation_t::UNKNOWN) {
// no split right, so merge with right
right_operation = overwrite_operation_t::MERGE_EXISTING;
}
}
};
} // namespace crimson::os::seastore
#if FMT_VERSION >= 90000
template<> struct fmt::formatter<crimson::os::seastore::overwrite_plan_t> : fmt::ostream_formatter {};
#endif
namespace crimson::os::seastore {
/**
* operate_left
*
* Proceed overwrite_plan.left_operation.
*/
using operate_ret_bare = std::pair<
std::optional<extent_to_write_t>,
std::optional<bufferptr>>;
using operate_ret = get_iertr::future<operate_ret_bare>;
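// operate_ret_bare pairs (1) an optional extent_to_write_t covering the
// untouched portion of the boundary extent (a zero reservation or a
// split-off existing extent) with (2) an optional bufferptr holding the
// bytes (existing data or zero padding) to splice onto that side of the
// new write buffer.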
operate_ret operate_left(context_t ctx, LBAMappingRef &pin, const overwrite_plan_t &overwrite_plan)
{
if (overwrite_plan.get_left_size() == 0) {
return get_iertr::make_ready_future<operate_ret_bare>(
std::nullopt,
std::nullopt);
}
if (overwrite_plan.left_operation == overwrite_operation_t::OVERWRITE_ZERO) {
assert(pin->get_val().is_zero());
auto zero_extent_len = overwrite_plan.get_left_extent_size();
assert_aligned(zero_extent_len);
auto zero_prepend_len = overwrite_plan.get_left_alignment_size();
return get_iertr::make_ready_future<operate_ret_bare>(
(zero_extent_len == 0
? std::nullopt
: std::make_optional(extent_to_write_t::create_zero(
overwrite_plan.pin_begin, zero_extent_len))),
(zero_prepend_len == 0
? std::nullopt
: std::make_optional(bufferptr(
ceph::buffer::create(zero_prepend_len, 0))))
);
} else if (overwrite_plan.left_operation == overwrite_operation_t::MERGE_EXISTING) {
auto prepend_len = overwrite_plan.get_left_size();
if (prepend_len == 0) {
return get_iertr::make_ready_future<operate_ret_bare>(
std::nullopt,
std::nullopt);
} else {
return ctx.tm.read_pin<ObjectDataBlock>(
ctx.t, pin->duplicate()
).si_then([prepend_len](auto left_extent) {
return get_iertr::make_ready_future<operate_ret_bare>(
std::nullopt,
std::make_optional(bufferptr(
left_extent->get_bptr(),
0,
prepend_len)));
});
}
} else {
assert(overwrite_plan.left_operation == overwrite_operation_t::SPLIT_EXISTING);
auto extent_len = overwrite_plan.get_left_extent_size();
assert(extent_len);
std::optional<extent_to_write_t> left_to_write_extent =
std::make_optional(extent_to_write_t::create_existing(
pin->duplicate(),
pin->get_key(),
extent_len));
auto prepend_len = overwrite_plan.get_left_alignment_size();
if (prepend_len == 0) {
return get_iertr::make_ready_future<operate_ret_bare>(
std::move(left_to_write_extent),
std::nullopt);
} else {
return ctx.tm.read_pin<ObjectDataBlock>(
ctx.t, pin->duplicate()
).si_then([prepend_offset=extent_len, prepend_len,
left_to_write_extent=std::move(left_to_write_extent)]
(auto left_extent) mutable {
return get_iertr::make_ready_future<operate_ret_bare>(
std::move(left_to_write_extent),
std::make_optional(bufferptr(
left_extent->get_bptr(),
prepend_offset,
prepend_len)));
});
}
}
};
/**
* operate_right
*
* Proceed overwrite_plan.right_operation.
*/
operate_ret operate_right(context_t ctx, LBAMappingRef &pin, const overwrite_plan_t &overwrite_plan)
{
if (overwrite_plan.get_right_size() == 0) {
return get_iertr::make_ready_future<operate_ret_bare>(
std::nullopt,
std::nullopt);
}
auto right_pin_begin = pin->get_key();
assert(overwrite_plan.data_end >= right_pin_begin);
if (overwrite_plan.right_operation == overwrite_operation_t::OVERWRITE_ZERO) {
assert(pin->get_val().is_zero());
auto zero_suffix_len = overwrite_plan.get_right_alignment_size();
auto zero_extent_len = overwrite_plan.get_right_extent_size();
assert_aligned(zero_extent_len);
return get_iertr::make_ready_future<operate_ret_bare>(
(zero_extent_len == 0
? std::nullopt
: std::make_optional(extent_to_write_t::create_zero(
overwrite_plan.aligned_data_end, zero_extent_len))),
(zero_suffix_len == 0
? std::nullopt
: std::make_optional(bufferptr(
ceph::buffer::create(zero_suffix_len, 0))))
);
} else if (overwrite_plan.right_operation == overwrite_operation_t::MERGE_EXISTING) {
auto append_len = overwrite_plan.get_right_size();
if (append_len == 0) {
return get_iertr::make_ready_future<operate_ret_bare>(
std::nullopt,
std::nullopt);
} else {
auto append_offset = overwrite_plan.data_end - right_pin_begin;
return ctx.tm.read_pin<ObjectDataBlock>(
ctx.t, pin->duplicate()
).si_then([append_offset, append_len](auto right_extent) {
return get_iertr::make_ready_future<operate_ret_bare>(
std::nullopt,
std::make_optional(bufferptr(
right_extent->get_bptr(),
append_offset,
append_len)));
});
}
} else {
assert(overwrite_plan.right_operation == overwrite_operation_t::SPLIT_EXISTING);
auto extent_len = overwrite_plan.get_right_extent_size();
assert(extent_len);
std::optional<extent_to_write_t> right_to_write_extent =
std::make_optional(extent_to_write_t::create_existing(
pin->duplicate(),
overwrite_plan.aligned_data_end,
extent_len));
auto append_len = overwrite_plan.get_right_alignment_size();
if (append_len == 0) {
return get_iertr::make_ready_future<operate_ret_bare>(
std::move(right_to_write_extent),
std::nullopt);
} else {
auto append_offset = overwrite_plan.data_end - right_pin_begin;
return ctx.tm.read_pin<ObjectDataBlock>(
ctx.t, pin->duplicate()
).si_then([append_offset, append_len,
right_to_write_extent=std::move(right_to_write_extent)]
(auto right_extent) mutable {
return get_iertr::make_ready_future<operate_ret_bare>(
std::move(right_to_write_extent),
std::make_optional(bufferptr(
right_extent->get_bptr(),
append_offset,
append_len)));
});
}
}
};
template <typename F>
auto with_object_data(
ObjectDataHandler::context_t ctx,
F &&f)
{
return seastar::do_with(
ctx.onode.get_layout().object_data.get(),
std::forward<F>(f),
[ctx](auto &object_data, auto &f) {
return std::invoke(f, object_data
).si_then([ctx, &object_data] {
if (object_data.must_update()) {
ctx.onode.get_mutable_layout(ctx.t).object_data.update(object_data);
}
return seastar::now();
});
});
}
ObjectDataHandler::write_ret ObjectDataHandler::prepare_data_reservation(
context_t ctx,
object_data_t &object_data,
extent_len_t size)
{
LOG_PREFIX(ObjectDataHandler::prepare_data_reservation);
ceph_assert(size <= max_object_size);
if (!object_data.is_null()) {
ceph_assert(object_data.get_reserved_data_len() == max_object_size);
DEBUGT("reservation present: {}~{}",
ctx.t,
object_data.get_reserved_data_base(),
object_data.get_reserved_data_len());
return write_iertr::now();
} else {
DEBUGT("reserving: {}~{}",
ctx.t,
ctx.onode.get_data_hint(),
max_object_size);
return ctx.tm.reserve_region(
ctx.t,
ctx.onode.get_data_hint(),
max_object_size
).si_then([max_object_size=max_object_size, &object_data](auto pin) {
ceph_assert(pin->get_length() == max_object_size);
object_data.update_reserved(
pin->get_key(),
pin->get_length());
return write_iertr::now();
});
}
}
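/* Trims the reserved data region down to `size`: pins entirely past the
 * truncation point are released and replaced by a single zero
 * reservation; a pin straddling the boundary is either remapped (when
 * the boundary is block aligned) or rewritten with its tail zeroed. */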
ObjectDataHandler::clear_ret ObjectDataHandler::trim_data_reservation(
context_t ctx, object_data_t &object_data, extent_len_t size)
{
ceph_assert(!object_data.is_null());
ceph_assert(size <= object_data.get_reserved_data_len());
return seastar::do_with(
lba_pin_list_t(),
extent_to_write_list_t(),
[ctx, size, &object_data](auto &pins, auto &to_write) {
LOG_PREFIX(ObjectDataHandler::trim_data_reservation);
DEBUGT("object_data: {}~{}",
ctx.t,
object_data.get_reserved_data_base(),
object_data.get_reserved_data_len());
return ctx.tm.get_pins(
ctx.t,
object_data.get_reserved_data_base() + size,
object_data.get_reserved_data_len() - size
).si_then([ctx, size, &pins, &object_data, &to_write](auto _pins) {
_pins.swap(pins);
ceph_assert(pins.size());
auto &pin = *pins.front();
ceph_assert(pin.get_key() >= object_data.get_reserved_data_base());
ceph_assert(
pin.get_key() <= object_data.get_reserved_data_base() + size);
auto pin_offset = pin.get_key() -
object_data.get_reserved_data_base();
if ((pin.get_key() == (object_data.get_reserved_data_base() + size)) ||
(pin.get_val().is_zero())) {
/* First pin is exactly at the boundary or is a zero pin. Either way,
* remove all pins and add a single zero pin to the end. */
to_write.push_back(extent_to_write_t::create_zero(
pin.get_key(),
object_data.get_reserved_data_len() - pin_offset));
return clear_iertr::now();
} else {
/* First pin overlaps the boundary and has data, remap it
* if aligned or rewrite it if not aligned to size */
auto roundup_size = p2roundup(size, ctx.tm.get_block_size());
auto append_len = roundup_size - size;
if (append_len == 0) {
LOG_PREFIX(ObjectDataHandler::trim_data_reservation);
TRACET("First pin overlaps the boundary and has aligned data"
"create existing at addr:{}, len:{}",
ctx.t, pin.get_key(), size - pin_offset);
to_write.push_back(extent_to_write_t::create_existing(
pin.duplicate(),
pin.get_key(),
size - pin_offset));
to_write.push_back(extent_to_write_t::create_zero(
object_data.get_reserved_data_base() + roundup_size,
object_data.get_reserved_data_len() - roundup_size));
return clear_iertr::now();
} else {
return ctx.tm.read_pin<ObjectDataBlock>(
ctx.t,
pin.duplicate()
).si_then([ctx, size, pin_offset, append_len, roundup_size,
&pin, &object_data, &to_write](auto extent) {
bufferlist bl;
bl.append(
bufferptr(
extent->get_bptr(),
0,
size - pin_offset
));
bl.append_zero(append_len);
LOG_PREFIX(ObjectDataHandler::trim_data_reservation);
TRACET("First pin overlaps the boundary and has unaligned data"
"create data at addr:{}, len:{}",
ctx.t, pin.get_key(), bl.length());
to_write.push_back(extent_to_write_t::create_data(
pin.get_key(),
bl));
to_write.push_back(extent_to_write_t::create_zero(
object_data.get_reserved_data_base() + roundup_size,
object_data.get_reserved_data_len() - roundup_size));
return clear_iertr::now();
});
}
}
}).si_then([ctx, size, &to_write, &object_data, &pins] {
assert(to_write.size());
return seastar::do_with(
prepare_ops_list(pins, to_write),
[ctx, size, &object_data](auto &ops) {
return do_remappings(ctx, ops.to_remap
).si_then([ctx, &ops] {
return do_removals(ctx, ops.to_remove);
}).si_then([ctx, &ops] {
return do_insertions(ctx, ops.to_insert);
}).si_then([size, &object_data] {
if (size == 0) {
object_data.clear();
}
return ObjectDataHandler::clear_iertr::now();
});
});
});
});
}
/**
* get_to_writes_with_zero_buffer
*
* Returns extent_to_write_t's reflecting a zero region extending
* from offset~len with headptr optionally on the left and tailptr
* optionally on the right.
*/
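// For illustration (assuming 4096-byte blocks): zeroing offset=5000,
// len=2000 with a 904-byte headptr and a 1192-byte tailptr gives
// zero_left=8192 and zero_right=4096, so the zero span is too small to
// reserve and the head data, zeroes and tail data are folded into a
// single 4096-byte data extent at offset 4096.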
extent_to_write_list_t get_to_writes_with_zero_buffer(
const extent_len_t block_size,
laddr_t offset, extent_len_t len,
std::optional<bufferptr> &&headptr, std::optional<bufferptr> &&tailptr)
{
auto zero_left = p2roundup(offset, (laddr_t)block_size);
auto zero_right = p2align(offset + len, (laddr_t)block_size);
auto left = headptr ? (offset - headptr->length()) : offset;
auto right = tailptr ?
(offset + len + tailptr->length()) :
(offset + len);
assert(
(headptr && ((zero_left - left) ==
p2roundup(headptr->length(), block_size))) ^
(!headptr && (zero_left == left)));
assert(
(tailptr && ((right - zero_right) ==
p2roundup(tailptr->length(), block_size))) ^
(!tailptr && (right == zero_right)));
assert(right > left);
assert((left % block_size) == 0);
assert((right % block_size) == 0);
// zero region too small for a reserved section,
// headptr and tailptr in same extent
if (zero_right <= zero_left) {
bufferlist bl;
if (headptr) {
bl.append(*headptr);
}
bl.append_zero(
right - left - bl.length() - (tailptr ? tailptr->length() : 0));
if (tailptr) {
bl.append(*tailptr);
}
assert(bl.length() % block_size == 0);
assert(bl.length() == (right - left));
extent_to_write_list_t ret;
ret.push_back(extent_to_write_t::create_data(left, bl));
return ret;
} else {
// reserved section between ends, headptr and tailptr in different extents
extent_to_write_list_t ret;
if (headptr) {
bufferlist headbl;
headbl.append(*headptr);
headbl.append_zero(zero_left - left - headbl.length());
assert(headbl.length() % block_size == 0);
assert(headbl.length() > 0);
ret.push_back(extent_to_write_t::create_data(left, headbl));
}
// reserved zero region
ret.push_back(extent_to_write_t::create_zero(zero_left, zero_right - zero_left));
assert(ret.back().len % block_size == 0);
assert(ret.back().len > 0);
if (tailptr) {
bufferlist tailbl;
tailbl.append(*tailptr);
tailbl.append_zero(right - zero_right - tailbl.length());
assert(tailbl.length() % block_size == 0);
assert(tailbl.length() > 0);
ret.push_back(extent_to_write_t::create_data(zero_right, tailbl));
}
return ret;
}
}
/**
* get_to_writes
*
* Returns extent_to_write_t's from bl.
*
* TODO: probably add some kind of upper limit on extent size.
*/
extent_to_write_list_t get_to_writes(laddr_t offset, bufferlist &bl)
{
auto ret = extent_to_write_list_t();
ret.push_back(extent_to_write_t::create_data(offset, bl));
return ret;
};
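/* overwrite() pipeline: build an overwrite_plan_t over the affected pins,
 * let operate_left/operate_right decide how to treat the partially
 * overlapped boundary extents, splice all pieces into one
 * extent_to_write_list_t, then apply it via prepare_ops_list followed by
 * do_remappings, do_removals and do_insertions. A std::nullopt bl means
 * the region is being zeroed rather than written. */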
ObjectDataHandler::write_ret ObjectDataHandler::overwrite(
context_t ctx,
laddr_t offset,
extent_len_t len,
std::optional<bufferlist> &&bl,
lba_pin_list_t &&_pins)
{
if (bl.has_value()) {
assert(bl->length() == len);
}
overwrite_plan_t overwrite_plan(offset, len, _pins, ctx.tm.get_block_size());
return seastar::do_with(
std::move(_pins),
extent_to_write_list_t(),
[ctx, len, offset, overwrite_plan, bl=std::move(bl)]
(auto &pins, auto &to_write) mutable
{
LOG_PREFIX(ObjectDataHandler::overwrite);
DEBUGT("overwrite: {}~{}",
ctx.t,
offset,
len);
ceph_assert(pins.size() >= 1);
DEBUGT("overwrite: split overwrite_plan {}", ctx.t, overwrite_plan);
return operate_left(
ctx,
pins.front(),
overwrite_plan
).si_then([ctx, len, offset, overwrite_plan, bl=std::move(bl),
&to_write, &pins](auto p) mutable {
auto &[left_extent, headptr] = p;
if (left_extent) {
ceph_assert(left_extent->addr == overwrite_plan.pin_begin);
append_extent_to_write(to_write, std::move(*left_extent));
}
if (headptr) {
assert(headptr->length() > 0);
}
return operate_right(
ctx,
pins.back(),
overwrite_plan
).si_then([ctx, len, offset,
pin_begin=overwrite_plan.pin_begin,
pin_end=overwrite_plan.pin_end,
bl=std::move(bl), headptr=std::move(headptr),
&to_write, &pins](auto p) mutable {
auto &[right_extent, tailptr] = p;
if (bl.has_value()) {
auto write_offset = offset;
bufferlist write_bl;
if (headptr) {
write_bl.append(*headptr);
write_offset -= headptr->length();
assert_aligned(write_offset);
}
write_bl.claim_append(*bl);
if (tailptr) {
write_bl.append(*tailptr);
assert_aligned(write_bl.length());
}
splice_extent_to_write(
to_write,
get_to_writes(write_offset, write_bl));
} else {
splice_extent_to_write(
to_write,
get_to_writes_with_zero_buffer(
ctx.tm.get_block_size(),
offset,
len,
std::move(headptr),
std::move(tailptr)));
}
if (right_extent) {
ceph_assert(right_extent->get_end_addr() == pin_end);
append_extent_to_write(to_write, std::move(*right_extent));
}
assert(to_write.size());
assert(pin_begin == to_write.front().addr);
assert(pin_end == to_write.back().get_end_addr());
return seastar::do_with(
prepare_ops_list(pins, to_write),
[ctx](auto &ops) {
return do_remappings(ctx, ops.to_remap
).si_then([ctx, &ops] {
return do_removals(ctx, ops.to_remove);
}).si_then([ctx, &ops] {
return do_insertions(ctx, ops.to_insert);
});
});
});
});
});
}
ObjectDataHandler::zero_ret ObjectDataHandler::zero(
context_t ctx,
objaddr_t offset,
extent_len_t len)
{
return with_object_data(
ctx,
[this, ctx, offset, len](auto &object_data) {
LOG_PREFIX(ObjectDataHandler::zero);
DEBUGT("zero to {}~{}, object_data: {}~{}, is_null {}",
ctx.t,
offset,
len,
object_data.get_reserved_data_base(),
object_data.get_reserved_data_len(),
object_data.is_null());
return prepare_data_reservation(
ctx,
object_data,
p2roundup(offset + len, ctx.tm.get_block_size())
).si_then([this, ctx, offset, len, &object_data] {
auto logical_offset = object_data.get_reserved_data_base() + offset;
return ctx.tm.get_pins(
ctx.t,
logical_offset,
len
).si_then([this, ctx, logical_offset, len](auto pins) {
return overwrite(
ctx, logical_offset, len,
std::nullopt, std::move(pins));
});
});
});
}
ObjectDataHandler::write_ret ObjectDataHandler::write(
context_t ctx,
objaddr_t offset,
const bufferlist &bl)
{
return with_object_data(
ctx,
[this, ctx, offset, &bl](auto &object_data) {
LOG_PREFIX(ObjectDataHandler::write);
DEBUGT("writing to {}~{}, object_data: {}~{}, is_null {}",
ctx.t,
offset,
bl.length(),
object_data.get_reserved_data_base(),
object_data.get_reserved_data_len(),
object_data.is_null());
return prepare_data_reservation(
ctx,
object_data,
p2roundup(offset + bl.length(), ctx.tm.get_block_size())
).si_then([this, ctx, offset, &object_data, &bl] {
auto logical_offset = object_data.get_reserved_data_base() + offset;
return ctx.tm.get_pins(
ctx.t,
logical_offset,
bl.length()
      ).si_then([this, ctx, logical_offset, &bl](
auto pins) {
return overwrite(
ctx, logical_offset, bl.length(),
bufferlist(bl), std::move(pins));
});
});
});
}
ObjectDataHandler::read_ret ObjectDataHandler::read(
context_t ctx,
objaddr_t obj_offset,
extent_len_t len)
{
return seastar::do_with(
bufferlist(),
[ctx, obj_offset, len](auto &ret) {
return with_object_data(
ctx,
[ctx, obj_offset, len, &ret](const auto &object_data) {
LOG_PREFIX(ObjectDataHandler::read);
DEBUGT("reading {}~{}",
ctx.t,
object_data.get_reserved_data_base(),
object_data.get_reserved_data_len());
/* Assumption: callers ensure that onode size is <= reserved
* size and that len is adjusted here prior to call */
ceph_assert(!object_data.is_null());
ceph_assert((obj_offset + len) <= object_data.get_reserved_data_len());
ceph_assert(len > 0);
laddr_t loffset =
object_data.get_reserved_data_base() + obj_offset;
return ctx.tm.get_pins(
ctx.t,
loffset,
len
).si_then([ctx, loffset, len, &ret](auto _pins) {
// offset~len falls within reserved region and len > 0
ceph_assert(_pins.size() >= 1);
ceph_assert((*_pins.begin())->get_key() <= loffset);
return seastar::do_with(
std::move(_pins),
loffset,
[ctx, loffset, len, &ret](auto &pins, auto ¤t) {
return trans_intr::do_for_each(
pins,
[ctx, loffset, len, ¤t, &ret](auto &pin)
-> read_iertr::future<> {
ceph_assert(current <= (loffset + len));
ceph_assert(
(loffset + len) > pin->get_key());
laddr_t end = std::min(
pin->get_key() + pin->get_length(),
loffset + len);
if (pin->get_val().is_zero()) {
ceph_assert(end > current); // See LBAManager::get_mappings
ret.append_zero(end - current);
current = end;
return seastar::now();
} else {
return ctx.tm.read_pin<ObjectDataBlock>(
ctx.t,
std::move(pin)
).si_then([&ret, ¤t, end](auto extent) {
ceph_assert(
(extent->get_laddr() + extent->get_length()) >= end);
ceph_assert(end > current);
ret.append(
bufferptr(
extent->get_bptr(),
current - extent->get_laddr(),
end - current));
current = end;
return seastar::now();
}).handle_error_interruptible(
read_iertr::pass_further{},
crimson::ct_error::assert_all{
"ObjectDataHandler::read hit invalid error"
}
);
}
});
});
});
}).si_then([&ret] {
return std::move(ret);
});
});
}
ObjectDataHandler::fiemap_ret ObjectDataHandler::fiemap(
context_t ctx,
objaddr_t obj_offset,
extent_len_t len)
{
return seastar::do_with(
std::map<uint64_t, uint64_t>(),
[ctx, obj_offset, len](auto &ret) {
return with_object_data(
ctx,
[ctx, obj_offset, len, &ret](const auto &object_data) {
LOG_PREFIX(ObjectDataHandler::fiemap);
DEBUGT(
"{}~{}, reservation {}~{}",
ctx.t,
obj_offset,
len,
object_data.get_reserved_data_base(),
object_data.get_reserved_data_len());
/* Assumption: callers ensure that onode size is <= reserved
* size and that len is adjusted here prior to call */
ceph_assert(!object_data.is_null());
ceph_assert((obj_offset + len) <= object_data.get_reserved_data_len());
ceph_assert(len > 0);
laddr_t loffset =
object_data.get_reserved_data_base() + obj_offset;
return ctx.tm.get_pins(
ctx.t,
loffset,
len
).si_then([loffset, len, &object_data, &ret](auto &&pins) {
ceph_assert(pins.size() >= 1);
ceph_assert((*pins.begin())->get_key() <= loffset);
for (auto &&i: pins) {
if (!(i->get_val().is_zero())) {
auto ret_left = std::max(i->get_key(), loffset);
auto ret_right = std::min(
i->get_key() + i->get_length(),
loffset + len);
assert(ret_right > ret_left);
ret.emplace(
std::make_pair(
ret_left - object_data.get_reserved_data_base(),
ret_right - ret_left
));
}
}
});
}).si_then([&ret] {
return std::move(ret);
});
});
}
ObjectDataHandler::truncate_ret ObjectDataHandler::truncate(
context_t ctx,
objaddr_t offset)
{
return with_object_data(
ctx,
[this, ctx, offset](auto &object_data) {
LOG_PREFIX(ObjectDataHandler::truncate);
DEBUGT("truncating {}~{} offset: {}",
ctx.t,
object_data.get_reserved_data_base(),
object_data.get_reserved_data_len(),
offset);
if (offset < object_data.get_reserved_data_len()) {
return trim_data_reservation(ctx, object_data, offset);
} else if (offset > object_data.get_reserved_data_len()) {
return prepare_data_reservation(
ctx,
object_data,
p2roundup(offset, ctx.tm.get_block_size()));
} else {
return truncate_iertr::now();
}
});
}
ObjectDataHandler::clear_ret ObjectDataHandler::clear(
context_t ctx)
{
return with_object_data(
ctx,
[this, ctx](auto &object_data) {
LOG_PREFIX(ObjectDataHandler::clear);
DEBUGT("clearing: {}~{}",
ctx.t,
object_data.get_reserved_data_base(),
object_data.get_reserved_data_len());
if (object_data.is_null()) {
return clear_iertr::now();
}
return trim_data_reservation(ctx, object_data, 0);
});
}
} // namespace crimson::os::seastore
| 45,422 | 30.347826 | 102 | cc |
null | ceph-main/src/crimson/os/seastore/object_data_handler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iostream>
#include <limits>
#include "include/buffer.h"
#include "test/crimson/seastore/test_block.h" // TODO
#include "crimson/os/seastore/onode.h"
#include "crimson/os/seastore/transaction_manager.h"
#include "crimson/os/seastore/transaction.h"
namespace crimson::os::seastore {
struct ObjectDataBlock : crimson::os::seastore::LogicalCachedExtent {
using Ref = TCachedExtentRef<ObjectDataBlock>;
ObjectDataBlock(ceph::bufferptr &&ptr)
: LogicalCachedExtent(std::move(ptr)) {}
ObjectDataBlock(const ObjectDataBlock &other)
: LogicalCachedExtent(other) {}
CachedExtentRef duplicate_for_write(Transaction&) final {
return CachedExtentRef(new ObjectDataBlock(*this));
};
static constexpr extent_types_t TYPE = extent_types_t::OBJECT_DATA_BLOCK;
extent_types_t get_type() const final {
return TYPE;
}
ceph::bufferlist get_delta() final {
/* Currently, we always allocate fresh ObjectDataBlock's rather than
* mutating existing ones. */
ceph_assert(0 == "Should be impossible");
}
void apply_delta(const ceph::bufferlist &bl) final {
// See get_delta()
ceph_assert(0 == "Should be impossible");
}
};
using ObjectDataBlockRef = TCachedExtentRef<ObjectDataBlock>;
class ObjectDataHandler {
public:
using base_iertr = TransactionManager::base_iertr;
ObjectDataHandler(uint32_t mos) : max_object_size(mos) {}
struct context_t {
TransactionManager &tm;
Transaction &t;
Onode &onode;
};
/// Writes bl to [offset, offset + bl.length())
using write_iertr = base_iertr;
using write_ret = write_iertr::future<>;
write_ret write(
context_t ctx,
objaddr_t offset,
const bufferlist &bl);
using zero_iertr = base_iertr;
using zero_ret = zero_iertr::future<>;
zero_ret zero(
context_t ctx,
objaddr_t offset,
extent_len_t len);
/// Reads data in [offset, offset + len)
using read_iertr = base_iertr;
using read_ret = read_iertr::future<bufferlist>;
read_ret read(
context_t ctx,
objaddr_t offset,
extent_len_t len);
/// sparse read data, get range interval in [offset, offset + len)
using fiemap_iertr = base_iertr;
using fiemap_ret = fiemap_iertr::future<std::map<uint64_t, uint64_t>>;
fiemap_ret fiemap(
context_t ctx,
objaddr_t offset,
extent_len_t len);
/// Clears data past offset
using truncate_iertr = base_iertr;
using truncate_ret = truncate_iertr::future<>;
truncate_ret truncate(
context_t ctx,
objaddr_t offset);
/// Clears data and reservation
using clear_iertr = base_iertr;
using clear_ret = clear_iertr::future<>;
clear_ret clear(context_t ctx);
private:
  /// Updates region [offset, offset + len) to bl if present, zeros otherwise
write_ret overwrite(
context_t ctx, ///< [in] ctx
laddr_t offset, ///< [in] write offset
extent_len_t len, ///< [in] len to write, len == bl->length() if bl
std::optional<bufferlist> &&bl, ///< [in] buffer to write, empty for zeros
lba_pin_list_t &&pins ///< [in] set of pins overlapping above region
);
/// Ensures object_data reserved region is prepared
write_ret prepare_data_reservation(
context_t ctx,
object_data_t &object_data,
extent_len_t size);
/// Trims data past size
clear_ret trim_data_reservation(
context_t ctx,
object_data_t &object_data,
extent_len_t size);
private:
/**
* max_object_size
*
* For now, we allocate a fixed region of laddr space of size max_object_size
* for any object. In the future, once we have the ability to remap logical
* mappings (necessary for clone), we'll add the ability to grow and shrink
* these regions and remove this assumption.
*/
const uint32_t max_object_size = 0;
};
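/* Illustrative usage sketch -- not part of the original interface. It shows
 * how a caller is expected to drive ObjectDataHandler for a simple write;
 * `tm`, `t` and `onode` are assumed to be supplied by the surrounding
 * transaction machinery, and 16MiB is only a placeholder for the configured
 * max object size. It mirrors the temporary-handler pattern used elsewhere
 * in SeaStore. */
inline ObjectDataHandler::write_ret example_object_write(
  TransactionManager &tm,
  Transaction &t,
  Onode &onode,
  objaddr_t offset,
  const bufferlist &bl)
{
  // construct a handler with an assumed max object size and issue the write
  return ObjectDataHandler(16 << 20).write(
    ObjectDataHandler::context_t{tm, t, onode},
    offset,
    bl);
}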
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::ObjectDataBlock> : fmt::ostream_formatter {};
#endif
| 3,997 | 27.15493 | 102 | h |
null | ceph-main/src/crimson/os/seastore/omap_manager.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <experimental/iterator>
#include <iostream>
#include "crimson/os/seastore/transaction_manager.h"
#include "crimson/os/seastore/omap_manager.h"
#include "crimson/os/seastore/omap_manager/btree/btree_omap_manager.h"
namespace crimson::os::seastore::omap_manager {
OMapManagerRef create_omap_manager(TransactionManager &trans_manager) {
return OMapManagerRef(new BtreeOMapManager(trans_manager));
}
}
namespace std {
std::ostream &operator<<(std::ostream &out, const std::pair<std::string, std::string> &rhs)
{
return out << "key_value_map (" << rhs.first<< "->" << rhs.second << ")";
}
}
namespace crimson::os::seastore {
std::ostream &operator<<(std::ostream &out, const std::list<std::string> &rhs)
{
out << '[';
std::copy(std::begin(rhs), std::end(rhs), std::experimental::make_ostream_joiner(out, ", "));
return out << ']';
}
std::ostream &operator<<(std::ostream &out, const std::vector<std::pair<std::string, std::string>> &rhs)
{
out << '[';
std::ostream_iterator<std::pair<std::string, std::string>> out_it(out, ", ");
std::copy(rhs.begin(), rhs.end(), out_it);
return out << ']';
}
}
| 1,225 | 27.511628 | 104 | cc |
null | ceph-main/src/crimson/os/seastore/omap_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iostream>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/future.hh>
#include "crimson/osd/exceptions.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/transaction_manager.h"
#define OMAP_INNER_BLOCK_SIZE 4096
#define OMAP_LEAF_BLOCK_SIZE 8192
namespace crimson::os::seastore {
std::ostream &operator<<(std::ostream &out, const std::list<std::string> &rhs);
std::ostream &operator<<(std::ostream &out, const std::map<std::string, std::string> &rhs);
class OMapManager {
  /* All OMapManager APIs take input string parameters by reference;
   * the caller must guarantee that the referenced string values stay
   * alive (not freed) until the futures returned by these functions
   * have resolved.
   */
public:
using base_iertr = TransactionManager::base_iertr;
/**
* allocate omap tree root node
*
* @param Transaction &t, current transaction
* @retval return the omap_root_t structure.
*/
using initialize_omap_iertr = base_iertr;
using initialize_omap_ret = initialize_omap_iertr::future<omap_root_t>;
virtual initialize_omap_ret initialize_omap(Transaction &t, laddr_t hint) = 0;
/**
* get value(string) by key(string)
*
* @param omap_root_t &omap_root, omap btree root information
* @param Transaction &t, current transaction
* @param string &key, omap string key
   * @retval the value mapped to the key if present, std::nullopt otherwise.
*/
using omap_get_value_iertr = base_iertr;
using omap_get_value_ret = omap_get_value_iertr::future<
std::optional<bufferlist>>;
virtual omap_get_value_ret omap_get_value(
const omap_root_t &omap_root,
Transaction &t,
const std::string &key) = 0;
/**
* set key value mapping in omap
*
* @param omap_root_t &omap_root, omap btree root information
* @param Transaction &t, current transaction
* @param string &key, omap string key
* @param string &value, mapped value corresponding key
*/
using omap_set_key_iertr = base_iertr;
using omap_set_key_ret = omap_set_key_iertr::future<>;
virtual omap_set_key_ret omap_set_key(
omap_root_t &omap_root,
Transaction &t,
const std::string &key,
const ceph::bufferlist &value) = 0;
using omap_set_keys_iertr = base_iertr;
using omap_set_keys_ret = omap_set_keys_iertr::future<>;
virtual omap_set_keys_ret omap_set_keys(
omap_root_t &omap_root,
Transaction &t,
std::map<std::string, ceph::bufferlist>&& keys) = 0;
/**
* remove key value mapping in omap tree
*
* @param omap_root_t &omap_root, omap btree root information
* @param Transaction &t, current transaction
* @param string &key, omap string key
*/
using omap_rm_key_iertr = base_iertr;
using omap_rm_key_ret = omap_rm_key_iertr::future<>;
virtual omap_rm_key_ret omap_rm_key(
omap_root_t &omap_root,
Transaction &t,
const std::string &key) = 0;
/**
* omap_list
*
* Scans key/value pairs in order.
*
* @param omap_root: omap btree root information
* @param t: current transaction
   * @param first: range start, nullopt sorts before any string,
   *               behavior based on config.inclusive,
   *               must stay alive during the call
   * @param last: range end, nullopt sorts after any string,
   *              behavior based on config.inclusive,
   *              must stay alive during the call
* @param config: see below for params
* @retval listed key->value and bool indicating complete
*/
struct omap_list_config_t {
/// max results to return
size_t max_result_size = 128;
    /// range start behavior: true includes a key equal to first
    /// (lower_bound-like), false excludes it (upper_bound-like)
    bool first_inclusive = false;
    /// range end behavior: true includes a key equal to last
    bool last_inclusive = false;
omap_list_config_t(
size_t max_result_size,
bool first_inclusive,
bool last_inclusive)
: max_result_size(max_result_size),
first_inclusive(first_inclusive),
last_inclusive(last_inclusive) {}
omap_list_config_t() {}
omap_list_config_t(const omap_list_config_t &) = default;
omap_list_config_t(omap_list_config_t &&) = default;
omap_list_config_t &operator=(const omap_list_config_t &) = default;
omap_list_config_t &operator=(omap_list_config_t &&) = default;
auto with_max(size_t max) {
this->max_result_size = max;
return *this;
}
auto without_max() {
this->max_result_size = std::numeric_limits<size_t>::max();
return *this;
}
auto with_inclusive(
bool first_inclusive,
bool last_inclusive) {
this->first_inclusive = first_inclusive;
this->last_inclusive = last_inclusive;
return *this;
}
auto with_reduced_max(size_t reduced_by) const {
assert(reduced_by <= max_result_size);
return omap_list_config_t(
max_result_size - reduced_by,
first_inclusive,
last_inclusive);
}
};
using omap_list_iertr = base_iertr;
using omap_list_bare_ret = std::tuple<
bool,
std::map<std::string, bufferlist, std::less<>>>;
using omap_list_ret = omap_list_iertr::future<omap_list_bare_ret>;
virtual omap_list_ret omap_list(
const omap_root_t &omap_root,
Transaction &t,
const std::optional<std::string> &first,
const std::optional<std::string> &last,
omap_list_config_t config = omap_list_config_t()) = 0;
/**
* remove key value mappings in a key range from omap tree
*
* @param omap_root_t &omap_root, omap btree root information
* @param Transaction &t, current transaction
* @param string &first, range start
* @param string &last, range end
*/
using omap_rm_key_range_iertr = base_iertr;
using omap_rm_key_range_ret = omap_rm_key_range_iertr::future<>;
virtual omap_rm_key_range_ret omap_rm_key_range(
omap_root_t &omap_root,
Transaction &t,
const std::string &first,
const std::string &last,
omap_list_config_t config) = 0;
/**
* clear all omap tree key->value mapping
*
* @param omap_root_t &omap_root, omap btree root information
* @param Transaction &t, current transaction
*/
using omap_clear_iertr = base_iertr;
using omap_clear_ret = omap_clear_iertr::future<>;
virtual omap_clear_ret omap_clear(omap_root_t &omap_root, Transaction &t) = 0;
virtual ~OMapManager() {}
};
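/* Illustrative sketch -- not part of the original interface. It builds the
 * kind of bounded listing configuration described above: at most 64 results,
 * lower bound included (lower_bound-like), upper bound excluded. */
inline OMapManager::omap_list_config_t example_bounded_list_config()
{
  return OMapManager::omap_list_config_t()
    .with_max(64)
    .with_inclusive(true /* first_inclusive */, false /* last_inclusive */);
}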
using OMapManagerRef = std::unique_ptr<OMapManager>;
namespace omap_manager {
OMapManagerRef create_omap_manager (
TransactionManager &trans_manager);
}
}
| 6,586 | 30.218009 | 91 | h |
null | ceph-main/src/crimson/os/seastore/onode.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include "onode.h"
#include <iostream>
namespace crimson::os::seastore {
std::ostream& operator<<(std::ostream &out, const Onode &rhs)
{
auto &layout = rhs.get_layout();
return out << "Onode("
<< "size=" << static_cast<uint32_t>(layout.size)
<< ")";
}
}
| 389 | 19.526316 | 72 | cc |
null | ceph-main/src/crimson/os/seastore/onode.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iosfwd>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include "include/byteorder.h"
#include "seastore_types.h"
namespace crimson::os::seastore {
struct onode_layout_t {
// The expected decode size of object_info_t without oid.
static constexpr int MAX_OI_LENGTH = 232;
// We might want to move the ss field out of onode_layout_t.
  // The reason is that ss_attr may grow relatively large, as
  // its clone_overlap may grow to a large size, if applications
  // set objects to a relatively large size (for the purpose of reducing
  // the number of objects per OSD, so that all objects' metadata
  // can be cached in memory) and make many modifications between
// snapshots.
// TODO: implement flexible-sized onode value to store inline ss_attr
// effectively.
static constexpr int MAX_SS_LENGTH = 1;
ceph_le32 size{0};
ceph_le32 oi_size{0};
ceph_le32 ss_size{0};
omap_root_le_t omap_root;
omap_root_le_t xattr_root;
object_data_le_t object_data;
char oi[MAX_OI_LENGTH];
char ss[MAX_SS_LENGTH];
} __attribute__((packed));
class Transaction;
/**
* Onode
*
* Interface manipulated by seastore. OnodeManager implementations should
* return objects derived from this interface with layout referencing
* internal representation of onode_layout_t.
*/
class Onode : public boost::intrusive_ref_counter<
Onode,
boost::thread_unsafe_counter>
{
protected:
virtual laddr_t get_hint() const = 0;
const uint32_t default_metadata_offset = 0;
const uint32_t default_metadata_range = 0;
public:
Onode(uint32_t ddr, uint32_t dmr)
: default_metadata_offset(ddr),
default_metadata_range(dmr)
{}
virtual const onode_layout_t &get_layout() const = 0;
virtual onode_layout_t &get_mutable_layout(Transaction &t) = 0;
virtual ~Onode() = default;
laddr_t get_metadata_hint(uint64_t block_size) const {
assert(default_metadata_offset);
assert(default_metadata_range);
uint64_t range_blocks = default_metadata_range / block_size;
return get_hint() + default_metadata_offset +
(((uint32_t)std::rand() % range_blocks) * block_size);
}
laddr_t get_data_hint() const {
return get_hint();
}
};
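/* Worked example (illustrative, not from the original source): assuming
 * default_metadata_offset = 16MiB, default_metadata_range = 8MiB and a
 * 4KiB block_size, get_metadata_hint() above computes range_blocks =
 * 8MiB / 4KiB = 2048 and returns
 *   get_hint() + 16MiB + (rand() % 2048) * 4KiB,
 * i.e. a block-aligned laddr within [hint + 16MiB, hint + 24MiB). */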
std::ostream& operator<<(std::ostream &out, const Onode &rhs);
using OnodeRef = boost::intrusive_ptr<Onode>;
}
#if FMT_VERSION >= 90000
template<> struct fmt::formatter<crimson::os::seastore::Onode> : fmt::ostream_formatter {};
#endif
| 2,578 | 27.977528 | 91 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iostream>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/future.hh>
#include "include/buffer_fwd.h"
#include "include/ceph_assert.h"
#include "common/hobject.h"
#include "crimson/common/errorator.h"
#include "crimson/os/seastore/onode.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/transaction_manager.h"
#include "crimson/osd/exceptions.h"
namespace crimson::os::seastore {
class OnodeManager {
using base_iertr = TransactionManager::base_iertr;
public:
using mkfs_iertr = base_iertr;
using mkfs_ret = mkfs_iertr::future<>;
virtual mkfs_ret mkfs(Transaction &t) = 0;
using contains_onode_iertr = base_iertr;
using contains_onode_ret = contains_onode_iertr::future<bool>;
virtual contains_onode_ret contains_onode(
Transaction &trans,
const ghobject_t &hoid) = 0;
using get_onode_iertr = base_iertr::extend<
crimson::ct_error::enoent>;
using get_onode_ret = get_onode_iertr::future<
OnodeRef>;
virtual get_onode_ret get_onode(
Transaction &trans,
const ghobject_t &hoid) = 0;
using get_or_create_onode_iertr = base_iertr::extend<
crimson::ct_error::value_too_large>;
using get_or_create_onode_ret = get_or_create_onode_iertr::future<
OnodeRef>;
virtual get_or_create_onode_ret get_or_create_onode(
Transaction &trans,
const ghobject_t &hoid) = 0;
using get_or_create_onodes_iertr = base_iertr::extend<
crimson::ct_error::value_too_large>;
using get_or_create_onodes_ret = get_or_create_onodes_iertr::future<
std::vector<OnodeRef>>;
virtual get_or_create_onodes_ret get_or_create_onodes(
Transaction &trans,
const std::vector<ghobject_t> &hoids) = 0;
using write_dirty_iertr = base_iertr;
using write_dirty_ret = write_dirty_iertr::future<>;
virtual write_dirty_ret write_dirty(
Transaction &trans,
const std::vector<OnodeRef> &onodes) = 0;
using erase_onode_iertr = base_iertr;
using erase_onode_ret = erase_onode_iertr::future<>;
virtual erase_onode_ret erase_onode(
Transaction &trans,
OnodeRef &onode) = 0;
using list_onodes_iertr = base_iertr;
using list_onodes_bare_ret = std::tuple<std::vector<ghobject_t>, ghobject_t>;
using list_onodes_ret = list_onodes_iertr::future<list_onodes_bare_ret>;
virtual list_onodes_ret list_onodes(
Transaction &trans,
const ghobject_t& start,
const ghobject_t& end,
uint64_t limit) = 0;
virtual ~OnodeManager() {}
};
using OnodeManagerRef = std::unique_ptr<OnodeManager>;
}
| 2,691 | 29.942529 | 79 | h |
null | ceph-main/src/crimson/os/seastore/ordering_handle.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/shared_mutex.hh>
#include "crimson/common/operation.h"
#include "crimson/osd/osd_operation.h"
namespace crimson::os::seastore {
struct WritePipeline {
struct ReserveProjectedUsage : OrderedExclusivePhaseT<ReserveProjectedUsage> {
constexpr static auto type_name = "WritePipeline::reserve_projected_usage";
} reserve_projected_usage;
struct OolWrites : UnorderedStageT<OolWrites> {
constexpr static auto type_name = "UnorderedStage::ool_writes_stage";
} ool_writes;
struct Prepare : OrderedExclusivePhaseT<Prepare> {
constexpr static auto type_name = "WritePipeline::prepare_phase";
} prepare;
struct DeviceSubmission : OrderedConcurrentPhaseT<DeviceSubmission> {
constexpr static auto type_name = "WritePipeline::device_submission_phase";
} device_submission;
struct Finalize : OrderedExclusivePhaseT<Finalize> {
constexpr static auto type_name = "WritePipeline::finalize_phase";
} finalize;
using BlockingEvents = std::tuple<
ReserveProjectedUsage::BlockingEvent,
OolWrites::BlockingEvent,
Prepare::BlockingEvent,
DeviceSubmission::BlockingEvent,
Finalize::BlockingEvent
>;
};
/**
* PlaceholderOperation
*
* Once seastore is more complete, I expect to update the externally
* facing interfaces to permit passing the osd level operation through.
* Until then (and for tests likely permanently) we'll use this unregistered
* placeholder for the pipeline phases necessary for journal correctness.
*/
class PlaceholderOperation : public crimson::osd::PhasedOperationT<PlaceholderOperation> {
public:
constexpr static auto type = 0U;
constexpr static auto type_name =
"crimson::os::seastore::PlaceholderOperation";
static PlaceholderOperation::IRef create() {
return IRef{new PlaceholderOperation()};
}
PipelineHandle handle;
WritePipeline::BlockingEvents tracking_events;
PipelineHandle& get_handle() {
return handle;
}
private:
void dump_detail(ceph::Formatter *f) const final {}
void print(std::ostream &) const final {}
};
struct OperationProxy {
OperationRef op;
OperationProxy(OperationRef op) : op(std::move(op)) {}
virtual seastar::future<> enter(WritePipeline::ReserveProjectedUsage&) = 0;
virtual seastar::future<> enter(WritePipeline::OolWrites&) = 0;
virtual seastar::future<> enter(WritePipeline::Prepare&) = 0;
virtual seastar::future<> enter(WritePipeline::DeviceSubmission&) = 0;
virtual seastar::future<> enter(WritePipeline::Finalize&) = 0;
virtual void exit() = 0;
virtual seastar::future<> complete() = 0;
virtual ~OperationProxy() = default;
};
template <typename OpT>
struct OperationProxyT : OperationProxy {
OperationProxyT(typename OpT::IRef op) : OperationProxy(op) {}
OpT* that() {
return static_cast<OpT*>(op.get());
}
const OpT* that() const {
return static_cast<const OpT*>(op.get());
}
seastar::future<> enter(WritePipeline::ReserveProjectedUsage& s) final {
return that()->enter_stage(s);
}
seastar::future<> enter(WritePipeline::OolWrites& s) final {
return that()->enter_stage(s);
}
seastar::future<> enter(WritePipeline::Prepare& s) final {
return that()->enter_stage(s);
}
seastar::future<> enter(WritePipeline::DeviceSubmission& s) final {
return that()->enter_stage(s);
}
seastar::future<> enter(WritePipeline::Finalize& s) final {
return that()->enter_stage(s);
}
void exit() final {
return that()->handle.exit();
}
seastar::future<> complete() final {
return that()->handle.complete();
}
};
struct OrderingHandle {
  // We could easily optimize this dynamic allocation away, as all concrete
  // proxies are supposed to have exactly the same size.
std::unique_ptr<OperationProxy> op;
seastar::shared_mutex *collection_ordering_lock = nullptr;
  // In the future we might add further constructors / templates for type
  // erasure while extracting the location of tracking events.
OrderingHandle(std::unique_ptr<OperationProxy> op) : op(std::move(op)) {}
OrderingHandle(OrderingHandle &&other)
: op(std::move(other.op)),
collection_ordering_lock(other.collection_ordering_lock) {
other.collection_ordering_lock = nullptr;
}
seastar::future<> take_collection_lock(seastar::shared_mutex &mutex) {
ceph_assert(!collection_ordering_lock);
collection_ordering_lock = &mutex;
return collection_ordering_lock->lock();
}
void maybe_release_collection_lock() {
if (collection_ordering_lock) {
collection_ordering_lock->unlock();
collection_ordering_lock = nullptr;
}
}
template <typename T>
seastar::future<> enter(T &t) {
return op->enter(t);
}
void exit() {
op->exit();
}
seastar::future<> complete() {
return op->complete();
}
~OrderingHandle() {
maybe_release_collection_lock();
}
};
inline OrderingHandle get_dummy_ordering_handle() {
using PlaceholderOpProxy = OperationProxyT<PlaceholderOperation>;
return OrderingHandle{
std::make_unique<PlaceholderOpProxy>(PlaceholderOperation::create())};
}
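/* Illustrative sketch -- not part of the original code. It spells out the
 * stage order a write is expected to take through WritePipeline; `pipeline`
 * and `handle` are assumed to outlive the returned future. */
inline seastar::future<> example_walk_write_pipeline(
  WritePipeline &pipeline,
  OrderingHandle &handle)
{
  // each enter() resolves once the corresponding phase admits this op
  return handle.enter(pipeline.reserve_projected_usage
  ).then([&pipeline, &handle] {
    return handle.enter(pipeline.ool_writes);
  }).then([&pipeline, &handle] {
    return handle.enter(pipeline.prepare);
  }).then([&pipeline, &handle] {
    return handle.enter(pipeline.device_submission);
  }).then([&pipeline, &handle] {
    return handle.enter(pipeline.finalize);
  }).then([&handle] {
    return handle.complete();
  });
}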
} // namespace crimson::os::seastore
namespace crimson {
template <>
struct EventBackendRegistry<os::seastore::PlaceholderOperation> {
static std::tuple<> get_backends() {
return {};
}
};
} // namespace crimson
| 5,404 | 28.697802 | 90 | h |
null | ceph-main/src/crimson/os/seastore/random_block_manager.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "crimson/os/seastore/logging.h"
#include "crimson/os/seastore/random_block_manager.h"
#include "crimson/os/seastore/random_block_manager/nvme_block_device.h"
#include "crimson/os/seastore/random_block_manager/rbm_device.h"
namespace crimson::os::seastore {
seastar::future<random_block_device::RBMDeviceRef>
get_rb_device(
const std::string &device)
{
return seastar::make_ready_future<random_block_device::RBMDeviceRef>(
std::make_unique<
random_block_device::nvme::NVMeBlockDevice
>(device + "/block"));
}
}
| 639 | 28.090909 | 71 | cc |
null | ceph-main/src/crimson/os/seastore/random_block_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iosfwd>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/future.hh>
#include "include/ceph_assert.h"
#include "crimson/os/seastore/seastore_types.h"
#include "include/buffer_fwd.h"
#include "crimson/osd/exceptions.h"
#include "crimson/os/seastore/transaction.h"
#include "crimson/common/layout.h"
#include "include/buffer.h"
#include "crimson/os/seastore/device.h"
namespace crimson::os::seastore {
struct rbm_shard_info_t {
std::size_t size = 0;
uint64_t start_offset = 0;
DENC(rbm_shard_info_t, v, p) {
DENC_START(1, 1, p);
denc(v.size, p);
denc(v.start_offset, p);
DENC_FINISH(p);
}
};
struct rbm_metadata_header_t {
size_t size = 0;
size_t block_size = 0;
uint64_t feature = 0;
uint64_t journal_size = 0;
checksum_t crc = 0;
device_config_t config;
unsigned int shard_num = 0;
std::vector<rbm_shard_info_t> shard_infos;
DENC(rbm_metadata_header_t, v, p) {
DENC_START(1, 1, p);
denc(v.size, p);
denc(v.block_size, p);
denc(v.feature, p);
denc(v.journal_size, p);
denc(v.crc, p);
denc(v.config, p);
denc(v.shard_num, p);
denc(v.shard_infos, p);
DENC_FINISH(p);
}
void validate() const {
ceph_assert(shard_num == seastar::smp::count);
ceph_assert(block_size > 0);
for (unsigned int i = 0; i < seastar::smp::count; i ++) {
ceph_assert(shard_infos[i].size > block_size &&
shard_infos[i].size % block_size == 0);
ceph_assert_always(shard_infos[i].size <= DEVICE_OFF_MAX);
ceph_assert(journal_size > 0 &&
journal_size % block_size == 0);
ceph_assert(shard_infos[i].start_offset < size &&
shard_infos[i].start_offset % block_size == 0);
}
ceph_assert(config.spec.magic != 0);
ceph_assert(get_default_backend_of_device(config.spec.dtype) ==
backend_type_t::RANDOM_BLOCK);
ceph_assert(config.spec.id <= DEVICE_ID_MAX_VALID);
}
};
enum class rbm_extent_state_t {
FREE, // not allocated
RESERVED, // extent is reserved by alloc_new_extent, but is not persistent
ALLOCATED, // extent is persistent
};
class Device;
using rbm_abs_addr = uint64_t;
constexpr rbm_abs_addr RBM_START_ADDRESS = 0;
class RandomBlockManager {
public:
using read_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::enoent,
crimson::ct_error::erange>;
virtual read_ertr::future<> read(paddr_t addr, bufferptr &buffer) = 0;
using write_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::ebadf,
crimson::ct_error::enospc,
crimson::ct_error::erange
>;
virtual write_ertr::future<> write(paddr_t addr, bufferptr &buf) = 0;
using open_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::enoent>;
virtual open_ertr::future<> open() = 0;
using close_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg>;
virtual close_ertr::future<> close() = 0;
using allocate_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::enospc
>;
using allocate_ret = allocate_ertr::future<paddr_t>;
// allocator, return start addr of allocated blocks
virtual paddr_t alloc_extent(size_t size) = 0;
virtual void mark_space_used(paddr_t paddr, size_t len) = 0;
virtual void mark_space_free(paddr_t paddr, size_t len) = 0;
virtual void complete_allocation(paddr_t addr, size_t size) = 0;
virtual size_t get_size() const = 0;
virtual extent_len_t get_block_size() const = 0;
virtual uint64_t get_free_blocks() const = 0;
virtual device_id_t get_device_id() const = 0;
virtual const seastore_meta_t &get_meta() const = 0;
virtual Device* get_device() = 0;
virtual paddr_t get_start() = 0;
virtual rbm_extent_state_t get_extent_state(paddr_t addr, size_t size) = 0;
virtual size_t get_journal_size() const = 0;
virtual ~RandomBlockManager() {}
};
using RandomBlockManagerRef = std::unique_ptr<RandomBlockManager>;
inline rbm_abs_addr convert_paddr_to_abs_addr(const paddr_t& paddr) {
const blk_paddr_t& blk_addr = paddr.as_blk_paddr();
return blk_addr.get_device_off();
}
inline paddr_t convert_abs_addr_to_paddr(rbm_abs_addr addr, device_id_t d_id) {
return paddr_t::make_blk_paddr(d_id, addr);
}
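/* Illustrative sketch -- not part of the original header. The two helpers
 * above are intended to be inverses for block paddrs: a (device id, byte
 * offset) pair survives a round trip through paddr_t. */
inline bool example_rbm_addr_round_trip(device_id_t d_id, rbm_abs_addr off)
{
  paddr_t addr = convert_abs_addr_to_paddr(off, d_id);
  return convert_paddr_to_abs_addr(addr) == off;
}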
namespace random_block_device {
class RBMDevice;
}
seastar::future<std::unique_ptr<random_block_device::RBMDevice>>
get_rb_device(const std::string &device);
std::ostream &operator<<(std::ostream &out, const rbm_metadata_header_t &header);
std::ostream &operator<<(std::ostream &out, const rbm_shard_info_t &shard);
}
WRITE_CLASS_DENC_BOUNDED(
crimson::os::seastore::rbm_shard_info_t
)
WRITE_CLASS_DENC_BOUNDED(
crimson::os::seastore::rbm_metadata_header_t
)
#if FMT_VERSION >= 90000
template<> struct fmt::formatter<crimson::os::seastore::rbm_metadata_header_t> : fmt::ostream_formatter {};
template<> struct fmt::formatter<crimson::os::seastore::rbm_shard_info_t> : fmt::ostream_formatter {};
#endif
| 5,353 | 29.248588 | 107 | h |
null | ceph-main/src/crimson/os/seastore/randomblock_manager_group.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <set>
#include "crimson/common/errorator.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/random_block_manager.h"
#include "crimson/os/seastore/random_block_manager/block_rb_manager.h"
namespace crimson::os::seastore {
class RBMDeviceGroup {
public:
RBMDeviceGroup() {
rb_devices.resize(DEVICE_ID_MAX);
}
const std::set<device_id_t>& get_device_ids() const {
return device_ids;
}
std::vector<RandomBlockManager*> get_rb_managers() const {
assert(device_ids.size());
std::vector<RandomBlockManager*> ret;
for (auto& device_id : device_ids) {
auto rb_device = rb_devices[device_id].get();
assert(rb_device->get_device_id() == device_id);
ret.emplace_back(rb_device);
}
return ret;
}
void add_rb_manager(RandomBlockManagerRef rbm) {
auto device_id = rbm->get_device_id();
ceph_assert(!has_device(device_id));
rb_devices[device_id] = std::move(rbm);
device_ids.insert(device_id);
}
void reset() {
rb_devices.clear();
rb_devices.resize(DEVICE_ID_MAX);
device_ids.clear();
}
auto get_block_size() const {
assert(device_ids.size());
return rb_devices[*device_ids.begin()]->get_block_size();
}
const seastore_meta_t &get_meta() const {
assert(device_ids.size());
return rb_devices[*device_ids.begin()]->get_meta();
}
private:
bool has_device(device_id_t id) const {
assert(id <= DEVICE_ID_MAX_VALID);
return device_ids.count(id) >= 1;
}
std::vector<RandomBlockManagerRef> rb_devices;
std::set<device_id_t> device_ids;
};
using RBMDeviceGroupRef = std::unique_ptr<RBMDeviceGroup>;
}
| 1,787 | 23.833333 | 72 | h |
null | ceph-main/src/crimson/os/seastore/root_block.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "crimson/os/seastore/root_block.h"
#include "crimson/os/seastore/lba_manager/btree/lba_btree_node.h"
#include "crimson/os/seastore/backref/backref_tree_node.h"
namespace crimson::os::seastore {
void RootBlock::on_replace_prior(Transaction &t) {
if (!lba_root_node) {
auto &prior = static_cast<RootBlock&>(*get_prior_instance());
lba_root_node = prior.lba_root_node;
if (lba_root_node) {
((lba_manager::btree::LBANode*)lba_root_node)->root_block = this;
}
}
if (!backref_root_node) {
auto &prior = static_cast<RootBlock&>(*get_prior_instance());
backref_root_node = prior.backref_root_node;
if (backref_root_node) {
((backref::BackrefNode*)backref_root_node)->root_block = this;
}
}
}
} // namespace crimson::os::seastore
| 884 | 30.607143 | 71 | cc |
null | ceph-main/src/crimson/os/seastore/root_block.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/cached_extent.h"
namespace crimson::os::seastore {
/**
* RootBlock
*
* Holds the physical addresses of all metadata roots.
* In-memory values may be
* - absolute: reference to block which predates the current transaction
* - record_relative: reference to block updated in this transaction
* if !pending()
*
* Journal replay only considers deltas and must always discover the most
* recent value for the RootBlock. Because the contents of root_t above are
* very small, it's simplest to stash the entire root_t value into the delta
* and never actually write the RootBlock to a physical location (safe since
* nothing references the location of the RootBlock).
*
* As a result, Cache treats the root differently in a few ways including:
* - state will only ever be DIRTY or MUTATION_PENDING
 * - RootBlocks never show up in the transaction fresh or dirty lists --
* there's a special Transaction::root member for when the root needs to
* be mutated.
*
* TODO: Journal trimming will need to be aware of the most recent RootBlock
* delta location, or, even easier, just always write one out with the
* mutation which changes the journal trim bound.
*/
struct RootBlock : CachedExtent {
constexpr static extent_len_t SIZE = 4<<10;
using Ref = TCachedExtentRef<RootBlock>;
root_t root;
CachedExtent* lba_root_node = nullptr;
CachedExtent* backref_root_node = nullptr;
RootBlock() : CachedExtent(zero_length_t()) {};
RootBlock(const RootBlock &rhs)
: CachedExtent(rhs),
root(rhs.root),
lba_root_node(nullptr),
backref_root_node(nullptr)
{}
CachedExtentRef duplicate_for_write(Transaction&) final {
return CachedExtentRef(new RootBlock(*this));
};
static constexpr extent_types_t TYPE = extent_types_t::ROOT;
extent_types_t get_type() const final {
return extent_types_t::ROOT;
}
void on_replace_prior(Transaction &t) final;
/// dumps root as delta
ceph::bufferlist get_delta() final {
ceph::bufferlist bl;
ceph::buffer::ptr bptr(sizeof(root_t));
*reinterpret_cast<root_t*>(bptr.c_str()) = root;
bl.append(bptr);
return bl;
}
/// overwrites root
void apply_delta_and_adjust_crc(paddr_t base, const ceph::bufferlist &_bl) final {
assert(_bl.length() == sizeof(root_t));
ceph::bufferlist bl = _bl;
bl.rebuild();
root = *reinterpret_cast<const root_t*>(bl.front().c_str());
root.adjust_addrs_from_base(base);
}
/// Patches relative addrs in memory based on record commit addr
void on_delta_write(paddr_t record_block_offset) final {
root.adjust_addrs_from_base(record_block_offset);
}
complete_load_ertr::future<> complete_load() final {
ceph_abort_msg("Root is only written via deltas");
}
void on_initial_write() final {
ceph_abort_msg("Root is only written via deltas");
}
root_t &get_root() { return root; }
std::ostream &print_detail(std::ostream &out) const final {
return out << ", root_block(lba_root_node=" << (void*)lba_root_node
<< ", backref_root_node=" << (void*)backref_root_node
<< ")";
}
};
using RootBlockRef = RootBlock::Ref;
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::RootBlock> : fmt::ostream_formatter {};
#endif
| 3,419 | 30.090909 | 96 | h |
null | ceph-main/src/crimson/os/seastore/seastore.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "seastore.h"
#include <algorithm>
#include <boost/algorithm/string/trim.hpp>
#include <fmt/format.h>
#include <fmt/ostream.h>
#include <seastar/core/file.hh>
#include <seastar/core/fstream.hh>
#include <seastar/core/metrics.hh>
#include <seastar/core/shared_mutex.hh>
#include "common/safe_io.h"
#include "include/stringify.h"
#include "os/Transaction.h"
#include "crimson/common/buffer_io.h"
#include "crimson/os/futurized_collection.h"
#include "crimson/os/seastore/backref_manager.h"
#include "crimson/os/seastore/async_cleaner.h"
#include "crimson/os/seastore/collection_manager/flat_collection_manager.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/fltree_onode_manager.h"
#include "crimson/os/seastore/omap_manager/btree/btree_omap_manager.h"
#include "crimson/os/seastore/onode_manager.h"
#include "crimson/os/seastore/object_data_handler.h"
using std::string;
using crimson::common::local_conf;
template <> struct fmt::formatter<crimson::os::seastore::op_type_t>
: fmt::formatter<std::string_view> {
using op_type_t = crimson::os::seastore::op_type_t;
// parse is inherited from formatter<string_view>.
template <typename FormatContext>
auto format(op_type_t op, FormatContext& ctx) {
std::string_view name = "unknown";
switch (op) {
case op_type_t::TRANSACTION:
name = "transaction";
break;
case op_type_t::READ:
name = "read";
break;
case op_type_t::WRITE:
name = "write";
break;
case op_type_t::GET_ATTR:
name = "get_attr";
break;
case op_type_t::GET_ATTRS:
name = "get_attrs";
break;
case op_type_t::STAT:
name = "stat";
break;
case op_type_t::OMAP_GET_VALUES:
name = "omap_get_values";
break;
case op_type_t::OMAP_LIST:
name = "omap_list";
break;
case op_type_t::MAX:
name = "unknown";
break;
}
return formatter<string_view>::format(name, ctx);
}
};
SET_SUBSYS(seastore);
namespace crimson::os::seastore {
class FileMDStore final : public SeaStore::MDStore {
std::string root;
public:
FileMDStore(const std::string& root) : root(root) {}
write_meta_ret write_meta(
const std::string& key, const std::string& value) final {
std::string path = fmt::format("{}/{}", root, key);
ceph::bufferlist bl;
bl.append(value + "\n");
return crimson::write_file(std::move(bl), path);
}
read_meta_ret read_meta(const std::string& key) final {
std::string path = fmt::format("{}/{}", root, key);
return seastar::file_exists(
path
).then([path] (bool exist) {
if (exist) {
return crimson::read_file(path)
.then([] (auto tmp_buf) {
std::string v = {tmp_buf.get(), tmp_buf.size()};
std::size_t pos = v.find("\n");
std::string str = v.substr(0, pos);
return seastar::make_ready_future<std::optional<std::string>>(str);
});
} else {
return seastar::make_ready_future<std::optional<std::string>>(std::nullopt);
}
});
}
};
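/* For illustration (hypothetical path, not from the original source): with
 * root == "/var/lib/osd0", write_meta("fsid", "1234") creates or overwrites
 * the file /var/lib/osd0/fsid with the contents "1234\n", and a later
 * read_meta("fsid") returns "1234" by trimming at the first newline. */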
using crimson::common::get_conf;
SeaStore::Shard::Shard(
std::string root,
Device* dev,
bool is_test)
:root(root),
max_object_size(
get_conf<uint64_t>("seastore_default_max_object_size")),
is_test(is_test),
throttler(
get_conf<uint64_t>("seastore_max_concurrent_transactions"))
{
device = &(dev->get_sharded_device());
register_metrics();
}
SeaStore::SeaStore(
const std::string& root,
MDStoreRef mdstore)
: root(root),
mdstore(std::move(mdstore))
{
}
SeaStore::~SeaStore() = default;
void SeaStore::Shard::register_metrics()
{
namespace sm = seastar::metrics;
using op_type_t = crimson::os::seastore::op_type_t;
std::pair<op_type_t, sm::label_instance> labels_by_op_type[] = {
{op_type_t::TRANSACTION, sm::label_instance("latency", "TRANSACTION")},
{op_type_t::READ, sm::label_instance("latency", "READ")},
{op_type_t::WRITE, sm::label_instance("latency", "WRITE")},
{op_type_t::GET_ATTR, sm::label_instance("latency", "GET_ATTR")},
{op_type_t::GET_ATTRS, sm::label_instance("latency", "GET_ATTRS")},
{op_type_t::STAT, sm::label_instance("latency", "STAT")},
{op_type_t::OMAP_GET_VALUES, sm::label_instance("latency", "OMAP_GET_VALUES")},
{op_type_t::OMAP_LIST, sm::label_instance("latency", "OMAP_LIST")},
};
for (auto& [op_type, label] : labels_by_op_type) {
auto desc = fmt::format("latency of seastore operation (optype={})",
op_type);
metrics.add_group(
"seastore",
{
sm::make_histogram(
"op_lat", [this, op_type=op_type] {
return get_latency(op_type);
},
sm::description(desc),
{label}
),
}
);
}
metrics.add_group(
"seastore",
{
sm::make_gauge(
"concurrent_transactions",
[this] {
return throttler.get_current();
},
sm::description("transactions that are running inside seastore")
),
sm::make_gauge(
"pending_transactions",
[this] {
return throttler.get_pending();
},
sm::description("transactions waiting to get "
"through seastore's throttler")
)
}
);
}
seastar::future<> SeaStore::start()
{
ceph_assert(seastar::this_shard_id() == primary_core);
#ifndef NDEBUG
bool is_test = true;
#else
bool is_test = false;
#endif
using crimson::common::get_conf;
std::string type = get_conf<std::string>("seastore_main_device_type");
device_type_t d_type = string_to_device_type(type);
assert(d_type == device_type_t::SSD ||
d_type == device_type_t::RANDOM_BLOCK_SSD);
ceph_assert(root != "");
return Device::make_device(root, d_type
).then([this](DeviceRef device_obj) {
device = std::move(device_obj);
return device->start();
}).then([this, is_test] {
ceph_assert(device);
return shard_stores.start(root, device.get(), is_test);
});
}
seastar::future<> SeaStore::test_start(DeviceRef device_obj)
{
ceph_assert(device_obj);
ceph_assert(root == "");
device = std::move(device_obj);
return shard_stores.start_single(root, device.get(), true);
}
seastar::future<> SeaStore::stop()
{
ceph_assert(seastar::this_shard_id() == primary_core);
return seastar::do_for_each(secondaries, [](auto& sec_dev) {
return sec_dev->stop();
}).then([this] {
secondaries.clear();
if (device) {
return device->stop();
} else {
return seastar::now();
}
}).then([this] {
return shard_stores.stop();
});
}
SeaStore::mount_ertr::future<> SeaStore::test_mount()
{
ceph_assert(seastar::this_shard_id() == primary_core);
return shard_stores.local().mount_managers();
}
SeaStore::mount_ertr::future<> SeaStore::mount()
{
ceph_assert(seastar::this_shard_id() == primary_core);
return device->mount(
).safe_then([this] {
auto sec_devices = device->get_sharded_device().get_secondary_devices();
return crimson::do_for_each(sec_devices, [this](auto& device_entry) {
device_id_t id = device_entry.first;
magic_t magic = device_entry.second.magic;
device_type_t dtype = device_entry.second.dtype;
std::string path =
fmt::format("{}/block.{}.{}", root, dtype, std::to_string(id));
return Device::make_device(path, dtype
).then([this, path, magic](DeviceRef sec_dev) {
return sec_dev->start(
).then([this, magic, sec_dev = std::move(sec_dev)]() mutable {
return sec_dev->mount(
).safe_then([this, sec_dev=std::move(sec_dev), magic]() mutable {
boost::ignore_unused(magic); // avoid clang warning;
assert(sec_dev->get_sharded_device().get_magic() == magic);
secondaries.emplace_back(std::move(sec_dev));
});
}).safe_then([this] {
return set_secondaries();
});
});
}).safe_then([this] {
return shard_stores.invoke_on_all([](auto &local_store) {
return local_store.mount_managers();
});
});
}).handle_error(
crimson::ct_error::assert_all{
"Invalid error in SeaStore::mount"
}
);
}
seastar::future<> SeaStore::Shard::mount_managers()
{
init_managers();
return transaction_manager->mount(
).handle_error(
crimson::ct_error::assert_all{
"Invalid error in mount_managers"
});
}
seastar::future<> SeaStore::umount()
{
ceph_assert(seastar::this_shard_id() == primary_core);
return shard_stores.invoke_on_all([](auto &local_store) {
return local_store.umount();
});
}
seastar::future<> SeaStore::Shard::umount()
{
return [this] {
if (transaction_manager) {
return transaction_manager->close();
} else {
return TransactionManager::close_ertr::now();
}
}().safe_then([this] {
return crimson::do_for_each(
secondaries,
[](auto& sec_dev) -> SegmentManager::close_ertr::future<>
{
return sec_dev->close();
});
}).safe_then([this] {
return device->close();
}).safe_then([this] {
secondaries.clear();
transaction_manager.reset();
collection_manager.reset();
onode_manager.reset();
}).handle_error(
crimson::ct_error::assert_all{
"Invalid error in SeaStore::umount"
}
);
}
seastar::future<> SeaStore::write_fsid(uuid_d new_osd_fsid)
{
ceph_assert(seastar::this_shard_id() == primary_core);
LOG_PREFIX(SeaStore::write_fsid);
return read_meta("fsid").then([this, FNAME, new_osd_fsid] (auto tuple) {
auto [ret, fsid] = tuple;
std::string str_fsid = stringify(new_osd_fsid);
if (ret == -1) {
return write_meta("fsid", stringify(new_osd_fsid));
} else if (ret == 0 && fsid != str_fsid) {
ERROR("on-disk fsid {} != provided {}",
fsid, stringify(new_osd_fsid));
throw std::runtime_error("store fsid error");
} else {
return seastar::now();
}
});
}
seastar::future<>
SeaStore::Shard::mkfs_managers()
{
init_managers();
return transaction_manager->mkfs(
).safe_then([this] {
init_managers();
return transaction_manager->mount();
}).safe_then([this] {
return repeat_eagain([this] {
return transaction_manager->with_transaction_intr(
Transaction::src_t::MUTATE,
"mkfs_seastore",
[this](auto& t)
{
return onode_manager->mkfs(t
).si_then([this, &t] {
return collection_manager->mkfs(t);
}).si_then([this, &t](auto coll_root) {
transaction_manager->write_collection_root(
t, coll_root);
return transaction_manager->submit_transaction(t);
});
});
});
}).handle_error(
crimson::ct_error::assert_all{
"Invalid error in Shard::mkfs_managers"
}
);
}
seastar::future<> SeaStore::set_secondaries()
{
auto sec_dev_ite = secondaries.rbegin();
Device* sec_dev = sec_dev_ite->get();
return shard_stores.invoke_on_all([sec_dev](auto &local_store) {
local_store.set_secondaries(sec_dev->get_sharded_device());
});
}
SeaStore::mkfs_ertr::future<> SeaStore::test_mkfs(uuid_d new_osd_fsid)
{
ceph_assert(seastar::this_shard_id() == primary_core);
return read_meta("mkfs_done").then([this, new_osd_fsid] (auto tuple) {
auto [done, value] = tuple;
if (done == 0) {
return seastar::now();
}
return shard_stores.local().mkfs_managers(
).then([this, new_osd_fsid] {
return prepare_meta(new_osd_fsid);
});
});
}
seastar::future<> SeaStore::prepare_meta(uuid_d new_osd_fsid)
{
ceph_assert(seastar::this_shard_id() == primary_core);
return write_fsid(new_osd_fsid).then([this] {
return read_meta("type").then([this] (auto tuple) {
auto [ret, type] = tuple;
if (ret == 0 && type == "seastore") {
return seastar::now();
} else if (ret == 0 && type != "seastore") {
LOG_PREFIX(SeaStore::prepare_meta);
ERROR("expected seastore, but type is {}", type);
throw std::runtime_error("store type error");
} else {
return write_meta("type", "seastore");
}
});
}).then([this] {
return write_meta("mkfs_done", "yes");
});
}
SeaStore::mkfs_ertr::future<> SeaStore::mkfs(uuid_d new_osd_fsid)
{
ceph_assert(seastar::this_shard_id() == primary_core);
return read_meta("mkfs_done").then([this, new_osd_fsid] (auto tuple) {
auto [done, value] = tuple;
if (done == 0) {
return seastar::now();
} else {
return seastar::do_with(
secondary_device_set_t(),
[this, new_osd_fsid](auto& sds) {
auto fut = seastar::now();
LOG_PREFIX(SeaStore::mkfs);
DEBUG("root: {}", root);
if (!root.empty()) {
fut = seastar::open_directory(root
).then([this, &sds, new_osd_fsid](seastar::file rdir) mutable {
std::unique_ptr<seastar::file> root_f =
std::make_unique<seastar::file>(std::move(rdir));
auto sub = root_f->list_directory(
[this, &sds, new_osd_fsid](auto de) mutable -> seastar::future<>
{
LOG_PREFIX(SeaStore::mkfs);
DEBUG("found file: {}", de.name);
if (de.name.find("block.") == 0
&& de.name.length() > 6 /* 6 for "block." */) {
std::string entry_name = de.name;
auto dtype_end = entry_name.find_first_of('.', 6);
device_type_t dtype =
string_to_device_type(
entry_name.substr(6, dtype_end - 6));
if (dtype == device_type_t::NONE) {
// invalid device type
return seastar::now();
}
auto id = std::stoi(entry_name.substr(dtype_end + 1));
std::string path = fmt::format("{}/{}", root, entry_name);
return Device::make_device(path, dtype
).then([this, &sds, id, dtype, new_osd_fsid](DeviceRef sec_dev) {
auto p_sec_dev = sec_dev.get();
secondaries.emplace_back(std::move(sec_dev));
return p_sec_dev->start(
).then([&sds, id, dtype, new_osd_fsid, p_sec_dev]() {
magic_t magic = (magic_t)std::rand();
sds.emplace(
(device_id_t)id,
device_spec_t{magic, dtype, (device_id_t)id});
return p_sec_dev->mkfs(device_config_t::create_secondary(
new_osd_fsid, id, dtype, magic)
).handle_error(crimson::ct_error::assert_all{"not possible"});
});
}).then([this] {
return set_secondaries();
});
}
return seastar::now();
});
return sub.done().then([root_f=std::move(root_f)] {});
});
}
return fut.then([this, &sds, new_osd_fsid] {
device_id_t id = 0;
device_type_t d_type = device->get_device_type();
assert(d_type == device_type_t::SSD ||
d_type == device_type_t::RANDOM_BLOCK_SSD);
if (d_type == device_type_t::RANDOM_BLOCK_SSD) {
id = static_cast<device_id_t>(DEVICE_ID_RANDOM_BLOCK_MIN);
}
return device->mkfs(
device_config_t::create_primary(new_osd_fsid, id, d_type, sds)
);
}).safe_then([this] {
return crimson::do_for_each(secondaries, [](auto& sec_dev) {
return sec_dev->mount();
});
});
}).safe_then([this] {
return device->mount();
}).safe_then([this] {
return shard_stores.invoke_on_all([] (auto &local_store) {
return local_store.mkfs_managers();
});
}).safe_then([this, new_osd_fsid] {
return prepare_meta(new_osd_fsid);
}).safe_then([this] {
return umount();
}).handle_error(
crimson::ct_error::assert_all{
"Invalid error in SeaStore::mkfs"
}
);
}
});
}
using coll_core_t = FuturizedStore::coll_core_t;
seastar::future<std::vector<coll_core_t>>
SeaStore::list_collections()
{
ceph_assert(seastar::this_shard_id() == primary_core);
return shard_stores.map([](auto &local_store) {
return local_store.list_collections();
}).then([](std::vector<std::vector<coll_core_t>> results) {
std::vector<coll_core_t> collections;
for (auto& colls : results) {
collections.insert(collections.end(), colls.begin(), colls.end());
}
return seastar::make_ready_future<std::vector<coll_core_t>>(
std::move(collections));
});
}
store_statfs_t SeaStore::Shard::stat() const
{
return transaction_manager->store_stat();
}
seastar::future<store_statfs_t> SeaStore::stat() const
{
ceph_assert(seastar::this_shard_id() == primary_core);
LOG_PREFIX(SeaStore::stat);
DEBUG("");
return shard_stores.map_reduce0(
[](const SeaStore::Shard &local_store) {
return local_store.stat();
},
store_statfs_t(),
[](auto &&ss, auto &&ret) {
ss.add(ret);
return std::move(ss);
}
).then([](store_statfs_t ss) {
return seastar::make_ready_future<store_statfs_t>(std::move(ss));
});
}
TransactionManager::read_extent_iertr::future<std::optional<unsigned>>
SeaStore::Shard::get_coll_bits(CollectionRef ch, Transaction &t) const
{
return transaction_manager->read_collection_root(t)
.si_then([this, ch, &t](auto coll_root) {
return collection_manager->list(coll_root, t);
}).si_then([ch](auto colls) {
auto it = std::find_if(colls.begin(), colls.end(),
[ch](const std::pair<coll_t, coll_info_t>& element) {
return element.first == ch->get_cid();
});
if (it != colls.end()) {
return TransactionManager::read_extent_iertr::make_ready_future<
std::optional<unsigned>>(it->second.split_bits);
} else {
return TransactionManager::read_extent_iertr::make_ready_future<
std::optional<unsigned>>(std::nullopt);
}
});
}
col_obj_ranges_t
SeaStore::get_objs_range(CollectionRef ch, unsigned bits)
{
col_obj_ranges_t obj_ranges;
spg_t pgid;
constexpr uint32_t MAX_HASH = std::numeric_limits<uint32_t>::max();
const std::string_view MAX_NSPACE = "\xff";
if (ch->get_cid().is_pg(&pgid)) {
obj_ranges.obj_begin.shard_id = pgid.shard;
obj_ranges.temp_begin = obj_ranges.obj_begin;
obj_ranges.obj_begin.hobj.pool = pgid.pool();
obj_ranges.temp_begin.hobj.pool = -2ll - pgid.pool();
obj_ranges.obj_end = obj_ranges.obj_begin;
obj_ranges.temp_end = obj_ranges.temp_begin;
uint32_t reverse_hash = hobject_t::_reverse_bits(pgid.ps());
obj_ranges.obj_begin.hobj.set_bitwise_key_u32(reverse_hash);
obj_ranges.temp_begin.hobj.set_bitwise_key_u32(reverse_hash);
uint64_t end_hash = reverse_hash + (1ull << (32 - bits));
if (end_hash > MAX_HASH) {
// make sure end hobj is even greater than the maximum possible hobj
obj_ranges.obj_end.hobj.set_bitwise_key_u32(MAX_HASH);
obj_ranges.temp_end.hobj.set_bitwise_key_u32(MAX_HASH);
obj_ranges.obj_end.hobj.nspace = MAX_NSPACE;
} else {
obj_ranges.obj_end.hobj.set_bitwise_key_u32(end_hash);
obj_ranges.temp_end.hobj.set_bitwise_key_u32(end_hash);
}
} else {
obj_ranges.obj_begin.shard_id = shard_id_t::NO_SHARD;
obj_ranges.obj_begin.hobj.pool = -1ull;
obj_ranges.obj_end = obj_ranges.obj_begin;
obj_ranges.obj_begin.hobj.set_bitwise_key_u32(0);
obj_ranges.obj_end.hobj.set_bitwise_key_u32(MAX_HASH);
obj_ranges.obj_end.hobj.nspace = MAX_NSPACE;
// no separate temp section
obj_ranges.temp_begin = obj_ranges.obj_end;
obj_ranges.temp_end = obj_ranges.obj_end;
}
obj_ranges.obj_begin.generation = 0;
obj_ranges.obj_end.generation = 0;
obj_ranges.temp_begin.generation = 0;
obj_ranges.temp_end.generation = 0;
return obj_ranges;
}
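/* Worked example (illustrative, not from the original source): for a pg
 * collection with ps() == 1 and bits == 4, reverse_hash =
 * _reverse_bits(1) = 0x80000000 and end_hash = 0x80000000 + (1 << 28) =
 * 0x90000000 <= MAX_HASH, so the returned ranges cover hobjects whose
 * reversed hash lies in [0x80000000, 0x90000000) for both the temp and
 * normal pools. */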
static std::list<std::pair<ghobject_t, ghobject_t>>
get_ranges(CollectionRef ch,
ghobject_t start,
ghobject_t end,
col_obj_ranges_t obj_ranges)
{
ceph_assert(start <= end);
std::list<std::pair<ghobject_t, ghobject_t>> ranges;
if (start < obj_ranges.temp_end) {
ranges.emplace_back(
std::max(obj_ranges.temp_begin, start),
std::min(obj_ranges.temp_end, end));
}
if (end > obj_ranges.obj_begin) {
ranges.emplace_back(
std::max(obj_ranges.obj_begin, start),
std::min(obj_ranges.obj_end, end));
}
return ranges;
}
seastar::future<std::tuple<std::vector<ghobject_t>, ghobject_t>>
SeaStore::Shard::list_objects(CollectionRef ch,
const ghobject_t& start,
const ghobject_t& end,
uint64_t limit) const
{
ceph_assert(start <= end);
using list_iertr = OnodeManager::list_onodes_iertr;
using RetType = typename OnodeManager::list_onodes_bare_ret;
return seastar::do_with(
RetType(std::vector<ghobject_t>(), start),
std::move(limit),
[this, ch, start, end](auto& ret, auto& limit) {
return repeat_eagain([this, ch, start, end, &limit, &ret] {
return transaction_manager->with_transaction_intr(
Transaction::src_t::READ,
"list_objects",
[this, ch, start, end, &limit, &ret](auto &t)
{
return get_coll_bits(
ch, t
).si_then([this, ch, &t, start, end, &limit, &ret](auto bits) {
if (!bits) {
return list_iertr::make_ready_future<
OnodeManager::list_onodes_bare_ret
>(std::make_tuple(
std::vector<ghobject_t>(),
ghobject_t::get_max()));
} else {
auto filter = SeaStore::get_objs_range(ch, *bits);
using list_iertr = OnodeManager::list_onodes_iertr;
using repeat_ret = list_iertr::future<seastar::stop_iteration>;
return trans_intr::repeat(
[this, &t, &ret, &limit,
filter, ranges = get_ranges(ch, start, end, filter)
]() mutable -> repeat_ret {
if (limit == 0 || ranges.empty()) {
return list_iertr::make_ready_future<
seastar::stop_iteration
>(seastar::stop_iteration::yes);
}
auto ite = ranges.begin();
auto pstart = ite->first;
auto pend = ite->second;
ranges.pop_front();
return onode_manager->list_onodes(
t, pstart, pend, limit
).si_then([&limit, &ret, pend](auto &&_ret) mutable {
auto &next_objects = std::get<0>(_ret);
auto &ret_objects = std::get<0>(ret);
ret_objects.insert(
ret_objects.end(),
next_objects.begin(),
next_objects.end());
std::get<1>(ret) = std::get<1>(_ret);
assert(limit >= next_objects.size());
limit -= next_objects.size();
assert(limit == 0 ||
std::get<1>(_ret) == pend ||
std::get<1>(_ret) == ghobject_t::get_max());
return list_iertr::make_ready_future<
seastar::stop_iteration
>(seastar::stop_iteration::no);
});
}).si_then([&ret] {
return list_iertr::make_ready_future<
OnodeManager::list_onodes_bare_ret>(std::move(ret));
});
}
});
}).safe_then([&ret](auto&& _ret) {
ret = std::move(_ret);
});
}).safe_then([&ret] {
return std::move(ret);
}).handle_error(
crimson::ct_error::assert_all{
"Invalid error in SeaStore::list_objects"
}
);
});
}
seastar::future<CollectionRef>
SeaStore::Shard::create_new_collection(const coll_t& cid)
{
LOG_PREFIX(SeaStore::create_new_collection);
DEBUG("{}", cid);
return seastar::make_ready_future<CollectionRef>(_get_collection(cid));
}
seastar::future<CollectionRef>
SeaStore::Shard::open_collection(const coll_t& cid)
{
LOG_PREFIX(SeaStore::open_collection);
DEBUG("{}", cid);
return list_collections().then([cid, this] (auto colls_cores) {
if (auto found = std::find(colls_cores.begin(),
colls_cores.end(),
std::make_pair(cid, seastar::this_shard_id()));
found != colls_cores.end()) {
return seastar::make_ready_future<CollectionRef>(_get_collection(cid));
} else {
return seastar::make_ready_future<CollectionRef>();
}
});
}
seastar::future<std::vector<coll_core_t>>
SeaStore::Shard::list_collections()
{
return seastar::do_with(
std::vector<coll_core_t>(),
[this](auto &ret) {
return repeat_eagain([this, &ret] {
return transaction_manager->with_transaction_intr(
Transaction::src_t::READ,
"list_collections",
[this, &ret](auto& t)
{
return transaction_manager->read_collection_root(t
).si_then([this, &t](auto coll_root) {
return collection_manager->list(coll_root, t);
}).si_then([&ret](auto colls) {
ret.resize(colls.size());
std::transform(
colls.begin(), colls.end(), ret.begin(),
[](auto p) {
return std::make_pair(p.first, seastar::this_shard_id());
});
});
});
}).safe_then([&ret] {
return seastar::make_ready_future<std::vector<coll_core_t>>(ret);
});
}
).handle_error(
crimson::ct_error::assert_all{
"Invalid error in SeaStore::list_collections"
}
);
}
SeaStore::Shard::read_errorator::future<ceph::bufferlist>
SeaStore::Shard::read(
CollectionRef ch,
const ghobject_t& oid,
uint64_t offset,
size_t len,
uint32_t op_flags)
{
LOG_PREFIX(SeaStore::read);
DEBUG("oid {} offset {} len {}", oid, offset, len);
return repeat_with_onode<ceph::bufferlist>(
ch,
oid,
Transaction::src_t::READ,
"read_obj",
op_type_t::READ,
[=, this](auto &t, auto &onode) -> ObjectDataHandler::read_ret {
size_t size = onode.get_layout().size;
if (offset >= size) {
return seastar::make_ready_future<ceph::bufferlist>();
}
size_t corrected_len = (len == 0) ?
size - offset :
std::min(size - offset, len);
return ObjectDataHandler(max_object_size).read(
ObjectDataHandler::context_t{
*transaction_manager,
t,
onode,
},
offset,
corrected_len);
});
}
SeaStore::Shard::read_errorator::future<ceph::bufferlist>
SeaStore::Shard::readv(
CollectionRef ch,
const ghobject_t& _oid,
interval_set<uint64_t>& m,
uint32_t op_flags)
{
return seastar::do_with(
_oid,
ceph::bufferlist{},
[=, this, &m](auto &oid, auto &ret) {
return crimson::do_for_each(
m,
[=, this, &oid, &ret](auto &p) {
return read(
ch, oid, p.first, p.second, op_flags
).safe_then([&ret](auto bl) {
ret.claim_append(bl);
});
}).safe_then([&ret] {
return read_errorator::make_ready_future<ceph::bufferlist>
(std::move(ret));
});
});
}
using crimson::os::seastore::omap_manager::BtreeOMapManager;
SeaStore::Shard::get_attr_errorator::future<ceph::bufferlist>
SeaStore::Shard::get_attr(
CollectionRef ch,
const ghobject_t& oid,
std::string_view name) const
{
auto c = static_cast<SeastoreCollection*>(ch.get());
LOG_PREFIX(SeaStore::get_attr);
DEBUG("{} {}", c->get_cid(), oid);
return repeat_with_onode<ceph::bufferlist>(
c,
oid,
Transaction::src_t::READ,
"get_attr",
op_type_t::GET_ATTR,
[=, this](auto &t, auto& onode) -> _omap_get_value_ret {
auto& layout = onode.get_layout();
if (name == OI_ATTR && layout.oi_size) {
ceph::bufferlist bl;
bl.append(ceph::bufferptr(&layout.oi[0], layout.oi_size));
return seastar::make_ready_future<ceph::bufferlist>(std::move(bl));
}
if (name == SS_ATTR && layout.ss_size) {
ceph::bufferlist bl;
bl.append(ceph::bufferptr(&layout.ss[0], layout.ss_size));
return seastar::make_ready_future<ceph::bufferlist>(std::move(bl));
}
return _omap_get_value(
t,
layout.xattr_root.get(
onode.get_metadata_hint(device->get_block_size())),
name);
}
).handle_error(crimson::ct_error::input_output_error::handle([FNAME] {
ERROR("EIO when getting attrs");
abort();
}), crimson::ct_error::pass_further_all{});
}
SeaStore::Shard::get_attrs_ertr::future<SeaStore::Shard::attrs_t>
SeaStore::Shard::get_attrs(
CollectionRef ch,
const ghobject_t& oid)
{
LOG_PREFIX(SeaStore::get_attrs);
auto c = static_cast<SeastoreCollection*>(ch.get());
DEBUG("{} {}", c->get_cid(), oid);
return repeat_with_onode<attrs_t>(
c,
oid,
Transaction::src_t::READ,
"get_addrs",
op_type_t::GET_ATTRS,
[=, this](auto &t, auto& onode) {
auto& layout = onode.get_layout();
return omap_list(onode, layout.xattr_root, t, std::nullopt,
OMapManager::omap_list_config_t().with_inclusive(false, false)
).si_then([&layout](auto p) {
auto& attrs = std::get<1>(p);
ceph::bufferlist bl;
if (layout.oi_size) {
bl.append(ceph::bufferptr(&layout.oi[0], layout.oi_size));
attrs.emplace(OI_ATTR, std::move(bl));
}
if (layout.ss_size) {
bl.clear();
bl.append(ceph::bufferptr(&layout.ss[0], layout.ss_size));
attrs.emplace(SS_ATTR, std::move(bl));
}
return seastar::make_ready_future<omap_values_t>(std::move(attrs));
});
}
).handle_error(crimson::ct_error::input_output_error::handle([FNAME] {
ERROR("EIO when getting attrs");
abort();
}), crimson::ct_error::pass_further_all{});
}
seastar::future<struct stat> SeaStore::Shard::stat(
CollectionRef c,
const ghobject_t& oid)
{
LOG_PREFIX(SeaStore::stat);
return repeat_with_onode<struct stat>(
c,
oid,
Transaction::src_t::READ,
"stat",
op_type_t::STAT,
[=, this, &oid](auto &t, auto &onode) {
struct stat st;
auto &olayout = onode.get_layout();
st.st_size = olayout.size;
st.st_blksize = device->get_block_size();
st.st_blocks = (st.st_size + st.st_blksize - 1) / st.st_blksize;
st.st_nlink = 1;
DEBUGT("cid {}, oid {}, return size {}", t, c->get_cid(), oid, st.st_size);
return seastar::make_ready_future<struct stat>(st);
}
).handle_error(
crimson::ct_error::assert_all{
"Invalid error in SeaStore::stat"
}
);
}
SeaStore::Shard::get_attr_errorator::future<ceph::bufferlist>
SeaStore::Shard::omap_get_header(
CollectionRef ch,
const ghobject_t& oid)
{
return get_attr(ch, oid, OMAP_HEADER_XATTR_KEY);
}
SeaStore::Shard::read_errorator::future<SeaStore::Shard::omap_values_t>
SeaStore::Shard::omap_get_values(
CollectionRef ch,
const ghobject_t &oid,
const omap_keys_t &keys)
{
auto c = static_cast<SeastoreCollection*>(ch.get());
return repeat_with_onode<omap_values_t>(
c,
oid,
Transaction::src_t::READ,
"omap_get_values",
op_type_t::OMAP_GET_VALUES,
[this, keys](auto &t, auto &onode) {
omap_root_t omap_root = onode.get_layout().omap_root.get(
onode.get_metadata_hint(device->get_block_size()));
return _omap_get_values(
t,
std::move(omap_root),
keys);
});
}
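// Look up a single key in the given omap tree; yields ENODATA when the tree
// is null or the key is absent.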
SeaStore::Shard::_omap_get_value_ret
SeaStore::Shard::_omap_get_value(
Transaction &t,
omap_root_t &&root,
std::string_view key) const
{
return seastar::do_with(
BtreeOMapManager(*transaction_manager),
std::move(root),
std::string(key),
[&t](auto &manager, auto& root, auto& key) -> _omap_get_value_ret {
if (root.is_null()) {
return crimson::ct_error::enodata::make();
}
return manager.omap_get_value(root, t, key
).si_then([](auto opt) -> _omap_get_value_ret {
if (!opt) {
return crimson::ct_error::enodata::make();
}
return seastar::make_ready_future<ceph::bufferlist>(std::move(*opt));
});
}
);
}
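// Look up each requested key in the omap tree; absent keys are simply
// omitted from the result map.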
SeaStore::Shard::_omap_get_values_ret
SeaStore::Shard::_omap_get_values(
Transaction &t,
omap_root_t &&omap_root,
const omap_keys_t &keys) const
{
if (omap_root.is_null()) {
return seastar::make_ready_future<omap_values_t>();
}
return seastar::do_with(
BtreeOMapManager(*transaction_manager),
std::move(omap_root),
omap_values_t(),
[&](auto &manager, auto &root, auto &ret) {
return trans_intr::do_for_each(
keys.begin(),
keys.end(),
[&](auto &key) {
return manager.omap_get_value(
root,
t,
key
).si_then([&ret, &key](auto &&p) {
if (p) {
bufferlist bl;
bl.append(*p);
ret.emplace(
std::move(key),
std::move(bl));
}
return seastar::now();
});
}
).si_then([&ret] {
return std::move(ret);
});
}
);
}
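// List key/value pairs from the given omap root starting at 'start',
// according to the supplied list config; a null root yields an empty,
// complete listing.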
SeaStore::Shard::omap_list_ret
SeaStore::Shard::omap_list(
Onode &onode,
const omap_root_le_t& omap_root,
Transaction& t,
const std::optional<std::string>& start,
OMapManager::omap_list_config_t config) const
{
auto root = omap_root.get(
onode.get_metadata_hint(device->get_block_size()));
if (root.is_null()) {
return seastar::make_ready_future<omap_list_bare_ret>(
true, omap_values_t{}
);
}
return seastar::do_with(
BtreeOMapManager(*transaction_manager),
root,
start,
std::optional<std::string>(std::nullopt),
[&t, config](auto &manager, auto &root, auto &start, auto &end) {
return manager.omap_list(root, t, start, end, config);
});
}
SeaStore::Shard::omap_get_values_ret_t
SeaStore::Shard::omap_get_values(
CollectionRef ch,
const ghobject_t &oid,
const std::optional<string> &start)
{
auto c = static_cast<SeastoreCollection*>(ch.get());
LOG_PREFIX(SeaStore::omap_get_values);
DEBUG("{} {}", c->get_cid(), oid);
using ret_bare_t = std::tuple<bool, SeaStore::Shard::omap_values_t>;
return repeat_with_onode<ret_bare_t>(
c,
oid,
Transaction::src_t::READ,
"omap_list",
op_type_t::OMAP_LIST,
[this, start](auto &t, auto &onode) {
return omap_list(
onode,
onode.get_layout().omap_root,
t,
start,
OMapManager::omap_list_config_t().with_inclusive(false, false));
});
}
SeaStore::Shard::_fiemap_ret SeaStore::Shard::_fiemap(
Transaction &t,
Onode &onode,
uint64_t off,
uint64_t len) const
{
return seastar::do_with(
ObjectDataHandler(max_object_size),
[=, this, &t, &onode] (auto &objhandler) {
return objhandler.fiemap(
ObjectDataHandler::context_t{
*transaction_manager,
t,
onode,
},
off,
len);
});
}
SeaStore::Shard::read_errorator::future<std::map<uint64_t, uint64_t>>
SeaStore::Shard::fiemap(
CollectionRef ch,
const ghobject_t& oid,
uint64_t off,
uint64_t len)
{
LOG_PREFIX(SeaStore::fiemap);
DEBUG("oid: {}, off: {}, len: {} ", oid, off, len);
return repeat_with_onode<std::map<uint64_t, uint64_t>>(
ch,
oid,
Transaction::src_t::READ,
"fiemap_read",
op_type_t::READ,
[=, this](auto &t, auto &onode) -> _fiemap_ret {
size_t size = onode.get_layout().size;
if (off >= size) {
INFOT("fiemap offset is over onode size!", t);
return seastar::make_ready_future<std::map<uint64_t, uint64_t>>();
}
size_t adjust_len = (len == 0) ?
size - off:
std::min(size - off, len);
return _fiemap(t, onode, off, adjust_len);
});
}
void SeaStore::Shard::on_error(ceph::os::Transaction &t) {
LOG_PREFIX(SeaStore::on_error);
ERROR(" transaction dump:\n");
JSONFormatter f(true);
f.open_object_section("transaction");
t.dump(&f);
f.close_section();
std::stringstream str;
f.flush(str);
ERROR("{}", str.str());
abort();
}
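// Apply an external ObjectStore transaction: decode and execute each op via
// _do_transaction_step, write back the dirty onodes, then submit the seastore
// transaction. Conflicts surface as eagain and the whole transaction is
// retried by repeat_with_internal_context.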
seastar::future<> SeaStore::Shard::do_transaction_no_callbacks(
CollectionRef _ch,
ceph::os::Transaction&& _t)
{
// repeat_with_internal_context ensures ordering via collection lock
return repeat_with_internal_context(
_ch,
std::move(_t),
Transaction::src_t::MUTATE,
"do_transaction",
op_type_t::TRANSACTION,
[this](auto &ctx) {
return with_trans_intr(*ctx.transaction, [&, this](auto &t) {
return seastar::do_with(std::vector<OnodeRef>(ctx.iter.objects.size()),
std::vector<OnodeRef>(),
[this, &ctx](auto& onodes, auto& d_onodes) mutable {
return trans_intr::repeat(
[this, &ctx, &onodes, &d_onodes]() mutable
-> tm_iertr::future<seastar::stop_iteration>
{
if (ctx.iter.have_op()) {
return _do_transaction_step(
ctx, ctx.ch, onodes, d_onodes, ctx.iter
).si_then([] {
return seastar::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::no);
});
} else {
return seastar::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::yes);
};
}).si_then([this, &ctx, &d_onodes] {
return onode_manager->write_dirty(*ctx.transaction, d_onodes);
});
}).si_then([this, &ctx] {
return transaction_manager->submit_transaction(*ctx.transaction);
});
});
});
}
seastar::future<> SeaStore::Shard::flush(CollectionRef ch)
{
return seastar::do_with(
get_dummy_ordering_handle(),
[this, ch](auto &handle) {
return handle.take_collection_lock(
static_cast<SeastoreCollection&>(*ch).ordering_lock
).then([this, &handle] {
return transaction_manager->flush(handle);
});
});
}
SeaStore::Shard::tm_ret
SeaStore::Shard::_do_transaction_step(
internal_context_t &ctx,
CollectionRef &col,
std::vector<OnodeRef> &onodes,
std::vector<OnodeRef> &d_onodes,
ceph::os::Transaction::iterator &i)
{
auto op = i.decode_op();
using ceph::os::Transaction;
if (op->op == Transaction::OP_NOP)
return tm_iertr::now();
switch (op->op) {
case Transaction::OP_RMCOLL:
{
coll_t cid = i.get_cid(op->cid);
return _remove_collection(ctx, cid);
}
case Transaction::OP_MKCOLL:
{
coll_t cid = i.get_cid(op->cid);
return _create_collection(ctx, cid, op->split_bits);
}
case Transaction::OP_COLL_HINT:
{
ceph::bufferlist hint;
i.decode_bl(hint);
return tm_iertr::now();
}
}
using onode_iertr = OnodeManager::get_onode_iertr::extend<
crimson::ct_error::value_too_large>;
auto fut = onode_iertr::make_ready_future<OnodeRef>(OnodeRef());
bool create = false;
if (op->op == Transaction::OP_TOUCH ||
op->op == Transaction::OP_CREATE ||
op->op == Transaction::OP_WRITE ||
op->op == Transaction::OP_ZERO) {
create = true;
}
if (!onodes[op->oid]) {
if (!create) {
fut = onode_manager->get_onode(*ctx.transaction, i.get_oid(op->oid));
} else {
fut = onode_manager->get_or_create_onode(
*ctx.transaction, i.get_oid(op->oid));
}
}
return fut.si_then([&, op, this](auto&& get_onode) -> tm_ret {
LOG_PREFIX(SeaStore::_do_transaction_step);
OnodeRef &o = onodes[op->oid];
if (!o) {
assert(get_onode);
o = get_onode;
d_onodes.push_back(get_onode);
}
try {
switch (op->op) {
case Transaction::OP_REMOVE:
{
TRACET("removing {}", *ctx.transaction, i.get_oid(op->oid));
return _remove(ctx, onodes[op->oid]);
}
case Transaction::OP_CREATE:
case Transaction::OP_TOUCH:
{
return _touch(ctx, onodes[op->oid]);
}
case Transaction::OP_WRITE:
{
uint64_t off = op->off;
uint64_t len = op->len;
uint32_t fadvise_flags = i.get_fadvise_flags();
ceph::bufferlist bl;
i.decode_bl(bl);
return _write(
ctx, onodes[op->oid], off, len, std::move(bl),
fadvise_flags);
}
case Transaction::OP_TRUNCATE:
{
uint64_t off = op->off;
return _truncate(ctx, onodes[op->oid], off);
}
case Transaction::OP_SETATTR:
{
std::string name = i.decode_string();
std::map<std::string, bufferlist> to_set;
ceph::bufferlist& bl = to_set[name];
i.decode_bl(bl);
return _setattrs(ctx, onodes[op->oid], std::move(to_set));
}
case Transaction::OP_SETATTRS:
{
std::map<std::string, bufferlist> to_set;
i.decode_attrset(to_set);
return _setattrs(ctx, onodes[op->oid], std::move(to_set));
}
case Transaction::OP_RMATTR:
{
std::string name = i.decode_string();
return _rmattr(ctx, onodes[op->oid], name);
}
case Transaction::OP_RMATTRS:
{
return _rmattrs(ctx, onodes[op->oid]);
}
case Transaction::OP_OMAP_SETKEYS:
{
std::map<std::string, ceph::bufferlist> aset;
i.decode_attrset(aset);
return _omap_set_values(ctx, onodes[op->oid], std::move(aset));
}
case Transaction::OP_OMAP_SETHEADER:
{
ceph::bufferlist bl;
i.decode_bl(bl);
return _omap_set_header(ctx, onodes[op->oid], std::move(bl));
}
case Transaction::OP_OMAP_RMKEYS:
{
omap_keys_t keys;
i.decode_keyset(keys);
return _omap_rmkeys(ctx, onodes[op->oid], std::move(keys));
}
case Transaction::OP_OMAP_RMKEYRANGE:
{
string first, last;
first = i.decode_string();
last = i.decode_string();
return _omap_rmkeyrange(
ctx, onodes[op->oid],
std::move(first), std::move(last));
}
case Transaction::OP_OMAP_CLEAR:
{
return _omap_clear(ctx, onodes[op->oid]);
}
case Transaction::OP_ZERO:
{
objaddr_t off = op->off;
extent_len_t len = op->len;
return _zero(ctx, onodes[op->oid], off, len);
}
case Transaction::OP_SETALLOCHINT:
{
// TODO
return tm_iertr::now();
}
default:
ERROR("bad op {}", static_cast<unsigned>(op->op));
return crimson::ct_error::input_output_error::make();
}
} catch (std::exception &e) {
ERROR("got exception {}", e);
return crimson::ct_error::input_output_error::make();
}
}).handle_error_interruptible(
tm_iertr::pass_further{},
crimson::ct_error::enoent::handle([op] {
      // OMAP_CLEAR, TRUNCATE, REMOVE, etc. tolerate an absent onode; the
      // ops listed below do not.
if (op->op == Transaction::OP_CLONERANGE ||
op->op == Transaction::OP_CLONE ||
op->op == Transaction::OP_CLONERANGE2 ||
op->op == Transaction::OP_COLL_ADD ||
op->op == Transaction::OP_SETATTR ||
op->op == Transaction::OP_SETATTRS ||
op->op == Transaction::OP_RMATTR ||
op->op == Transaction::OP_OMAP_SETKEYS ||
op->op == Transaction::OP_OMAP_RMKEYS ||
op->op == Transaction::OP_OMAP_RMKEYRANGE ||
op->op == Transaction::OP_OMAP_SETHEADER) {
ceph_abort_msg("unexpected enoent error");
}
return seastar::now();
}),
crimson::ct_error::assert_all{
"Invalid error in SeaStore::do_transaction_step"
}
);
}
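// Remove an object: clear its omap tree (if any), clear the object data,
// then erase the onode itself.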
SeaStore::Shard::tm_ret
SeaStore::Shard::_remove(
internal_context_t &ctx,
OnodeRef &onode)
{
LOG_PREFIX(SeaStore::_remove);
DEBUGT("onode={}", *ctx.transaction, *onode);
auto fut = BtreeOMapManager::omap_clear_iertr::now();
auto omap_root = onode->get_layout().omap_root.get(
onode->get_metadata_hint(device->get_block_size()));
if (omap_root.get_location() != L_ADDR_NULL) {
fut = seastar::do_with(
BtreeOMapManager(*transaction_manager),
onode->get_layout().omap_root.get(
onode->get_metadata_hint(device->get_block_size())),
[&ctx, onode](auto &omap_manager, auto &omap_root) {
return omap_manager.omap_clear(
omap_root,
*ctx.transaction
);
});
}
return fut.si_then([this, &ctx, onode] {
return seastar::do_with(
ObjectDataHandler(max_object_size),
[=, this, &ctx](auto &objhandler) {
return objhandler.clear(
ObjectDataHandler::context_t{
*transaction_manager,
*ctx.transaction,
*onode,
});
});
}).si_then([this, &ctx, onode]() mutable {
return onode_manager->erase_onode(*ctx.transaction, onode);
}).handle_error_interruptible(
crimson::ct_error::input_output_error::pass_further(),
crimson::ct_error::assert_all(
"Invalid error in SeaStore::_remove"
)
);
}
SeaStore::Shard::tm_ret
SeaStore::Shard::_touch(
internal_context_t &ctx,
OnodeRef &onode)
{
LOG_PREFIX(SeaStore::_touch);
DEBUGT("onode={}", *ctx.transaction, *onode);
return tm_iertr::now();
}
SeaStore::Shard::tm_ret
SeaStore::Shard::_write(
internal_context_t &ctx,
OnodeRef &onode,
uint64_t offset, size_t len,
ceph::bufferlist &&_bl,
uint32_t fadvise_flags)
{
LOG_PREFIX(SeaStore::_write);
DEBUGT("onode={} {}~{}", *ctx.transaction, *onode, offset, len);
{
auto &object_size = onode->get_mutable_layout(*ctx.transaction).size;
object_size = std::max<uint64_t>(
offset + len,
object_size);
}
return seastar::do_with(
std::move(_bl),
ObjectDataHandler(max_object_size),
[=, this, &ctx, &onode](auto &bl, auto &objhandler) {
return objhandler.write(
ObjectDataHandler::context_t{
*transaction_manager,
*ctx.transaction,
*onode,
},
offset,
bl);
});
}
SeaStore::Shard::tm_ret
SeaStore::Shard::_zero(
internal_context_t &ctx,
OnodeRef &onode,
objaddr_t offset,
extent_len_t len)
{
LOG_PREFIX(SeaStore::_zero);
DEBUGT("onode={} {}~{}", *ctx.transaction, *onode, offset, len);
if (offset + len >= max_object_size) {
return crimson::ct_error::input_output_error::make();
}
auto &object_size = onode->get_mutable_layout(*ctx.transaction).size;
object_size = std::max<uint64_t>(offset + len, object_size);
return seastar::do_with(
ObjectDataHandler(max_object_size),
[=, this, &ctx, &onode](auto &objhandler) {
return objhandler.zero(
ObjectDataHandler::context_t{
*transaction_manager,
*ctx.transaction,
*onode,
},
offset,
len);
});
}
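// Insert key/value pairs into the given omap (or xattr) tree, creating the
// root lazily if it does not exist yet and recording the updated root in the
// mutable onode layout when it changes.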
SeaStore::Shard::omap_set_kvs_ret
SeaStore::Shard::_omap_set_kvs(
OnodeRef &onode,
const omap_root_le_t& omap_root,
Transaction& t,
omap_root_le_t& mutable_omap_root,
std::map<std::string, ceph::bufferlist>&& kvs)
{
return seastar::do_with(
BtreeOMapManager(*transaction_manager),
omap_root.get(onode->get_metadata_hint(device->get_block_size())),
[&, keys=std::move(kvs)](auto &omap_manager, auto &root) {
tm_iertr::future<> maybe_create_root =
!root.is_null() ?
tm_iertr::now() :
omap_manager.initialize_omap(
t, onode->get_metadata_hint(device->get_block_size())
).si_then([&root](auto new_root) {
root = new_root;
});
return maybe_create_root.si_then(
[&, keys=std::move(keys)]() mutable {
return omap_manager.omap_set_keys(root, t, std::move(keys));
}).si_then([&] {
return tm_iertr::make_ready_future<omap_root_t>(std::move(root));
}).si_then([&mutable_omap_root](auto root) {
if (root.must_update()) {
mutable_omap_root.update(root);
}
});
}
);
}
SeaStore::Shard::tm_ret
SeaStore::Shard::_omap_set_values(
internal_context_t &ctx,
OnodeRef &onode,
std::map<std::string, ceph::bufferlist> &&aset)
{
LOG_PREFIX(SeaStore::_omap_set_values);
DEBUGT("{} {} keys", *ctx.transaction, *onode, aset.size());
return _omap_set_kvs(
onode,
onode->get_layout().omap_root,
*ctx.transaction,
onode->get_mutable_layout(*ctx.transaction).omap_root,
std::move(aset));
}
SeaStore::Shard::tm_ret
SeaStore::Shard::_omap_set_header(
internal_context_t &ctx,
OnodeRef &onode,
ceph::bufferlist &&header)
{
LOG_PREFIX(SeaStore::_omap_set_header);
DEBUGT("{} {} bytes", *ctx.transaction, *onode, header.length());
std::map<std::string, bufferlist> to_set;
to_set[OMAP_HEADER_XATTR_KEY] = header;
  return _setattrs(ctx, onode, std::move(to_set));
}
SeaStore::Shard::tm_ret
SeaStore::Shard::_omap_clear(
internal_context_t &ctx,
OnodeRef &onode)
{
LOG_PREFIX(SeaStore::_omap_clear);
DEBUGT("{} {} keys", *ctx.transaction, *onode);
return _xattr_rmattr(ctx, onode, std::string(OMAP_HEADER_XATTR_KEY))
.si_then([this, &ctx, &onode]() -> tm_ret {
if (auto omap_root = onode->get_layout().omap_root.get(
onode->get_metadata_hint(device->get_block_size()));
omap_root.is_null()) {
return seastar::now();
} else {
return seastar::do_with(
BtreeOMapManager(*transaction_manager),
onode->get_layout().omap_root.get(
onode->get_metadata_hint(device->get_block_size())),
[&ctx, &onode](
auto &omap_manager,
auto &omap_root) {
return omap_manager.omap_clear(
omap_root,
*ctx.transaction)
.si_then([&] {
if (omap_root.must_update()) {
onode->get_mutable_layout(*ctx.transaction
).omap_root.update(omap_root);
}
});
});
}
});
}
SeaStore::Shard::tm_ret
SeaStore::Shard::_omap_rmkeys(
internal_context_t &ctx,
OnodeRef &onode,
omap_keys_t &&keys)
{
LOG_PREFIX(SeaStore::_omap_rmkeys);
DEBUGT("{} {} keys", *ctx.transaction, *onode, keys.size());
auto omap_root = onode->get_layout().omap_root.get(
onode->get_metadata_hint(device->get_block_size()));
if (omap_root.is_null()) {
return seastar::now();
} else {
return seastar::do_with(
BtreeOMapManager(*transaction_manager),
onode->get_layout().omap_root.get(
onode->get_metadata_hint(device->get_block_size())),
std::move(keys),
[&ctx, &onode](
auto &omap_manager,
auto &omap_root,
auto &keys) {
return trans_intr::do_for_each(
keys.begin(),
keys.end(),
[&](auto &p) {
return omap_manager.omap_rm_key(
omap_root,
*ctx.transaction,
p);
}
).si_then([&] {
if (omap_root.must_update()) {
onode->get_mutable_layout(*ctx.transaction
).omap_root.update(omap_root);
}
});
}
);
}
}
SeaStore::Shard::tm_ret
SeaStore::Shard::_omap_rmkeyrange(
internal_context_t &ctx,
OnodeRef &onode,
std::string first,
std::string last)
{
LOG_PREFIX(SeaStore::_omap_rmkeyrange);
DEBUGT("{} first={} last={}", *ctx.transaction, *onode, first, last);
if (first > last) {
ERRORT("range error, first: {} > last:{}", *ctx.transaction, first, last);
ceph_abort();
}
auto omap_root = onode->get_layout().omap_root.get(
onode->get_metadata_hint(device->get_block_size()));
if (omap_root.is_null()) {
return seastar::now();
} else {
return seastar::do_with(
BtreeOMapManager(*transaction_manager),
onode->get_layout().omap_root.get(
onode->get_metadata_hint(device->get_block_size())),
std::move(first),
std::move(last),
[&ctx, &onode](
auto &omap_manager,
auto &omap_root,
auto &first,
auto &last) {
auto config = OMapManager::omap_list_config_t()
.with_inclusive(true, false)
.without_max();
return omap_manager.omap_rm_key_range(
omap_root,
*ctx.transaction,
first,
last,
config
).si_then([&] {
if (omap_root.must_update()) {
onode->get_mutable_layout(*ctx.transaction
).omap_root.update(omap_root);
}
});
});
}
}
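// Truncate the object to 'size': update the onode layout and delegate the
// extent-level work to ObjectDataHandler::truncate.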
SeaStore::Shard::tm_ret
SeaStore::Shard::_truncate(
internal_context_t &ctx,
OnodeRef &onode,
uint64_t size)
{
LOG_PREFIX(SeaStore::_truncate);
DEBUGT("onode={} size={}", *ctx.transaction, *onode, size);
onode->get_mutable_layout(*ctx.transaction).size = size;
return seastar::do_with(
ObjectDataHandler(max_object_size),
[=, this, &ctx, &onode](auto &objhandler) {
return objhandler.truncate(
ObjectDataHandler::context_t{
*transaction_manager,
*ctx.transaction,
*onode
},
size);
});
}
SeaStore::Shard::tm_ret
SeaStore::Shard::_setattrs(
internal_context_t &ctx,
OnodeRef &onode,
std::map<std::string, bufferlist>&& aset)
{
LOG_PREFIX(SeaStore::_setattrs);
DEBUGT("onode={}", *ctx.transaction, *onode);
auto fut = tm_iertr::now();
auto& layout = onode->get_mutable_layout(*ctx.transaction);
if (auto it = aset.find(OI_ATTR); it != aset.end()) {
auto& val = it->second;
if (likely(val.length() <= onode_layout_t::MAX_OI_LENGTH)) {
maybe_inline_memcpy(
&layout.oi[0],
val.c_str(),
val.length(),
onode_layout_t::MAX_OI_LENGTH);
if (!layout.oi_size) {
// if oi was not in the layout, it probably exists in the omap,
// need to remove it first
fut = _xattr_rmattr(ctx, onode, OI_ATTR);
}
layout.oi_size = val.length();
aset.erase(it);
} else {
layout.oi_size = 0;
}
}
if (auto it = aset.find(SS_ATTR); it != aset.end()) {
auto& val = it->second;
if (likely(val.length() <= onode_layout_t::MAX_SS_LENGTH)) {
maybe_inline_memcpy(
&layout.ss[0],
val.c_str(),
val.length(),
onode_layout_t::MAX_SS_LENGTH);
if (!layout.ss_size) {
fut = _xattr_rmattr(ctx, onode, SS_ATTR);
}
layout.ss_size = val.length();
aset.erase(it);
} else {
layout.ss_size = 0;
}
}
if (aset.empty()) {
return fut;
}
return fut.si_then(
[this, onode, &ctx, &layout,
aset=std::move(aset)]() mutable {
return _omap_set_kvs(
onode,
onode->get_layout().xattr_root,
*ctx.transaction,
layout.xattr_root,
std::move(aset));
});
}
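// Remove a single xattr. Inline OI_ATTR/SS_ATTR values are wiped from the
// onode layout; any other name is removed from the xattr omap tree.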
SeaStore::Shard::tm_ret
SeaStore::Shard::_rmattr(
internal_context_t &ctx,
OnodeRef &onode,
std::string name)
{
LOG_PREFIX(SeaStore::_rmattr);
DEBUGT("onode={}", *ctx.transaction, *onode);
auto& layout = onode->get_mutable_layout(*ctx.transaction);
if ((name == OI_ATTR) && (layout.oi_size > 0)) {
memset(&layout.oi[0], 0, layout.oi_size);
layout.oi_size = 0;
return tm_iertr::now();
} else if ((name == SS_ATTR) && (layout.ss_size > 0)) {
memset(&layout.ss[0], 0, layout.ss_size);
layout.ss_size = 0;
return tm_iertr::now();
} else {
return _xattr_rmattr(
ctx,
onode,
std::move(name));
}
}
SeaStore::Shard::tm_ret
SeaStore::Shard::_xattr_rmattr(
internal_context_t &ctx,
OnodeRef &onode,
std::string &&name)
{
LOG_PREFIX(SeaStore::_xattr_rmattr);
DEBUGT("onode={}", *ctx.transaction, *onode);
auto xattr_root = onode->get_layout().xattr_root.get(
onode->get_metadata_hint(device->get_block_size()));
if (xattr_root.is_null()) {
return seastar::now();
} else {
return seastar::do_with(
BtreeOMapManager(*transaction_manager),
onode->get_layout().xattr_root.get(
onode->get_metadata_hint(device->get_block_size())),
std::move(name),
[&ctx, &onode](auto &omap_manager, auto &xattr_root, auto &name) {
return omap_manager.omap_rm_key(xattr_root, *ctx.transaction, name)
.si_then([&] {
if (xattr_root.must_update()) {
onode->get_mutable_layout(*ctx.transaction
).xattr_root.update(xattr_root);
}
});
});
}
}
SeaStore::Shard::tm_ret
SeaStore::Shard::_rmattrs(
internal_context_t &ctx,
OnodeRef &onode)
{
LOG_PREFIX(SeaStore::_rmattrs);
DEBUGT("onode={}", *ctx.transaction, *onode);
auto& layout = onode->get_mutable_layout(*ctx.transaction);
memset(&layout.oi[0], 0, layout.oi_size);
layout.oi_size = 0;
memset(&layout.ss[0], 0, layout.ss_size);
layout.ss_size = 0;
return _xattr_clear(ctx, onode);
}
SeaStore::Shard::tm_ret
SeaStore::Shard::_xattr_clear(
internal_context_t &ctx,
OnodeRef &onode)
{
LOG_PREFIX(SeaStore::_xattr_clear);
DEBUGT("onode={}", *ctx.transaction, *onode);
auto xattr_root = onode->get_layout().xattr_root.get(
onode->get_metadata_hint(device->get_block_size()));
if (xattr_root.is_null()) {
return seastar::now();
} else {
return seastar::do_with(
BtreeOMapManager(*transaction_manager),
onode->get_layout().xattr_root.get(
onode->get_metadata_hint(device->get_block_size())),
[&ctx, &onode](auto &omap_manager, auto &xattr_root) {
return omap_manager.omap_clear(xattr_root, *ctx.transaction)
.si_then([&] {
if (xattr_root.must_update()) {
onode->get_mutable_layout(*ctx.transaction
).xattr_root.update(xattr_root);
}
});
});
}
}
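// Create a collection via the collection manager, persisting the updated
// collection root if it changed.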
SeaStore::Shard::tm_ret
SeaStore::Shard::_create_collection(
internal_context_t &ctx,
const coll_t& cid, int bits)
{
return transaction_manager->read_collection_root(
*ctx.transaction
).si_then([=, this, &ctx](auto _cmroot) {
return seastar::do_with(
_cmroot,
[=, this, &ctx](auto &cmroot) {
return collection_manager->create(
cmroot,
*ctx.transaction,
cid,
bits
).si_then([this, &ctx, &cmroot] {
if (cmroot.must_update()) {
transaction_manager->write_collection_root(
*ctx.transaction,
cmroot);
}
});
}
);
}).handle_error_interruptible(
tm_iertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error in SeaStore::_create_collection"
}
);
}
SeaStore::Shard::tm_ret
SeaStore::Shard::_remove_collection(
internal_context_t &ctx,
const coll_t& cid)
{
return transaction_manager->read_collection_root(
*ctx.transaction
).si_then([=, this, &ctx](auto _cmroot) {
return seastar::do_with(
_cmroot,
[=, this, &ctx](auto &cmroot) {
return collection_manager->remove(
cmroot,
*ctx.transaction,
cid
).si_then([this, &ctx, &cmroot] {
          // TODO: remove() should indicate whether the collection actually
          // existed; a missing collection should probably be an error
if (cmroot.must_update()) {
transaction_manager->write_collection_root(
*ctx.transaction,
cmroot);
}
});
});
}).handle_error_interruptible(
tm_iertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error in SeaStore::_create_collection"
}
);
}
boost::intrusive_ptr<SeastoreCollection>
SeaStore::Shard::_get_collection(const coll_t& cid)
{
return new SeastoreCollection{cid};
}
seastar::future<> SeaStore::Shard::write_meta(
const std::string& key,
const std::string& value)
{
LOG_PREFIX(SeaStore::write_meta);
DEBUG("key: {}; value: {}", key, value);
return seastar::do_with(
key, value,
[this, FNAME](auto& key, auto& value) {
return repeat_eagain([this, FNAME, &key, &value] {
return transaction_manager->with_transaction_intr(
Transaction::src_t::MUTATE,
"write_meta",
[this, FNAME, &key, &value](auto& t)
{
DEBUGT("Have transaction, key: {}; value: {}", t, key, value);
return transaction_manager->update_root_meta(
t, key, value
).si_then([this, &t] {
return transaction_manager->submit_transaction(t);
});
});
});
}).handle_error(
crimson::ct_error::assert_all{"Invalid error in SeaStore::write_meta"}
);
}
seastar::future<std::tuple<int, std::string>>
SeaStore::read_meta(const std::string& key)
{
ceph_assert(seastar::this_shard_id() == primary_core);
LOG_PREFIX(SeaStore::read_meta);
DEBUG("key: {}", key);
return mdstore->read_meta(key).safe_then([](auto v) {
if (v) {
return std::make_tuple(0, std::move(*v));
} else {
return std::make_tuple(-1, std::string(""));
}
}).handle_error(
crimson::ct_error::assert_all{
"Invalid error in SeaStore::read_meta"
}
);
}
uuid_d SeaStore::Shard::get_fsid() const
{
return device->get_meta().seastore_id;
}
void SeaStore::Shard::init_managers()
{
transaction_manager.reset();
collection_manager.reset();
onode_manager.reset();
transaction_manager = make_transaction_manager(
device, secondaries, is_test);
collection_manager = std::make_unique<collection_manager::FlatCollectionManager>(
*transaction_manager);
onode_manager = std::make_unique<crimson::os::seastore::onode::FLTreeOnodeManager>(
*transaction_manager);
}
std::unique_ptr<SeaStore> make_seastore(
const std::string &device)
{
auto mdstore = std::make_unique<FileMDStore>(device);
return std::make_unique<SeaStore>(
device,
std::move(mdstore));
}
std::unique_ptr<SeaStore> make_test_seastore(
SeaStore::MDStoreRef mdstore)
{
return std::make_unique<SeaStore>(
"",
std::move(mdstore));
}
}
| 60,842 | 28.40696 | 85 | cc |
null | ceph-main/src/crimson/os/seastore/seastore.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <string>
#include <unordered_map>
#include <map>
#include <typeinfo>
#include <vector>
#include <optional>
#include <seastar/core/future.hh>
#include <seastar/core/metrics_types.hh>
#include "include/uuid.h"
#include "os/Transaction.h"
#include "crimson/common/throttle.h"
#include "crimson/os/futurized_collection.h"
#include "crimson/os/futurized_store.h"
#include "crimson/os/seastore/device.h"
#include "crimson/os/seastore/transaction.h"
#include "crimson/os/seastore/onode_manager.h"
#include "crimson/os/seastore/omap_manager.h"
#include "crimson/os/seastore/collection_manager.h"
#include "crimson/os/seastore/object_data_handler.h"
namespace crimson::os::seastore {
class Onode;
using OnodeRef = boost::intrusive_ptr<Onode>;
class TransactionManager;
enum class op_type_t : uint8_t {
TRANSACTION = 0,
READ,
WRITE,
GET_ATTR,
GET_ATTRS,
STAT,
OMAP_GET_VALUES,
OMAP_LIST,
MAX
};
class SeastoreCollection final : public FuturizedCollection {
public:
template <typename... T>
SeastoreCollection(T&&... args) :
FuturizedCollection(std::forward<T>(args)...) {}
seastar::shared_mutex ordering_lock;
};
/**
* col_obj_ranges_t
*
* Represents the two ghobject_t ranges spanned by a PG collection.
* Temp objects will be within [temp_begin, temp_end) and normal objects
* will be in [obj_begin, obj_end).
*/
struct col_obj_ranges_t {
ghobject_t temp_begin;
ghobject_t temp_end;
ghobject_t obj_begin;
ghobject_t obj_end;
};
class SeaStore final : public FuturizedStore {
public:
class MDStore {
public:
using base_iertr = crimson::errorator<
crimson::ct_error::input_output_error
>;
using write_meta_ertr = base_iertr;
using write_meta_ret = write_meta_ertr::future<>;
virtual write_meta_ret write_meta(
const std::string &key,
const std::string &val
) = 0;
using read_meta_ertr = base_iertr;
using read_meta_ret = write_meta_ertr::future<std::optional<std::string>>;
virtual read_meta_ret read_meta(const std::string &key) = 0;
virtual ~MDStore() {}
};
using MDStoreRef = std::unique_ptr<MDStore>;
class Shard : public FuturizedStore::Shard {
public:
Shard(
std::string root,
Device* device,
bool is_test);
~Shard() = default;
seastar::future<struct stat> stat(
CollectionRef c,
const ghobject_t& oid) final;
read_errorator::future<ceph::bufferlist> read(
CollectionRef c,
const ghobject_t& oid,
uint64_t offset,
size_t len,
uint32_t op_flags = 0) final;
read_errorator::future<ceph::bufferlist> readv(
CollectionRef c,
const ghobject_t& oid,
interval_set<uint64_t>& m,
uint32_t op_flags = 0) final;
get_attr_errorator::future<ceph::bufferlist> get_attr(
CollectionRef c,
const ghobject_t& oid,
std::string_view name) const final;
get_attrs_ertr::future<attrs_t> get_attrs(
CollectionRef c,
const ghobject_t& oid) final;
read_errorator::future<omap_values_t> omap_get_values(
CollectionRef c,
const ghobject_t& oid,
const omap_keys_t& keys) final;
/// Retrieves paged set of values > start (if present)
using omap_get_values_ret_bare_t = std::tuple<bool, omap_values_t>;
using omap_get_values_ret_t = read_errorator::future<
omap_get_values_ret_bare_t>;
omap_get_values_ret_t omap_get_values(
CollectionRef c, ///< [in] collection
const ghobject_t &oid, ///< [in] oid
const std::optional<std::string> &start ///< [in] start, empty for begin
) final; ///< @return <done, values> values.empty() iff done
get_attr_errorator::future<bufferlist> omap_get_header(
CollectionRef c,
const ghobject_t& oid) final;
seastar::future<std::tuple<std::vector<ghobject_t>, ghobject_t>> list_objects(
CollectionRef c,
const ghobject_t& start,
const ghobject_t& end,
uint64_t limit) const final;
seastar::future<CollectionRef> create_new_collection(const coll_t& cid) final;
seastar::future<CollectionRef> open_collection(const coll_t& cid) final;
seastar::future<> do_transaction_no_callbacks(
CollectionRef ch,
ceph::os::Transaction&& txn) final;
/* Note, flush() machinery must go through the same pipeline
* stages and locks as do_transaction. */
seastar::future<> flush(CollectionRef ch) final;
read_errorator::future<std::map<uint64_t, uint64_t>> fiemap(
CollectionRef ch,
const ghobject_t& oid,
uint64_t off,
uint64_t len) final;
unsigned get_max_attr_name_length() const final {
return 256;
}
// only exposed to SeaStore
public:
seastar::future<> umount();
// init managers and mount transaction_manager
seastar::future<> mount_managers();
void set_secondaries(Device& sec_dev) {
secondaries.emplace_back(&sec_dev);
}
using coll_core_t = FuturizedStore::coll_core_t;
seastar::future<std::vector<coll_core_t>> list_collections();
seastar::future<> write_meta(const std::string& key,
const std::string& value);
store_statfs_t stat() const;
uuid_d get_fsid() const;
seastar::future<> mkfs_managers();
void init_managers();
private:
struct internal_context_t {
CollectionRef ch;
ceph::os::Transaction ext_transaction;
internal_context_t(
CollectionRef ch,
ceph::os::Transaction &&_ext_transaction,
TransactionRef &&transaction)
: ch(ch), ext_transaction(std::move(_ext_transaction)),
transaction(std::move(transaction)),
iter(ext_transaction.begin()) {}
TransactionRef transaction;
ceph::os::Transaction::iterator iter;
std::chrono::steady_clock::time_point begin_timestamp = std::chrono::steady_clock::now();
void reset_preserve_handle(TransactionManager &tm) {
tm.reset_transaction_preserve_handle(*transaction);
iter = ext_transaction.begin();
}
};
TransactionManager::read_extent_iertr::future<std::optional<unsigned>>
get_coll_bits(CollectionRef ch, Transaction &t) const;
static void on_error(ceph::os::Transaction &t);
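    // Runs 'f' against an external transaction while holding the collection
    // ordering lock and a throttler slot, retrying with a fresh transaction
    // handle on eagain; any other error is routed to on_error().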
template <typename F>
auto repeat_with_internal_context(
CollectionRef ch,
ceph::os::Transaction &&t,
Transaction::src_t src,
const char* tname,
op_type_t op_type,
F &&f) {
return seastar::do_with(
internal_context_t(
ch, std::move(t),
transaction_manager->create_transaction(src, tname)),
std::forward<F>(f),
[this, op_type](auto &ctx, auto &f) {
return ctx.transaction->get_handle().take_collection_lock(
static_cast<SeastoreCollection&>(*(ctx.ch)).ordering_lock
).then([this] {
return throttler.get(1);
}).then([&, this] {
return repeat_eagain([&, this] {
ctx.reset_preserve_handle(*transaction_manager);
return std::invoke(f, ctx);
}).handle_error(
crimson::ct_error::eagain::pass_further{},
crimson::ct_error::all_same_way([&ctx](auto e) {
on_error(ctx.ext_transaction);
})
);
}).then([this, op_type, &ctx] {
add_latency_sample(op_type,
std::chrono::steady_clock::now() - ctx.begin_timestamp);
}).finally([this] {
throttler.put();
});
});
}
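    // Loads the onode for 'oid' inside a transaction of the given src, runs
    // 'f' on it and retries on eagain; the result is copied into 'ret' and a
    // latency sample is recorded for 'op_type'.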
template <typename Ret, typename F>
auto repeat_with_onode(
CollectionRef ch,
const ghobject_t &oid,
Transaction::src_t src,
const char* tname,
op_type_t op_type,
F &&f) const {
auto begin_time = std::chrono::steady_clock::now();
return seastar::do_with(
oid, Ret{}, std::forward<F>(f),
[this, src, op_type, begin_time, tname
](auto &oid, auto &ret, auto &f)
{
return repeat_eagain([&, this, src, tname] {
return transaction_manager->with_transaction_intr(
src,
tname,
[&, this](auto& t)
{
return onode_manager->get_onode(t, oid
).si_then([&](auto onode) {
return seastar::do_with(std::move(onode), [&](auto& onode) {
return f(t, *onode);
});
}).si_then([&ret](auto _ret) {
ret = _ret;
});
});
}).safe_then([&ret, op_type, begin_time, this] {
const_cast<Shard*>(this)->add_latency_sample(op_type,
std::chrono::steady_clock::now() - begin_time);
return seastar::make_ready_future<Ret>(ret);
});
});
}
using _fiemap_ret = ObjectDataHandler::fiemap_ret;
_fiemap_ret _fiemap(
Transaction &t,
Onode &onode,
uint64_t off,
uint64_t len) const;
using _omap_get_value_iertr = OMapManager::base_iertr::extend<
crimson::ct_error::enodata
>;
using _omap_get_value_ret = _omap_get_value_iertr::future<ceph::bufferlist>;
_omap_get_value_ret _omap_get_value(
Transaction &t,
omap_root_t &&root,
std::string_view key) const;
using _omap_get_values_iertr = OMapManager::base_iertr;
using _omap_get_values_ret = _omap_get_values_iertr::future<omap_values_t>;
_omap_get_values_ret _omap_get_values(
Transaction &t,
omap_root_t &&root,
const omap_keys_t &keys) const;
friend class SeaStoreOmapIterator;
using omap_list_bare_ret = OMapManager::omap_list_bare_ret;
using omap_list_ret = OMapManager::omap_list_ret;
omap_list_ret omap_list(
Onode &onode,
const omap_root_le_t& omap_root,
Transaction& t,
const std::optional<std::string>& start,
OMapManager::omap_list_config_t config) const;
using tm_iertr = TransactionManager::base_iertr;
using tm_ret = tm_iertr::future<>;
tm_ret _do_transaction_step(
internal_context_t &ctx,
CollectionRef &col,
std::vector<OnodeRef> &onodes,
std::vector<OnodeRef> &d_onodes,
ceph::os::Transaction::iterator &i);
tm_ret _remove(
internal_context_t &ctx,
OnodeRef &onode);
tm_ret _touch(
internal_context_t &ctx,
OnodeRef &onode);
tm_ret _write(
internal_context_t &ctx,
OnodeRef &onode,
uint64_t offset, size_t len,
ceph::bufferlist &&bl,
uint32_t fadvise_flags);
tm_ret _zero(
internal_context_t &ctx,
OnodeRef &onode,
objaddr_t offset, extent_len_t len);
tm_ret _omap_set_values(
internal_context_t &ctx,
OnodeRef &onode,
std::map<std::string, ceph::bufferlist> &&aset);
tm_ret _omap_set_header(
internal_context_t &ctx,
OnodeRef &onode,
ceph::bufferlist &&header);
tm_ret _omap_clear(
internal_context_t &ctx,
OnodeRef &onode);
tm_ret _omap_rmkeys(
internal_context_t &ctx,
OnodeRef &onode,
omap_keys_t &&aset);
tm_ret _omap_rmkeyrange(
internal_context_t &ctx,
OnodeRef &onode,
std::string first,
std::string last);
tm_ret _truncate(
internal_context_t &ctx,
OnodeRef &onode, uint64_t size);
tm_ret _setattrs(
internal_context_t &ctx,
OnodeRef &onode,
std::map<std::string,bufferlist>&& aset);
tm_ret _rmattr(
internal_context_t &ctx,
OnodeRef &onode,
std::string name);
tm_ret _rmattrs(
internal_context_t &ctx,
OnodeRef &onode);
tm_ret _xattr_rmattr(
internal_context_t &ctx,
OnodeRef &onode,
std::string &&name);
tm_ret _xattr_clear(
internal_context_t &ctx,
OnodeRef &onode);
tm_ret _create_collection(
internal_context_t &ctx,
const coll_t& cid, int bits);
tm_ret _remove_collection(
internal_context_t &ctx,
const coll_t& cid);
using omap_set_kvs_ret = tm_iertr::future<>;
omap_set_kvs_ret _omap_set_kvs(
OnodeRef &onode,
const omap_root_le_t& omap_root,
Transaction& t,
omap_root_le_t& mutable_omap_root,
std::map<std::string, ceph::bufferlist>&& kvs);
boost::intrusive_ptr<SeastoreCollection> _get_collection(const coll_t& cid);
static constexpr auto LAT_MAX = static_cast<std::size_t>(op_type_t::MAX);
struct {
std::array<seastar::metrics::histogram, LAT_MAX> op_lat;
} stats;
seastar::metrics::histogram& get_latency(
op_type_t op_type) {
assert(static_cast<std::size_t>(op_type) < stats.op_lat.size());
return stats.op_lat[static_cast<std::size_t>(op_type)];
}
void add_latency_sample(op_type_t op_type,
std::chrono::steady_clock::duration dur) {
seastar::metrics::histogram& lat = get_latency(op_type);
lat.sample_count++;
lat.sample_sum += std::chrono::duration_cast<std::chrono::milliseconds>(dur).count();
}
private:
std::string root;
Device* device;
const uint32_t max_object_size;
bool is_test;
std::vector<Device*> secondaries;
TransactionManagerRef transaction_manager;
CollectionManagerRef collection_manager;
OnodeManagerRef onode_manager;
common::Throttle throttler;
seastar::metrics::metric_group metrics;
void register_metrics();
};
public:
SeaStore(
const std::string& root,
MDStoreRef mdstore);
~SeaStore();
seastar::future<> start() final;
seastar::future<> stop() final;
mount_ertr::future<> mount() final;
seastar::future<> umount() final;
mkfs_ertr::future<> mkfs(uuid_d new_osd_fsid) final;
seastar::future<store_statfs_t> stat() const final;
uuid_d get_fsid() const final {
ceph_assert(seastar::this_shard_id() == primary_core);
return shard_stores.local().get_fsid();
}
seastar::future<> write_meta(
const std::string& key,
const std::string& value) final {
ceph_assert(seastar::this_shard_id() == primary_core);
return shard_stores.local().write_meta(
key, value).then([this, key, value] {
return mdstore->write_meta(key, value);
}).handle_error(
crimson::ct_error::assert_all{"Invalid error in SeaStore::write_meta"}
);
}
seastar::future<std::tuple<int, std::string>> read_meta(const std::string& key) final;
seastar::future<std::vector<coll_core_t>> list_collections() final;
FuturizedStore::Shard& get_sharded_store() final {
return shard_stores.local();
}
static col_obj_ranges_t
get_objs_range(CollectionRef ch, unsigned bits);
// for test
public:
mount_ertr::future<> test_mount();
mkfs_ertr::future<> test_mkfs(uuid_d new_osd_fsid);
DeviceRef get_primary_device_ref() {
return std::move(device);
}
seastar::future<> test_start(DeviceRef dev);
private:
seastar::future<> write_fsid(uuid_d new_osd_fsid);
seastar::future<> prepare_meta(uuid_d new_osd_fsid);
seastar::future<> set_secondaries();
private:
std::string root;
MDStoreRef mdstore;
DeviceRef device;
std::vector<DeviceRef> secondaries;
seastar::sharded<SeaStore::Shard> shard_stores;
};
std::unique_ptr<SeaStore> make_seastore(
const std::string &device);
std::unique_ptr<SeaStore> make_test_seastore(
SeaStore::MDStoreRef mdstore);
}
| 15,257 | 27.897727 | 95 | h |
null | ceph-main/src/crimson/os/seastore/seastore_types.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/common/log.h"
namespace {
seastar::logger& journal_logger() {
return crimson::get_logger(ceph_subsys_seastore_journal);
}
}
namespace crimson::os::seastore {
bool is_aligned(uint64_t offset, uint64_t alignment)
{
return (offset % alignment) == 0;
}
std::ostream& operator<<(std::ostream &out, const omap_root_t &root)
{
return out << "omap_root{addr=" << root.addr
<< ", depth=" << root.depth
<< ", hint=" << root.hint
<< ", mutated=" << root.mutated
<< "}";
}
std::ostream& operator<<(std::ostream& out, const seastore_meta_t& meta)
{
return out << meta.seastore_id;
}
std::ostream &operator<<(std::ostream &out, const device_id_printer_t &id)
{
auto _id = id.id;
if (_id == DEVICE_ID_NULL) {
return out << "Dev(NULL)";
} else if (_id == DEVICE_ID_RECORD_RELATIVE) {
return out << "Dev(RR)";
} else if (_id == DEVICE_ID_BLOCK_RELATIVE) {
return out << "Dev(BR)";
} else if (_id == DEVICE_ID_DELAYED) {
return out << "Dev(DELAYED)";
} else if (_id == DEVICE_ID_FAKE) {
return out << "Dev(FAKE)";
} else if (_id == DEVICE_ID_ZERO) {
return out << "Dev(ZERO)";
} else if (_id == DEVICE_ID_ROOT) {
return out << "Dev(ROOT)";
} else {
return out << "Dev(" << (unsigned)_id << ")";
}
}
std::ostream &operator<<(std::ostream &out, const segment_id_t &segment)
{
if (segment == NULL_SEG_ID) {
return out << "Seg[NULL]";
} else {
return out << "Seg[" << device_id_printer_t{segment.device_id()}
<< "," << segment.device_segment_id()
<< "]";
}
}
std::ostream& operator<<(std::ostream& out, segment_type_t t)
{
switch(t) {
case segment_type_t::JOURNAL:
return out << "JOURNAL";
case segment_type_t::OOL:
return out << "OOL";
case segment_type_t::NULL_SEG:
return out << "NULL_SEG";
default:
return out << "INVALID_SEGMENT_TYPE!";
}
}
std::ostream& operator<<(std::ostream& out, segment_seq_printer_t seq)
{
if (seq.seq == NULL_SEG_SEQ) {
return out << "sseq(NULL)";
} else {
return out << "sseq(" << seq.seq << ")";
}
}
std::ostream &operator<<(std::ostream &out, const paddr_t &rhs)
{
auto id = rhs.get_device_id();
out << "paddr<";
if (rhs == P_ADDR_NULL) {
out << "NULL";
} else if (rhs == P_ADDR_MIN) {
out << "MIN";
} else if (rhs == P_ADDR_ZERO) {
out << "ZERO";
} else if (has_device_off(id)) {
auto &s = rhs.as_res_paddr();
out << device_id_printer_t{id}
<< ","
<< s.get_device_off();
} else if (rhs.get_addr_type() == paddr_types_t::SEGMENT) {
auto &s = rhs.as_seg_paddr();
out << s.get_segment_id()
<< ","
<< s.get_segment_off();
} else if (rhs.get_addr_type() == paddr_types_t::RANDOM_BLOCK) {
auto &s = rhs.as_blk_paddr();
out << device_id_printer_t{s.get_device_id()}
<< ","
<< s.get_device_off();
} else {
out << "INVALID!";
}
return out << ">";
}
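// Advance (off > 0) or rewind (off < 0) a journal sequence by 'off' bytes
// within a journal that rolls over [roll_start, roll_start + roll_size),
// adjusting the segment sequence on each wrap; rewinding past the beginning
// returns JOURNAL_SEQ_MIN.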
journal_seq_t journal_seq_t::add_offset(
journal_type_t type,
device_off_t off,
device_off_t roll_start,
device_off_t roll_size) const
{
assert(offset.is_absolute());
assert(off <= DEVICE_OFF_MAX && off >= DEVICE_OFF_MIN);
assert(roll_start >= 0);
assert(roll_size > 0);
segment_seq_t jseq = segment_seq;
device_off_t joff;
if (type == journal_type_t::SEGMENTED) {
joff = offset.as_seg_paddr().get_segment_off();
} else {
assert(type == journal_type_t::RANDOM_BLOCK);
auto boff = offset.as_blk_paddr().get_device_off();
joff = boff;
}
auto roll_end = roll_start + roll_size;
assert(joff >= roll_start);
assert(joff <= roll_end);
if (off >= 0) {
device_off_t new_jseq = jseq + (off / roll_size);
joff += (off % roll_size);
if (joff >= roll_end) {
++new_jseq;
joff -= roll_size;
}
assert(new_jseq < MAX_SEG_SEQ);
jseq = static_cast<segment_seq_t>(new_jseq);
} else {
device_off_t mod = (-off) / roll_size;
joff -= ((-off) % roll_size);
if (joff < roll_start) {
++mod;
joff += roll_size;
}
if (jseq >= mod) {
jseq -= mod;
} else {
return JOURNAL_SEQ_MIN;
}
}
assert(joff >= roll_start);
assert(joff < roll_end);
return journal_seq_t{jseq, make_block_relative_paddr(joff)};
}
device_off_t journal_seq_t::relative_to(
journal_type_t type,
const journal_seq_t& r,
device_off_t roll_start,
device_off_t roll_size) const
{
assert(offset.is_absolute());
assert(r.offset.is_absolute());
assert(roll_start >= 0);
assert(roll_size > 0);
device_off_t ret = static_cast<device_off_t>(segment_seq) - r.segment_seq;
ret *= roll_size;
if (type == journal_type_t::SEGMENTED) {
ret += (static_cast<device_off_t>(offset.as_seg_paddr().get_segment_off()) -
static_cast<device_off_t>(r.offset.as_seg_paddr().get_segment_off()));
} else {
assert(type == journal_type_t::RANDOM_BLOCK);
ret += offset.as_blk_paddr().get_device_off() -
r.offset.as_blk_paddr().get_device_off();
}
assert(ret <= DEVICE_OFF_MAX && ret >= DEVICE_OFF_MIN);
return ret;
}
std::ostream &operator<<(std::ostream &out, const journal_seq_t &seq)
{
if (seq == JOURNAL_SEQ_NULL) {
return out << "JOURNAL_SEQ_NULL";
} else if (seq == JOURNAL_SEQ_MIN) {
return out << "JOURNAL_SEQ_MIN";
} else {
return out << "jseq("
<< segment_seq_printer_t{seq.segment_seq}
<< ", " << seq.offset
<< ")";
}
}
std::ostream &operator<<(std::ostream &out, extent_types_t t)
{
switch (t) {
case extent_types_t::ROOT:
return out << "ROOT";
case extent_types_t::LADDR_INTERNAL:
return out << "LADDR_INTERNAL";
case extent_types_t::LADDR_LEAF:
return out << "LADDR_LEAF";
case extent_types_t::DINK_LADDR_LEAF:
return out << "LADDR_LEAF";
case extent_types_t::ONODE_BLOCK_STAGED:
return out << "ONODE_BLOCK_STAGED";
case extent_types_t::OMAP_INNER:
return out << "OMAP_INNER";
case extent_types_t::OMAP_LEAF:
return out << "OMAP_LEAF";
case extent_types_t::COLL_BLOCK:
return out << "COLL_BLOCK";
case extent_types_t::OBJECT_DATA_BLOCK:
return out << "OBJECT_DATA_BLOCK";
case extent_types_t::RETIRED_PLACEHOLDER:
return out << "RETIRED_PLACEHOLDER";
case extent_types_t::TEST_BLOCK:
return out << "TEST_BLOCK";
case extent_types_t::TEST_BLOCK_PHYSICAL:
return out << "TEST_BLOCK_PHYSICAL";
case extent_types_t::BACKREF_INTERNAL:
return out << "BACKREF_INTERNAL";
case extent_types_t::BACKREF_LEAF:
return out << "BACKREF_LEAF";
case extent_types_t::NONE:
return out << "NONE";
default:
return out << "UNKNOWN";
}
}
std::ostream &operator<<(std::ostream &out, rewrite_gen_printer_t gen)
{
if (gen.gen == NULL_GENERATION) {
return out << "GEN_NULL";
} else if (gen.gen == INIT_GENERATION) {
return out << "GEN_INIT";
} else if (gen.gen == INLINE_GENERATION) {
return out << "GEN_INL";
} else if (gen.gen == OOL_GENERATION) {
return out << "GEN_OOL";
} else if (gen.gen > REWRITE_GENERATIONS) {
return out << "GEN_INVALID(" << (unsigned)gen.gen << ")!";
} else {
return out << "GEN(" << (unsigned)gen.gen << ")";
}
}
std::ostream &operator<<(std::ostream &out, data_category_t c)
{
switch (c) {
case data_category_t::METADATA:
return out << "MD";
case data_category_t::DATA:
return out << "DATA";
default:
return out << "INVALID_CATEGORY!";
}
}
std::ostream &operator<<(std::ostream &out, sea_time_point_printer_t tp)
{
if (tp.tp == NULL_TIME) {
return out << "tp(NULL)";
}
auto time = seastar::lowres_system_clock::to_time_t(tp.tp);
char buf[32];
std::strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", std::localtime(&time));
return out << "tp(" << buf << ")";
}
std::ostream &operator<<(std::ostream &out, mod_time_point_printer_t tp) {
auto time = mod_to_timepoint(tp.tp);
return out << "mod_" << sea_time_point_printer_t{time};
}
std::ostream &operator<<(std::ostream &out, const laddr_list_t &rhs)
{
  bool first = true;
  for (auto &i: rhs) {
    out << (first ? '[' : ',') << '(' << i.first << ',' << i.second << ')';
    first = false;
}
return out << ']';
}
std::ostream &operator<<(std::ostream &out, const paddr_list_t &rhs)
{
  bool first = true;
  for (auto &i: rhs) {
    out << (first ? '[' : ',') << '(' << i.first << ',' << i.second << ')';
    first = false;
}
return out << ']';
}
std::ostream &operator<<(std::ostream &out, const delta_info_t &delta)
{
return out << "delta_info_t("
<< "type: " << delta.type
<< ", paddr: " << delta.paddr
<< ", laddr: " << delta.laddr
<< ", prev_crc: " << delta.prev_crc
<< ", final_crc: " << delta.final_crc
<< ", length: " << delta.length
<< ", pversion: " << delta.pversion
<< ", ext_seq: " << delta.ext_seq
<< ", seg_type: " << delta.seg_type
<< ")";
}
std::ostream &operator<<(std::ostream &out, const journal_tail_delta_t &delta)
{
return out << "journal_tail_delta_t("
<< "alloc_tail=" << delta.alloc_tail
<< ", dirty_tail=" << delta.dirty_tail
<< ")";
}
std::ostream &operator<<(std::ostream &out, const extent_info_t &info)
{
return out << "extent_info_t("
<< "type: " << info.type
<< ", addr: " << info.addr
<< ", len: " << info.len
<< ")";
}
std::ostream &operator<<(std::ostream &out, const segment_header_t &header)
{
return out << "segment_header_t("
<< header.physical_segment_id
<< " " << header.type
<< " " << segment_seq_printer_t{header.segment_seq}
<< " " << header.category
<< " " << rewrite_gen_printer_t{header.generation}
<< ", dirty_tail=" << header.dirty_tail
<< ", alloc_tail=" << header.alloc_tail
<< ", segment_nonce=" << header.segment_nonce
<< ")";
}
std::ostream &operator<<(std::ostream &out, const segment_tail_t &tail)
{
return out << "segment_tail_t("
<< tail.physical_segment_id
<< " " << tail.type
<< " " << segment_seq_printer_t{tail.segment_seq}
<< ", segment_nonce=" << tail.segment_nonce
<< ", modify_time=" << mod_time_point_printer_t{tail.modify_time}
<< ", num_extents=" << tail.num_extents
<< ")";
}
extent_len_t record_size_t::get_raw_mdlength() const
{
  // an empty record is allowed to be submitted
return plain_mdlength +
ceph::encoded_sizeof_bounded<record_header_t>();
}
void record_size_t::account_extent(extent_len_t extent_len)
{
assert(extent_len);
plain_mdlength += ceph::encoded_sizeof_bounded<extent_info_t>();
dlength += extent_len;
}
void record_size_t::account(const delta_info_t& delta)
{
assert(delta.bl.length());
plain_mdlength += ceph::encoded_sizeof(delta);
}
std::ostream &operator<<(std::ostream &os, transaction_type_t type)
{
switch (type) {
case transaction_type_t::MUTATE:
return os << "MUTATE";
case transaction_type_t::READ:
return os << "READ";
case transaction_type_t::TRIM_DIRTY:
return os << "TRIM_DIRTY";
case transaction_type_t::TRIM_ALLOC:
return os << "TRIM_ALLOC";
case transaction_type_t::CLEANER_MAIN:
return os << "CLEANER_MAIN";
case transaction_type_t::CLEANER_COLD:
return os << "CLEANER_COLD";
case transaction_type_t::MAX:
return os << "TRANS_TYPE_NULL";
default:
return os << "INVALID_TRANS_TYPE("
<< static_cast<std::size_t>(type)
<< ")";
}
}
std::ostream &operator<<(std::ostream& out, const record_size_t& rsize)
{
return out << "record_size_t("
<< "raw_md=" << rsize.get_raw_mdlength()
<< ", data=" << rsize.dlength
<< ")";
}
std::ostream &operator<<(std::ostream& out, const record_t& r)
{
return out << "record_t("
<< "type=" << r.type
<< ", num_extents=" << r.extents.size()
<< ", num_deltas=" << r.deltas.size()
<< ", modify_time=" << sea_time_point_printer_t{r.modify_time}
<< ")";
}
std::ostream &operator<<(std::ostream& out, const record_header_t& r)
{
return out << "record_header_t("
<< "type=" << r.type
<< ", num_extents=" << r.extents
<< ", num_deltas=" << r.deltas
<< ", modify_time=" << mod_time_point_printer_t{r.modify_time}
<< ")";
}
std::ostream& operator<<(std::ostream& out, const record_group_header_t& h)
{
return out << "record_group_header_t("
<< "num_records=" << h.records
<< ", mdlength=" << h.mdlength
<< ", dlength=" << h.dlength
<< ", nonce=" << h.segment_nonce
<< ", committed_to=" << h.committed_to
<< ", data_crc=" << h.data_crc
<< ")";
}
extent_len_t record_group_size_t::get_raw_mdlength() const
{
return plain_mdlength +
sizeof(checksum_t) +
ceph::encoded_sizeof_bounded<record_group_header_t>();
}
void record_group_size_t::account(
const record_size_t& rsize,
extent_len_t _block_size)
{
  // an empty record is allowed to be submitted
assert(_block_size > 0);
assert(rsize.dlength % _block_size == 0);
assert(block_size == 0 || block_size == _block_size);
plain_mdlength += rsize.get_raw_mdlength();
dlength += rsize.dlength;
block_size = _block_size;
}
std::ostream& operator<<(std::ostream& out, const record_group_size_t& size)
{
return out << "record_group_size_t("
<< "raw_md=" << size.get_raw_mdlength()
<< ", data=" << size.dlength
<< ", block_size=" << size.block_size
<< ", fullness=" << size.get_fullness()
<< ")";
}
std::ostream& operator<<(std::ostream& out, const record_group_t& rg)
{
return out << "record_group_t("
<< "num_records=" << rg.records.size()
<< ", " << rg.size
<< ")";
}
ceph::bufferlist encode_record(
record_t&& record,
extent_len_t block_size,
const journal_seq_t& committed_to,
segment_nonce_t current_segment_nonce)
{
record_group_t record_group(std::move(record), block_size);
return encode_records(
record_group,
committed_to,
current_segment_nonce);
}
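// On-disk layout produced below: record_group_header, metadata crc, per-record
// headers, per-record extent infos, per-record deltas, zero padding up to the
// aligned metadata length, and finally the concatenated extent data.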
ceph::bufferlist encode_records(
record_group_t& record_group,
const journal_seq_t& committed_to,
segment_nonce_t current_segment_nonce)
{
assert(record_group.size.block_size > 0);
assert(record_group.records.size() > 0);
bufferlist data_bl;
for (auto& r: record_group.records) {
for (auto& i: r.extents) {
assert(i.bl.length());
data_bl.append(i.bl);
}
}
bufferlist bl;
record_group_header_t header{
static_cast<extent_len_t>(record_group.records.size()),
record_group.size.get_mdlength(),
record_group.size.dlength,
current_segment_nonce,
committed_to,
data_bl.crc32c(-1)
};
encode(header, bl);
auto metadata_crc_filler = bl.append_hole(sizeof(checksum_t));
for (auto& r: record_group.records) {
record_header_t rheader{
r.type,
(extent_len_t)r.deltas.size(),
(extent_len_t)r.extents.size(),
timepoint_to_mod(r.modify_time)
};
encode(rheader, bl);
}
for (auto& r: record_group.records) {
for (const auto& i: r.extents) {
encode(extent_info_t(i), bl);
}
}
for (auto& r: record_group.records) {
for (const auto& i: r.deltas) {
encode(i, bl);
}
}
ceph_assert(bl.length() == record_group.size.get_raw_mdlength());
auto aligned_mdlength = record_group.size.get_mdlength();
if (bl.length() != aligned_mdlength) {
assert(bl.length() < aligned_mdlength);
bl.append_zero(aligned_mdlength - bl.length());
}
auto bliter = bl.cbegin();
auto metadata_crc = bliter.crc32c(
ceph::encoded_sizeof_bounded<record_group_header_t>(),
-1);
bliter += sizeof(checksum_t); /* metadata crc hole */
metadata_crc = bliter.crc32c(
bliter.get_remaining(),
metadata_crc);
ceph_le32 metadata_crc_le;
metadata_crc_le = metadata_crc;
metadata_crc_filler.copy_in(
sizeof(checksum_t),
reinterpret_cast<const char *>(&metadata_crc_le));
bl.claim_append(data_bl);
ceph_assert(bl.length() == record_group.size.get_encoded_length());
record_group.clear();
return bl;
}
std::optional<record_group_header_t>
try_decode_records_header(
const ceph::bufferlist& header_bl,
segment_nonce_t expected_nonce)
{
auto bp = header_bl.cbegin();
record_group_header_t header;
try {
decode(header, bp);
} catch (ceph::buffer::error &e) {
journal_logger().debug(
"try_decode_records_header: failed, "
"cannot decode record_group_header_t, got {}.",
e.what());
return std::nullopt;
}
if (header.segment_nonce != expected_nonce) {
journal_logger().debug(
"try_decode_records_header: failed, record_group_header nonce mismatch, "
"read {}, expected {}!",
header.segment_nonce,
expected_nonce);
return std::nullopt;
}
return header;
}
bool validate_records_metadata(
const ceph::bufferlist& md_bl)
{
auto bliter = md_bl.cbegin();
auto test_crc = bliter.crc32c(
ceph::encoded_sizeof_bounded<record_group_header_t>(),
-1);
ceph_le32 recorded_crc_le;
decode(recorded_crc_le, bliter);
uint32_t recorded_crc = recorded_crc_le;
test_crc = bliter.crc32c(
bliter.get_remaining(),
test_crc);
bool success = (test_crc == recorded_crc);
if (!success) {
journal_logger().debug(
"validate_records_metadata: failed, metadata crc mismatch.");
}
return success;
}
bool validate_records_data(
const record_group_header_t& header,
const ceph::bufferlist& data_bl)
{
bool success = (data_bl.crc32c(-1) == header.data_crc);
if (!success) {
journal_logger().debug(
"validate_records_data: failed, data crc mismatch!");
}
return success;
}
std::optional<std::vector<record_header_t>>
try_decode_record_headers(
const record_group_header_t& header,
const ceph::bufferlist& md_bl)
{
auto bliter = md_bl.cbegin();
bliter += ceph::encoded_sizeof_bounded<record_group_header_t>();
bliter += sizeof(checksum_t); /* metadata crc hole */
std::vector<record_header_t> record_headers(header.records);
for (auto &&i: record_headers) {
try {
decode(i, bliter);
} catch (ceph::buffer::error &e) {
journal_logger().debug(
"try_decode_record_headers: failed, "
"cannot decode record_header_t, got {}.",
e.what());
return std::nullopt;
}
}
return record_headers;
}
std::optional<std::vector<record_extent_infos_t> >
try_decode_extent_infos(
const record_group_header_t& header,
const ceph::bufferlist& md_bl)
{
auto maybe_headers = try_decode_record_headers(header, md_bl);
if (!maybe_headers) {
return std::nullopt;
}
auto bliter = md_bl.cbegin();
bliter += ceph::encoded_sizeof_bounded<record_group_header_t>();
bliter += sizeof(checksum_t); /* metadata crc hole */
bliter += (ceph::encoded_sizeof_bounded<record_header_t>() *
maybe_headers->size());
std::vector<record_extent_infos_t> record_extent_infos(
maybe_headers->size());
auto result_iter = record_extent_infos.begin();
for (auto& h: *maybe_headers) {
result_iter->header = h;
result_iter->extent_infos.resize(h.extents);
for (auto& i: result_iter->extent_infos) {
try {
decode(i, bliter);
} catch (ceph::buffer::error &e) {
journal_logger().debug(
"try_decode_extent_infos: failed, "
"cannot decode extent_info_t, got {}.",
e.what());
return std::nullopt;
}
}
++result_iter;
}
return record_extent_infos;
}
std::optional<std::vector<record_deltas_t> >
try_decode_deltas(
const record_group_header_t& header,
const ceph::bufferlist& md_bl,
paddr_t record_block_base)
{
auto maybe_record_extent_infos = try_decode_extent_infos(header, md_bl);
if (!maybe_record_extent_infos) {
return std::nullopt;
}
auto bliter = md_bl.cbegin();
bliter += ceph::encoded_sizeof_bounded<record_group_header_t>();
bliter += sizeof(checksum_t); /* metadata crc hole */
bliter += (ceph::encoded_sizeof_bounded<record_header_t>() *
maybe_record_extent_infos->size());
for (auto& r: *maybe_record_extent_infos) {
bliter += (ceph::encoded_sizeof_bounded<extent_info_t>() *
r.extent_infos.size());
}
std::vector<record_deltas_t> record_deltas(
maybe_record_extent_infos->size());
auto result_iter = record_deltas.begin();
for (auto& r: *maybe_record_extent_infos) {
result_iter->record_block_base = record_block_base;
result_iter->deltas.resize(r.header.deltas);
for (auto& i: result_iter->deltas) {
try {
decode(i.second, bliter);
i.first = mod_to_timepoint(r.header.modify_time);
} catch (ceph::buffer::error &e) {
journal_logger().debug(
"try_decode_deltas: failed, "
"cannot decode delta_info_t, got {}.",
e.what());
return std::nullopt;
}
}
for (auto& i: r.extent_infos) {
record_block_base = record_block_base.add_offset(i.len);
}
++result_iter;
}
return record_deltas;
}
std::ostream& operator<<(std::ostream& out, placement_hint_t h)
{
switch (h) {
case placement_hint_t::HOT:
return out << "Hint(HOT)";
case placement_hint_t::COLD:
return out << "Hint(COLD)";
case placement_hint_t::REWRITE:
return out << "Hint(REWRITE)";
case PLACEMENT_HINT_NULL:
return out << "Hint(NULL)";
default:
return out << "INVALID_PLACEMENT_HINT_TYPE!";
}
}
bool can_delay_allocation(device_type_t type) {
  // Some device types (PMEM, for example) may not support delayed allocation
  // in the future; all device types currently in use do support it.
return true;
}
device_type_t string_to_device_type(std::string type) {
if (type == "HDD") {
return device_type_t::HDD;
}
if (type == "SSD") {
return device_type_t::SSD;
}
if (type == "ZBD") {
return device_type_t::ZBD;
}
if (type == "RANDOM_BLOCK_SSD") {
return device_type_t::RANDOM_BLOCK_SSD;
}
return device_type_t::NONE;
}
std::ostream& operator<<(std::ostream& out, device_type_t t)
{
switch (t) {
case device_type_t::NONE:
return out << "NONE";
case device_type_t::HDD:
return out << "HDD";
case device_type_t::SSD:
return out << "SSD";
case device_type_t::ZBD:
return out << "ZBD";
case device_type_t::EPHEMERAL_COLD:
return out << "EPHEMERAL_COLD";
case device_type_t::EPHEMERAL_MAIN:
return out << "EPHEMERAL_MAIN";
case device_type_t::RANDOM_BLOCK_SSD:
return out << "RANDOM_BLOCK_SSD";
case device_type_t::RANDOM_BLOCK_EPHEMERAL:
return out << "RANDOM_BLOCK_EPHEMERAL";
default:
return out << "INVALID_DEVICE_TYPE!";
}
}
std::ostream& operator<<(std::ostream& out, backend_type_t btype) {
if (btype == backend_type_t::SEGMENTED) {
return out << "SEGMENTED";
} else {
return out << "RANDOM_BLOCK";
}
}
std::ostream& operator<<(std::ostream& out, const write_result_t& w)
{
return out << "write_result_t("
<< "start=" << w.start_seq
<< ", length=" << w.length
<< ")";
}
std::ostream& operator<<(std::ostream& out, const record_locator_t& l)
{
return out << "record_locator_t("
<< "block_base=" << l.record_block_base
<< ", " << l.write_result
<< ")";
}
void scan_valid_records_cursor::emplace_record_group(
const record_group_header_t& header, ceph::bufferlist&& md_bl)
{
auto new_committed_to = header.committed_to;
ceph_assert(last_committed == JOURNAL_SEQ_NULL ||
last_committed <= new_committed_to);
last_committed = new_committed_to;
pending_record_groups.emplace_back(
seq.offset,
header,
std::move(md_bl));
increment_seq(header.dlength + header.mdlength);
ceph_assert(new_committed_to == JOURNAL_SEQ_NULL ||
new_committed_to < seq);
}
std::ostream& operator<<(std::ostream& out, const scan_valid_records_cursor& c)
{
return out << "cursor(last_valid_header_found=" << c.last_valid_header_found
<< ", seq=" << c.seq
<< ", last_committed=" << c.last_committed
<< ", pending_record_groups=" << c.pending_record_groups.size()
<< ", num_consumed_records=" << c.num_consumed_records
<< ")";
}
}
| 24,803 | 27.642032 | 82 | cc |
null | ceph-main/src/crimson/os/seastore/seastore_types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <limits>
#include <numeric>
#include <optional>
#include <iostream>
#include <vector>
#include <boost/core/ignore_unused.hpp>
#include <seastar/core/lowres_clock.hh>
#include "include/byteorder.h"
#include "include/denc.h"
#include "include/buffer.h"
#include "include/intarith.h"
#include "include/interval_set.h"
#include "include/uuid.h"
namespace crimson::os::seastore {
/* using a special xattr key "omap_header" to store omap header */
const std::string OMAP_HEADER_XATTR_KEY = "omap_header";
using transaction_id_t = uint64_t;
constexpr transaction_id_t TRANS_ID_NULL = 0;
/*
* Note: NULL value is usually the default and max value.
*/
using depth_t = uint32_t;
using depth_le_t = ceph_le32;
inline depth_le_t init_depth_le(uint32_t i) {
return ceph_le32(i);
}
using checksum_t = uint32_t;
// Immutable metadata for seastore to set at mkfs time
struct seastore_meta_t {
uuid_d seastore_id;
DENC(seastore_meta_t, v, p) {
DENC_START(1, 1, p);
denc(v.seastore_id, p);
DENC_FINISH(p);
}
};
std::ostream& operator<<(std::ostream& out, const seastore_meta_t& meta);
bool is_aligned(uint64_t offset, uint64_t alignment);
// identifies a specific physical device within seastore
using device_id_t = uint8_t;
constexpr auto DEVICE_ID_BITS = std::numeric_limits<device_id_t>::digits;
constexpr device_id_t DEVICE_ID_MAX = std::numeric_limits<device_id_t>::max();
constexpr device_id_t DEVICE_ID_NULL = DEVICE_ID_MAX;
constexpr device_id_t DEVICE_ID_RECORD_RELATIVE = DEVICE_ID_MAX - 1;
constexpr device_id_t DEVICE_ID_BLOCK_RELATIVE = DEVICE_ID_MAX - 2;
constexpr device_id_t DEVICE_ID_DELAYED = DEVICE_ID_MAX - 3;
// for tests which generate fake paddrs
constexpr device_id_t DEVICE_ID_FAKE = DEVICE_ID_MAX - 4;
constexpr device_id_t DEVICE_ID_ZERO = DEVICE_ID_MAX - 5;
constexpr device_id_t DEVICE_ID_ROOT = DEVICE_ID_MAX - 6;
constexpr device_id_t DEVICE_ID_MAX_VALID = DEVICE_ID_MAX - 7;
constexpr device_id_t DEVICE_ID_MAX_VALID_SEGMENT = DEVICE_ID_MAX >> 1;
constexpr device_id_t DEVICE_ID_SEGMENTED_MIN = 0;
constexpr device_id_t DEVICE_ID_RANDOM_BLOCK_MIN =
1 << (std::numeric_limits<device_id_t>::digits - 1);
struct device_id_printer_t {
device_id_t id;
};
std::ostream &operator<<(std::ostream &out, const device_id_printer_t &id);
// 1 bit in paddr_t to identify the absolute physical address type
enum class paddr_types_t {
SEGMENT = 0,
RANDOM_BLOCK = 1,
RESERVED = 2
};
constexpr paddr_types_t device_id_to_paddr_type(device_id_t id) {
if (id > DEVICE_ID_MAX_VALID) {
return paddr_types_t::RESERVED;
} else if ((id & 0x80) == 0) {
return paddr_types_t::SEGMENT;
} else {
return paddr_types_t::RANDOM_BLOCK;
}
}
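// Consequently: device ids 0x00..0x7f address SEGMENT devices,
// 0x80..DEVICE_ID_MAX_VALID address RANDOM_BLOCK devices, and the special ids
// above DEVICE_ID_MAX_VALID map to RESERVED.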
constexpr bool has_device_off(device_id_t id) {
return id == DEVICE_ID_RECORD_RELATIVE ||
id == DEVICE_ID_BLOCK_RELATIVE ||
id == DEVICE_ID_DELAYED ||
id == DEVICE_ID_FAKE ||
id == DEVICE_ID_ROOT;
}
// internal segment id type of segment_id_t below, with the top
// "DEVICE_ID_BITS" bits representing the device id of the segment.
using internal_segment_id_t = uint32_t;
constexpr auto SEGMENT_ID_BITS = std::numeric_limits<internal_segment_id_t>::digits;
// segment ids without a device id encapsulated
using device_segment_id_t = uint32_t;
constexpr auto DEVICE_SEGMENT_ID_BITS = SEGMENT_ID_BITS - DEVICE_ID_BITS;
constexpr device_segment_id_t DEVICE_SEGMENT_ID_MAX = (1 << DEVICE_SEGMENT_ID_BITS) - 1;
// Identifies segment location on disk, see SegmentManager,
struct segment_id_t {
public:
// segment_id_t() == MAX_SEG_ID == NULL_SEG_ID
segment_id_t()
: segment_id_t(DEVICE_ID_MAX_VALID_SEGMENT, DEVICE_SEGMENT_ID_MAX) {}
segment_id_t(device_id_t id, device_segment_id_t _segment)
: segment_id_t(make_internal(id, _segment)) {}
segment_id_t(internal_segment_id_t _segment)
: segment(_segment) {
assert(device_id_to_paddr_type(device_id()) == paddr_types_t::SEGMENT);
}
[[gnu::always_inline]]
constexpr device_id_t device_id() const {
return static_cast<device_id_t>(segment >> DEVICE_SEGMENT_ID_BITS);
}
[[gnu::always_inline]]
constexpr device_segment_id_t device_segment_id() const {
constexpr internal_segment_id_t _SEGMENT_ID_MASK = (1u << DEVICE_SEGMENT_ID_BITS) - 1;
return segment & _SEGMENT_ID_MASK;
}
bool operator==(const segment_id_t& other) const {
return segment == other.segment;
}
bool operator!=(const segment_id_t& other) const {
return segment != other.segment;
}
bool operator<(const segment_id_t& other) const {
return segment < other.segment;
}
bool operator<=(const segment_id_t& other) const {
return segment <= other.segment;
}
bool operator>(const segment_id_t& other) const {
return segment > other.segment;
}
bool operator>=(const segment_id_t& other) const {
return segment >= other.segment;
}
DENC(segment_id_t, v, p) {
denc(v.segment, p);
}
static constexpr segment_id_t create_const(
device_id_t id, device_segment_id_t segment) {
return segment_id_t(id, segment, const_t{});
}
private:
struct const_t {};
constexpr segment_id_t(device_id_t id, device_segment_id_t _segment, const_t)
: segment(make_internal(id, _segment)) {}
constexpr static inline internal_segment_id_t make_internal(
device_id_t d_id,
device_segment_id_t s_id) {
return static_cast<internal_segment_id_t>(s_id) |
(static_cast<internal_segment_id_t>(d_id) << DEVICE_SEGMENT_ID_BITS);
}
internal_segment_id_t segment;
friend struct segment_id_le_t;
friend struct paddr_t;
};
std::ostream &operator<<(std::ostream &out, const segment_id_t&);
// ondisk type of segment_id_t
struct __attribute((packed)) segment_id_le_t {
ceph_le32 segment = ceph_le32(segment_id_t().segment);
segment_id_le_t(const segment_id_t id) :
segment(ceph_le32(id.segment)) {}
operator segment_id_t() const {
return segment_id_t(segment);
}
};
constexpr segment_id_t MIN_SEG_ID = segment_id_t::create_const(0, 0);
// segment_id_t() == MAX_SEG_ID == NULL_SEG_ID
constexpr segment_id_t MAX_SEG_ID =
segment_id_t::create_const(DEVICE_ID_MAX_VALID_SEGMENT, DEVICE_SEGMENT_ID_MAX);
constexpr segment_id_t NULL_SEG_ID = MAX_SEG_ID;
/* Monotonically increasing segment seq, uniquely identifies
* the incarnation of a segment */
using segment_seq_t = uint32_t;
static constexpr segment_seq_t MAX_SEG_SEQ =
std::numeric_limits<segment_seq_t>::max();
static constexpr segment_seq_t NULL_SEG_SEQ = MAX_SEG_SEQ;
enum class segment_type_t : uint8_t {
JOURNAL = 0,
OOL,
NULL_SEG,
};
std::ostream& operator<<(std::ostream& out, segment_type_t t);
struct segment_seq_printer_t {
segment_seq_t seq;
};
std::ostream& operator<<(std::ostream& out, segment_seq_printer_t seq);
/**
* segment_map_t
*
* Compact templated mapping from a segment_id_t to a value type.
*/
template <typename T>
class segment_map_t {
public:
segment_map_t() {
// initializes top vector with 0 length vectors to indicate that they
// are not yet present
device_to_segments.resize(DEVICE_ID_MAX_VALID);
}
void add_device(device_id_t device, std::size_t segments, const T& init) {
ceph_assert(device <= DEVICE_ID_MAX_VALID);
ceph_assert(device_to_segments[device].size() == 0);
ceph_assert(segments > 0);
device_to_segments[device].resize(segments, init);
total_segments += segments;
}
void clear() {
device_to_segments.clear();
device_to_segments.resize(DEVICE_ID_MAX_VALID);
total_segments = 0;
}
T& operator[](segment_id_t id) {
assert(id.device_segment_id() < device_to_segments[id.device_id()].size());
return device_to_segments[id.device_id()][id.device_segment_id()];
}
const T& operator[](segment_id_t id) const {
assert(id.device_segment_id() < device_to_segments[id.device_id()].size());
return device_to_segments[id.device_id()][id.device_segment_id()];
}
bool contains(segment_id_t id) {
bool b = id.device_id() < device_to_segments.size();
if (!b) {
return b;
}
b = id.device_segment_id() < device_to_segments[id.device_id()].size();
return b;
}
auto begin() {
return iterator<false>::lower_bound(*this, 0, 0);
}
auto begin() const {
return iterator<true>::lower_bound(*this, 0, 0);
}
auto end() {
return iterator<false>::end_iterator(*this);
}
auto end() const {
return iterator<true>::end_iterator(*this);
}
auto device_begin(device_id_t id) {
auto ret = iterator<false>::lower_bound(*this, id, 0);
assert(ret->first.device_id() == id);
return ret;
}
auto device_end(device_id_t id) {
return iterator<false>::lower_bound(*this, id + 1, 0);
}
size_t size() const {
return total_segments;
}
private:
template <bool is_const = false>
class iterator {
/// points at set being iterated over
std::conditional_t<
is_const,
const segment_map_t &,
segment_map_t &> parent;
/// points at current device, or DEVICE_ID_MAX_VALID if is_end()
device_id_t device_id;
/// segment at which we are pointing, 0 if is_end()
device_segment_id_t device_segment_id;
/// holds referent for operator* and operator-> when !is_end()
std::optional<
std::pair<
const segment_id_t,
std::conditional_t<is_const, const T&, T&>
>> current;
bool is_end() const {
return device_id == DEVICE_ID_MAX_VALID;
}
void find_valid() {
assert(!is_end());
auto &device_vec = parent.device_to_segments[device_id];
if (device_vec.size() == 0 ||
device_segment_id == device_vec.size()) {
while (++device_id < DEVICE_ID_MAX_VALID &&
parent.device_to_segments[device_id].size() == 0);
device_segment_id = 0;
}
if (is_end()) {
current = std::nullopt;
} else {
current.emplace(
segment_id_t{device_id, device_segment_id},
parent.device_to_segments[device_id][device_segment_id]
);
}
}
iterator(
decltype(parent) &parent,
device_id_t device_id,
device_segment_id_t device_segment_id)
: parent(parent), device_id(device_id),
device_segment_id(device_segment_id) {}
public:
static iterator lower_bound(
decltype(parent) &parent,
device_id_t device_id,
device_segment_id_t device_segment_id) {
if (device_id == DEVICE_ID_MAX_VALID) {
return end_iterator(parent);
} else {
auto ret = iterator{parent, device_id, device_segment_id};
ret.find_valid();
return ret;
}
}
static iterator end_iterator(
decltype(parent) &parent) {
return iterator{parent, DEVICE_ID_MAX_VALID, 0};
}
iterator<is_const>& operator++() {
assert(!is_end());
++device_segment_id;
find_valid();
return *this;
}
bool operator==(iterator<is_const> rit) {
return (device_id == rit.device_id &&
device_segment_id == rit.device_segment_id);
}
bool operator!=(iterator<is_const> rit) {
return !(*this == rit);
}
template <bool c = is_const, std::enable_if_t<c, int> = 0>
const std::pair<const segment_id_t, const T&> *operator->() {
assert(!is_end());
return &*current;
}
template <bool c = is_const, std::enable_if_t<!c, int> = 0>
std::pair<const segment_id_t, T&> *operator->() {
assert(!is_end());
return &*current;
}
using reference = std::conditional_t<
is_const, const std::pair<const segment_id_t, const T&>&,
std::pair<const segment_id_t, T&>&>;
reference operator*() {
assert(!is_end());
return *current;
}
};
/**
* device_to_segments
*
* device -> segment -> T mapping. device_to_segments[d].size() > 0 iff
* device <d> has been added.
*/
std::vector<std::vector<T>> device_to_segments;
/// total number of added segments
size_t total_segments = 0;
};
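/*
 * Usage sketch (illustrative):
 *
 *   segment_map_t<segment_seq_t> seqs;
 *   seqs.add_device(0, 1024, NULL_SEG_SEQ);   // register device 0
 *   seqs[segment_id_t(0, 42)] = 7;            // per-segment value
 *   for (auto &[id, seq] : seqs) { ... }      // iterates added devices only
 */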
/**
* paddr_t
*
* <segment, offset> offset on disk, see SegmentManager
*
* May be absolute, record_relative, or block_relative.
*
* Blocks get read independently of the surrounding record,
* so paddrs embedded directly within a block need to refer
* to other blocks within the same record by a block_relative
* addr relative to the block's own offset. By contrast,
* deltas to existing blocks need to use record_relative
* addrs relative to the first block of the record.
*
 * Fresh extents during a transaction are referred to by
* record_relative paddrs.
*/
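/*
 * Illustrative example: a fresh extent written at byte 0x2000 within a record
 * is referenced as
 *   make_record_relative_paddr(0x2000)
 * until the record's final position is known; at that point
 *   record_base.add_record_relative(make_record_relative_paddr(0x2000))
 * yields the absolute address.
 */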
using internal_paddr_t = uint64_t;
constexpr auto PADDR_BITS = std::numeric_limits<internal_paddr_t>::digits;
/**
* device_off_t
*
* Offset within a device, may be negative for relative offsets.
*/
using device_off_t = int64_t;
using u_device_off_t = uint64_t;
constexpr auto DEVICE_OFF_BITS = PADDR_BITS - DEVICE_ID_BITS;
constexpr auto DEVICE_OFF_MAX =
std::numeric_limits<device_off_t>::max() >> DEVICE_ID_BITS;
constexpr auto DEVICE_OFF_MIN = -(DEVICE_OFF_MAX + 1);
/**
* segment_off_t
*
* Offset within a segment on disk, may be negative for relative offsets.
*/
using segment_off_t = int32_t;
using u_segment_off_t = uint32_t;
constexpr auto SEGMENT_OFF_MAX = std::numeric_limits<segment_off_t>::max();
constexpr auto SEGMENT_OFF_MIN = std::numeric_limits<segment_off_t>::min();
constexpr auto SEGMENT_OFF_BITS = std::numeric_limits<u_segment_off_t>::digits;
static_assert(PADDR_BITS == SEGMENT_ID_BITS + SEGMENT_OFF_BITS);
constexpr auto DEVICE_ID_MASK =
((internal_paddr_t(1) << DEVICE_ID_BITS) - 1) << DEVICE_OFF_BITS;
constexpr auto DEVICE_OFF_MASK =
std::numeric_limits<u_device_off_t>::max() >> DEVICE_ID_BITS;
constexpr auto SEGMENT_ID_MASK =
((internal_paddr_t(1) << SEGMENT_ID_BITS) - 1) << SEGMENT_OFF_BITS;
constexpr auto SEGMENT_OFF_MASK =
(internal_paddr_t(1) << SEGMENT_OFF_BITS) - 1;
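/*
 * Bit layout implied by the masks above:
 *
 *   bits 63..56  device_id_t
 *   bits 55..0   device_off_t (signed, sign-extended on decode)
 *
 * and, for SEGMENT paddrs:
 *
 *   bits 63..32  segment_id_t (device id + device_segment_id)
 *   bits 31..0   segment_off_t
 */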
constexpr internal_paddr_t encode_device_off(device_off_t off) {
return static_cast<internal_paddr_t>(off) & DEVICE_OFF_MASK;
}
constexpr device_off_t decode_device_off(internal_paddr_t addr) {
if (addr & (1ull << (DEVICE_OFF_BITS - 1))) {
return static_cast<device_off_t>(addr | DEVICE_ID_MASK);
} else {
return static_cast<device_off_t>(addr & DEVICE_OFF_MASK);
}
}
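// Round-trip example: encode_device_off() keeps only the low DEVICE_OFF_BITS
// bits and decode_device_off() sign-extends them back, so
//   decode_device_off(encode_device_off(off)) == off
// for any off within [DEVICE_OFF_MIN, DEVICE_OFF_MAX].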
struct seg_paddr_t;
struct blk_paddr_t;
struct res_paddr_t;
struct paddr_t {
public:
// P_ADDR_MAX == P_ADDR_NULL == paddr_t{}
paddr_t() : paddr_t(DEVICE_ID_MAX, device_off_t(0)) {}
static paddr_t make_seg_paddr(
segment_id_t seg,
segment_off_t offset) {
return paddr_t(seg, offset);
}
static paddr_t make_seg_paddr(
device_id_t device,
device_segment_id_t seg,
segment_off_t offset) {
return paddr_t(segment_id_t(device, seg), offset);
}
static paddr_t make_blk_paddr(
device_id_t device,
device_off_t offset) {
assert(device_id_to_paddr_type(device) == paddr_types_t::RANDOM_BLOCK);
return paddr_t(device, offset);
}
static paddr_t make_res_paddr(
device_id_t device,
device_off_t offset) {
assert(device_id_to_paddr_type(device) == paddr_types_t::RESERVED);
return paddr_t(device, offset);
}
void swap(paddr_t &other) {
std::swap(internal_paddr, other.internal_paddr);
}
device_id_t get_device_id() const {
return static_cast<device_id_t>(internal_paddr >> DEVICE_OFF_BITS);
}
paddr_types_t get_addr_type() const {
return device_id_to_paddr_type(get_device_id());
}
paddr_t add_offset(device_off_t o) const;
paddr_t add_relative(paddr_t o) const;
paddr_t add_block_relative(paddr_t o) const {
// special version mainly for documentation purposes
assert(o.is_block_relative());
return add_relative(o);
}
paddr_t add_record_relative(paddr_t o) const {
// special version mainly for documentation purposes
assert(o.is_record_relative());
return add_relative(o);
}
/**
* maybe_relative_to
*
* Helper for the case where an in-memory paddr_t may be
* either block_relative or absolute (not record_relative).
*
* base must be either absolute or record_relative.
*/
paddr_t maybe_relative_to(paddr_t base) const {
assert(!base.is_block_relative());
if (is_block_relative()) {
return base.add_block_relative(*this);
} else {
return *this;
}
}
/**
* block_relative_to
*
* Only defined for record_relative paddr_ts. Yields a
* block_relative address.
*/
paddr_t block_relative_to(paddr_t rhs) const;
// To be compatible with laddr_t operator+
paddr_t operator+(device_off_t o) const {
return add_offset(o);
}
seg_paddr_t& as_seg_paddr();
const seg_paddr_t& as_seg_paddr() const;
blk_paddr_t& as_blk_paddr();
const blk_paddr_t& as_blk_paddr() const;
res_paddr_t& as_res_paddr();
const res_paddr_t& as_res_paddr() const;
bool is_delayed() const {
return get_device_id() == DEVICE_ID_DELAYED;
}
bool is_block_relative() const {
return get_device_id() == DEVICE_ID_BLOCK_RELATIVE;
}
bool is_record_relative() const {
return get_device_id() == DEVICE_ID_RECORD_RELATIVE;
}
bool is_relative() const {
return is_block_relative() || is_record_relative();
}
/// Denotes special null addr
bool is_null() const {
return get_device_id() == DEVICE_ID_NULL;
}
/// Denotes special zero addr
bool is_zero() const {
return get_device_id() == DEVICE_ID_ZERO;
}
/// Denotes the root addr
bool is_root() const {
return get_device_id() == DEVICE_ID_ROOT;
}
/**
* is_real
*
* indicates whether addr reflects a physical location, absolute, relative,
* or delayed. FAKE segments also count as real so as to reflect the way in
* which unit tests use them.
*/
bool is_real() const {
return !is_zero() && !is_null() && !is_root();
}
bool is_absolute() const {
return get_addr_type() != paddr_types_t::RESERVED;
}
bool is_fake() const {
return get_device_id() == DEVICE_ID_FAKE;
}
auto operator<=>(const paddr_t &) const = default;
DENC(paddr_t, v, p) {
DENC_START(1, 1, p);
denc(v.internal_paddr, p);
DENC_FINISH(p);
}
constexpr static paddr_t create_const(
device_id_t d_id, device_off_t offset) {
return paddr_t(d_id, offset, const_construct_t());
}
protected:
internal_paddr_t internal_paddr;
private:
// as seg
paddr_t(segment_id_t seg, segment_off_t offset)
: paddr_t((static_cast<internal_paddr_t>(seg.segment) << SEGMENT_OFF_BITS) |
static_cast<u_segment_off_t>(offset)) {}
// as blk or res
paddr_t(device_id_t d_id, device_off_t offset)
: paddr_t((static_cast<internal_paddr_t>(d_id) << DEVICE_OFF_BITS) |
encode_device_off(offset)) {
assert(offset >= DEVICE_OFF_MIN);
assert(offset <= DEVICE_OFF_MAX);
assert(get_addr_type() != paddr_types_t::SEGMENT);
}
paddr_t(internal_paddr_t val);
struct const_construct_t {};
constexpr paddr_t(device_id_t d_id, device_off_t offset, const_construct_t)
: internal_paddr((static_cast<internal_paddr_t>(d_id) << DEVICE_OFF_BITS) |
static_cast<u_device_off_t>(offset)) {}
friend struct paddr_le_t;
};
std::ostream &operator<<(std::ostream &out, const paddr_t &rhs);
struct seg_paddr_t : public paddr_t {
seg_paddr_t(const seg_paddr_t&) = delete;
seg_paddr_t(seg_paddr_t&) = delete;
seg_paddr_t& operator=(const seg_paddr_t&) = delete;
seg_paddr_t& operator=(seg_paddr_t&) = delete;
segment_id_t get_segment_id() const {
return segment_id_t(static_cast<internal_segment_id_t>(
internal_paddr >> SEGMENT_OFF_BITS));
}
segment_off_t get_segment_off() const {
return segment_off_t(internal_paddr & SEGMENT_OFF_MASK);
}
void set_segment_off(segment_off_t off) {
assert(off >= 0);
internal_paddr = (internal_paddr & SEGMENT_ID_MASK);
internal_paddr |= static_cast<u_segment_off_t>(off);
}
paddr_t add_offset(device_off_t o) const {
device_off_t off = get_segment_off() + o;
assert(off >= 0);
assert(off <= SEGMENT_OFF_MAX);
return paddr_t::make_seg_paddr(
get_segment_id(), static_cast<segment_off_t>(off));
}
};
struct blk_paddr_t : public paddr_t {
blk_paddr_t(const blk_paddr_t&) = delete;
blk_paddr_t(blk_paddr_t&) = delete;
blk_paddr_t& operator=(const blk_paddr_t&) = delete;
blk_paddr_t& operator=(blk_paddr_t&) = delete;
device_off_t get_device_off() const {
return decode_device_off(internal_paddr);
}
void set_device_off(device_off_t off) {
assert(off >= 0);
assert(off <= DEVICE_OFF_MAX);
internal_paddr = (internal_paddr & DEVICE_ID_MASK);
internal_paddr |= encode_device_off(off);
}
paddr_t add_offset(device_off_t o) const {
assert(o >= DEVICE_OFF_MIN);
assert(o <= DEVICE_OFF_MAX);
auto off = get_device_off() + o;
return paddr_t::make_blk_paddr(get_device_id(), off);
}
};
struct res_paddr_t : public paddr_t {
res_paddr_t(const res_paddr_t&) = delete;
res_paddr_t(res_paddr_t&) = delete;
res_paddr_t& operator=(const res_paddr_t&) = delete;
res_paddr_t& operator=(res_paddr_t&) = delete;
device_off_t get_device_off() const {
return decode_device_off(internal_paddr);
}
void set_device_off(device_off_t off) {
assert(has_device_off(get_device_id()));
assert(off >= DEVICE_OFF_MIN);
assert(off <= DEVICE_OFF_MAX);
internal_paddr = (internal_paddr & DEVICE_ID_MASK);
internal_paddr |= encode_device_off(off);
}
paddr_t add_offset(device_off_t o) const {
assert(has_device_off(get_device_id()));
assert(o >= DEVICE_OFF_MIN);
assert(o <= DEVICE_OFF_MAX);
auto off = get_device_off() + o;
return paddr_t::make_res_paddr(get_device_id(), off);
}
paddr_t block_relative_to(const res_paddr_t &rhs) const {
assert(rhs.is_record_relative() && is_record_relative());
auto off = get_device_off() - rhs.get_device_off();
return paddr_t::make_res_paddr(DEVICE_ID_BLOCK_RELATIVE, off);
}
};
constexpr paddr_t P_ADDR_MIN = paddr_t::create_const(0, 0);
// P_ADDR_MAX == P_ADDR_NULL == paddr_t{}
constexpr paddr_t P_ADDR_MAX = paddr_t::create_const(DEVICE_ID_MAX, 0);
constexpr paddr_t P_ADDR_NULL = P_ADDR_MAX;
constexpr paddr_t P_ADDR_ZERO = paddr_t::create_const(DEVICE_ID_ZERO, 0);
constexpr paddr_t P_ADDR_ROOT = paddr_t::create_const(DEVICE_ID_ROOT, 0);
inline paddr_t make_record_relative_paddr(device_off_t off) {
return paddr_t::make_res_paddr(DEVICE_ID_RECORD_RELATIVE, off);
}
inline paddr_t make_block_relative_paddr(device_off_t off) {
return paddr_t::make_res_paddr(DEVICE_ID_BLOCK_RELATIVE, off);
}
inline paddr_t make_fake_paddr(device_off_t off) {
return paddr_t::make_res_paddr(DEVICE_ID_FAKE, off);
}
inline paddr_t make_delayed_temp_paddr(device_off_t off) {
return paddr_t::make_res_paddr(DEVICE_ID_DELAYED, off);
}
inline const seg_paddr_t& paddr_t::as_seg_paddr() const {
assert(get_addr_type() == paddr_types_t::SEGMENT);
return *static_cast<const seg_paddr_t*>(this);
}
inline seg_paddr_t& paddr_t::as_seg_paddr() {
assert(get_addr_type() == paddr_types_t::SEGMENT);
return *static_cast<seg_paddr_t*>(this);
}
inline const blk_paddr_t& paddr_t::as_blk_paddr() const {
assert(get_addr_type() == paddr_types_t::RANDOM_BLOCK);
return *static_cast<const blk_paddr_t*>(this);
}
inline blk_paddr_t& paddr_t::as_blk_paddr() {
assert(get_addr_type() == paddr_types_t::RANDOM_BLOCK);
return *static_cast<blk_paddr_t*>(this);
}
inline const res_paddr_t& paddr_t::as_res_paddr() const {
assert(get_addr_type() == paddr_types_t::RESERVED);
return *static_cast<const res_paddr_t*>(this);
}
inline res_paddr_t& paddr_t::as_res_paddr() {
assert(get_addr_type() == paddr_types_t::RESERVED);
return *static_cast<res_paddr_t*>(this);
}
inline paddr_t::paddr_t(internal_paddr_t val) : internal_paddr(val) {
#ifndef NDEBUG
auto type = get_addr_type();
if (type == paddr_types_t::SEGMENT) {
assert(as_seg_paddr().get_segment_off() >= 0);
} else if (type == paddr_types_t::RANDOM_BLOCK) {
assert(as_blk_paddr().get_device_off() >= 0);
} else {
assert(type == paddr_types_t::RESERVED);
if (!has_device_off(get_device_id())) {
assert(as_res_paddr().get_device_off() == 0);
}
}
#endif
}
#define PADDR_OPERATION(a_type, base, func) \
if (get_addr_type() == a_type) { \
return static_cast<const base*>(this)->func; \
}
inline paddr_t paddr_t::add_offset(device_off_t o) const {
PADDR_OPERATION(paddr_types_t::SEGMENT, seg_paddr_t, add_offset(o))
PADDR_OPERATION(paddr_types_t::RANDOM_BLOCK, blk_paddr_t, add_offset(o))
PADDR_OPERATION(paddr_types_t::RESERVED, res_paddr_t, add_offset(o))
ceph_assert(0 == "not supported type");
return P_ADDR_NULL;
}
inline paddr_t paddr_t::add_relative(paddr_t o) const {
assert(o.is_relative());
auto &res_o = o.as_res_paddr();
return add_offset(res_o.get_device_off());
}
inline paddr_t paddr_t::block_relative_to(paddr_t rhs) const {
return as_res_paddr().block_relative_to(rhs.as_res_paddr());
}
struct __attribute((packed)) paddr_le_t {
ceph_le64 internal_paddr =
ceph_le64(P_ADDR_NULL.internal_paddr);
using orig_type = paddr_t;
paddr_le_t() = default;
paddr_le_t(const paddr_t &addr) : internal_paddr(ceph_le64(addr.internal_paddr)) {}
operator paddr_t() const {
return paddr_t{internal_paddr};
}
};
using objaddr_t = uint32_t;
constexpr objaddr_t OBJ_ADDR_MAX = std::numeric_limits<objaddr_t>::max();
constexpr objaddr_t OBJ_ADDR_NULL = OBJ_ADDR_MAX;
enum class placement_hint_t {
HOT = 0, // The default user hint that expects mutations or retirement
COLD, // Expect no mutations and no retirement in the near future
REWRITE, // Hint for the internal rewrites
NUM_HINTS // Constant for number of hints or as NULL
};
constexpr auto PLACEMENT_HINT_NULL = placement_hint_t::NUM_HINTS;
std::ostream& operator<<(std::ostream& out, placement_hint_t h);
enum class device_type_t : uint8_t {
NONE = 0,
HDD,
SSD,
ZBD, // ZNS SSD or SMR HDD
EPHEMERAL_COLD,
EPHEMERAL_MAIN,
RANDOM_BLOCK_SSD,
RANDOM_BLOCK_EPHEMERAL,
NUM_TYPES
};
std::ostream& operator<<(std::ostream& out, device_type_t t);
bool can_delay_allocation(device_type_t type);
device_type_t string_to_device_type(std::string type);
enum class backend_type_t {
SEGMENTED, // SegmentManager: SSD, ZBD, HDD
RANDOM_BLOCK // RBMDevice: RANDOM_BLOCK_SSD
};
std::ostream& operator<<(std::ostream& out, backend_type_t);
using journal_type_t = backend_type_t;
constexpr backend_type_t get_default_backend_of_device(device_type_t dtype) {
assert(dtype != device_type_t::NONE &&
dtype != device_type_t::NUM_TYPES);
if (dtype >= device_type_t::HDD &&
dtype <= device_type_t::EPHEMERAL_MAIN) {
return backend_type_t::SEGMENTED;
} else {
return backend_type_t::RANDOM_BLOCK;
}
}
/**
* Monotonically increasing identifier for the location of a
* journal_record.
*/
// JOURNAL_SEQ_NULL == JOURNAL_SEQ_MAX == journal_seq_t{}
struct journal_seq_t {
segment_seq_t segment_seq = NULL_SEG_SEQ;
paddr_t offset = P_ADDR_NULL;
void swap(journal_seq_t &other) {
std::swap(segment_seq, other.segment_seq);
std::swap(offset, other.offset);
}
// produces a pseudo journal_seq_t relative to this by offset
journal_seq_t add_offset(
journal_type_t type,
device_off_t off,
device_off_t roll_start,
device_off_t roll_size) const;
device_off_t relative_to(
journal_type_t type,
const journal_seq_t& r,
device_off_t roll_start,
device_off_t roll_size) const;
DENC(journal_seq_t, v, p) {
DENC_START(1, 1, p);
denc(v.segment_seq, p);
denc(v.offset, p);
DENC_FINISH(p);
}
bool operator==(const journal_seq_t &o) const { return cmp(o) == 0; }
bool operator!=(const journal_seq_t &o) const { return cmp(o) != 0; }
bool operator<(const journal_seq_t &o) const { return cmp(o) < 0; }
bool operator<=(const journal_seq_t &o) const { return cmp(o) <= 0; }
bool operator>(const journal_seq_t &o) const { return cmp(o) > 0; }
bool operator>=(const journal_seq_t &o) const { return cmp(o) >= 0; }
private:
int cmp(const journal_seq_t &other) const {
if (segment_seq > other.segment_seq) {
return 1;
} else if (segment_seq < other.segment_seq) {
return -1;
}
using ret_t = std::pair<device_off_t, segment_id_t>;
auto to_pair = [](const paddr_t &addr) -> ret_t {
if (addr.get_addr_type() == paddr_types_t::SEGMENT) {
auto &seg_addr = addr.as_seg_paddr();
return ret_t(seg_addr.get_segment_off(), seg_addr.get_segment_id());
} else if (addr.get_addr_type() == paddr_types_t::RANDOM_BLOCK) {
auto &blk_addr = addr.as_blk_paddr();
return ret_t(blk_addr.get_device_off(), MAX_SEG_ID);
} else if (addr.get_addr_type() == paddr_types_t::RESERVED) {
auto &res_addr = addr.as_res_paddr();
return ret_t(res_addr.get_device_off(), MAX_SEG_ID);
} else {
assert(0 == "impossible");
return ret_t(0, MAX_SEG_ID);
}
};
auto left = to_pair(offset);
auto right = to_pair(other.offset);
if (left > right) {
return 1;
} else if (left < right) {
return -1;
} else {
return 0;
}
}
};
std::ostream &operator<<(std::ostream &out, const journal_seq_t &seq);
constexpr journal_seq_t JOURNAL_SEQ_MIN{
0,
P_ADDR_MIN
};
constexpr journal_seq_t JOURNAL_SEQ_MAX{
MAX_SEG_SEQ,
P_ADDR_MAX
};
// JOURNAL_SEQ_NULL == JOURNAL_SEQ_MAX == journal_seq_t{}
constexpr journal_seq_t JOURNAL_SEQ_NULL = JOURNAL_SEQ_MAX;
// logical addr, see LBAManager, TransactionManager
using laddr_t = uint64_t;
constexpr laddr_t L_ADDR_MIN = std::numeric_limits<laddr_t>::min();
constexpr laddr_t L_ADDR_MAX = std::numeric_limits<laddr_t>::max();
constexpr laddr_t L_ADDR_NULL = L_ADDR_MAX;
constexpr laddr_t L_ADDR_ROOT = L_ADDR_MAX - 1;
constexpr laddr_t L_ADDR_LBAT = L_ADDR_MAX - 2;
struct __attribute((packed)) laddr_le_t {
ceph_le64 laddr = ceph_le64(L_ADDR_NULL);
using orig_type = laddr_t;
laddr_le_t() = default;
laddr_le_t(const laddr_le_t &) = default;
explicit laddr_le_t(const laddr_t &addr)
: laddr(ceph_le64(addr)) {}
operator laddr_t() const {
return laddr_t(laddr);
}
laddr_le_t& operator=(laddr_t addr) {
ceph_le64 val;
val = addr;
laddr = val;
return *this;
}
};
// logical extent length, see LBAManager, TransactionManager
using extent_len_t = uint32_t;
constexpr extent_len_t EXTENT_LEN_MAX =
std::numeric_limits<extent_len_t>::max();
using extent_len_le_t = ceph_le32;
inline extent_len_le_t init_extent_len_le(extent_len_t len) {
return ceph_le32(len);
}
struct laddr_list_t : std::list<std::pair<laddr_t, extent_len_t>> {
template <typename... T>
laddr_list_t(T&&... args)
: std::list<std::pair<laddr_t, extent_len_t>>(std::forward<T>(args)...) {}
};
struct paddr_list_t : std::list<std::pair<paddr_t, extent_len_t>> {
template <typename... T>
paddr_list_t(T&&... args)
: std::list<std::pair<paddr_t, extent_len_t>>(std::forward<T>(args)...) {}
};
std::ostream &operator<<(std::ostream &out, const laddr_list_t &rhs);
std::ostream &operator<<(std::ostream &out, const paddr_list_t &rhs);
/* identifies type of extent, used for interpreting deltas, managing
* writeback.
*
* Note that any new extent type needs to be added to
* Cache::get_extent_by_type in cache.cc
*/
enum class extent_types_t : uint8_t {
ROOT = 0,
LADDR_INTERNAL = 1,
LADDR_LEAF = 2,
  DINK_LADDR_LEAF = 3, // should only be used for unit tests
OMAP_INNER = 4,
OMAP_LEAF = 5,
ONODE_BLOCK_STAGED = 6,
COLL_BLOCK = 7,
OBJECT_DATA_BLOCK = 8,
RETIRED_PLACEHOLDER = 9,
// the following two types are not extent types,
  // they are just used to indicate paddr allocation deltas
ALLOC_INFO = 10,
JOURNAL_TAIL = 11,
// Test Block Types
TEST_BLOCK = 12,
TEST_BLOCK_PHYSICAL = 13,
BACKREF_INTERNAL = 14,
BACKREF_LEAF = 15,
// None and the number of valid extent_types_t
NONE = 16,
};
using extent_types_le_t = uint8_t;
constexpr auto EXTENT_TYPES_MAX = static_cast<uint8_t>(extent_types_t::NONE);
constexpr size_t BACKREF_NODE_SIZE = 4096;
std::ostream &operator<<(std::ostream &out, extent_types_t t);
constexpr bool is_logical_type(extent_types_t type) {
switch (type) {
case extent_types_t::ROOT:
case extent_types_t::LADDR_INTERNAL:
case extent_types_t::LADDR_LEAF:
case extent_types_t::BACKREF_INTERNAL:
case extent_types_t::BACKREF_LEAF:
return false;
default:
return true;
}
}
constexpr bool is_retired_placeholder(extent_types_t type)
{
return type == extent_types_t::RETIRED_PLACEHOLDER;
}
constexpr bool is_lba_node(extent_types_t type)
{
return type == extent_types_t::LADDR_INTERNAL ||
type == extent_types_t::LADDR_LEAF ||
type == extent_types_t::DINK_LADDR_LEAF;
}
constexpr bool is_backref_node(extent_types_t type)
{
return type == extent_types_t::BACKREF_INTERNAL ||
type == extent_types_t::BACKREF_LEAF;
}
constexpr bool is_lba_backref_node(extent_types_t type)
{
return is_lba_node(type) || is_backref_node(type);
}
std::ostream &operator<<(std::ostream &out, extent_types_t t);
/**
* rewrite_gen_t
*
 * The goal is to group similarly aged extents into the same segment for a better
 * bimodal utilization distribution, and also into the same device tier. For EPM,
* it has the flexibility to make placement decisions by re-assigning the
* generation. And each non-inline generation will be statically mapped to a
* writer in EPM.
*
* All the fresh and dirty extents start with INIT_GENERATION upon allocation,
* and they will be assigned to INLINE/OOL generation by EPM before the initial
* writes. After that, the generation can only be increased upon rewrite.
*
* Note, although EPM can re-assign the generations according to the tiering
* status, it cannot decrease the generation for the correctness of space
 * reservation. It may choose to assign a larger generation if the extent is
 * hinted cold, or if it wants to evict extents to the cold tier. And it may
 * choose not to increase the generation if it wants to keep the hot tier as
 * filled as possible.
*/
using rewrite_gen_t = uint8_t;
// INIT_GENERATION requires EPM decision to INLINE/OOL_GENERATION
constexpr rewrite_gen_t INIT_GENERATION = 0;
constexpr rewrite_gen_t INLINE_GENERATION = 1; // to the journal
constexpr rewrite_gen_t OOL_GENERATION = 2;
// All the rewritten extents start with MIN_REWRITE_GENERATION
constexpr rewrite_gen_t MIN_REWRITE_GENERATION = 3;
// without cold tier, the largest generation is less than MIN_COLD_GENERATION
constexpr rewrite_gen_t MIN_COLD_GENERATION = 5;
constexpr rewrite_gen_t MAX_REWRITE_GENERATION = 7;
constexpr rewrite_gen_t REWRITE_GENERATIONS = MAX_REWRITE_GENERATION + 1;
constexpr rewrite_gen_t NULL_GENERATION =
std::numeric_limits<rewrite_gen_t>::max();
struct rewrite_gen_printer_t {
rewrite_gen_t gen;
};
std::ostream &operator<<(std::ostream &out, rewrite_gen_printer_t gen);
constexpr std::size_t generation_to_writer(rewrite_gen_t gen) {
// caller to assert the gen is in the reasonable range
return gen - OOL_GENERATION;
}
// before EPM decision
constexpr bool is_target_rewrite_generation(rewrite_gen_t gen) {
return gen == INIT_GENERATION ||
(gen >= MIN_REWRITE_GENERATION &&
gen <= REWRITE_GENERATIONS);
}
// after EPM decision
constexpr bool is_rewrite_generation(rewrite_gen_t gen) {
return gen >= INLINE_GENERATION &&
gen < REWRITE_GENERATIONS;
}
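// Writer mapping implied by generation_to_writer() above:
//   OOL_GENERATION (2)         -> writer 0
//   MIN_REWRITE_GENERATION (3) -> writer 1
//   ...
//   MAX_REWRITE_GENERATION (7) -> writer 5
// INIT and INLINE generations are resolved before any OOL writer is involved.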
enum class data_category_t : uint8_t {
METADATA = 0,
DATA,
NUM
};
std::ostream &operator<<(std::ostream &out, data_category_t c);
constexpr data_category_t get_extent_category(extent_types_t type) {
if (type == extent_types_t::OBJECT_DATA_BLOCK ||
type == extent_types_t::TEST_BLOCK) {
return data_category_t::DATA;
} else {
return data_category_t::METADATA;
}
}
// type for extent modification time, milliseconds since the epoch
using sea_time_point = seastar::lowres_system_clock::time_point;
using sea_duration = seastar::lowres_system_clock::duration;
using mod_time_point_t = int64_t;
constexpr mod_time_point_t
timepoint_to_mod(const sea_time_point &t) {
return std::chrono::duration_cast<std::chrono::milliseconds>(
t.time_since_epoch()).count();
}
constexpr sea_time_point
mod_to_timepoint(mod_time_point_t t) {
return sea_time_point(std::chrono::duration_cast<sea_duration>(
std::chrono::milliseconds(t)));
}
constexpr auto NULL_TIME = sea_time_point();
constexpr auto NULL_MOD_TIME = timepoint_to_mod(NULL_TIME);
struct sea_time_point_printer_t {
sea_time_point tp;
};
std::ostream &operator<<(std::ostream &out, sea_time_point_printer_t tp);
struct mod_time_point_printer_t {
mod_time_point_t tp;
};
std::ostream &operator<<(std::ostream &out, mod_time_point_printer_t tp);
constexpr sea_time_point
get_average_time(const sea_time_point& t1, std::size_t n1,
const sea_time_point& t2, std::size_t n2) {
assert(t1 != NULL_TIME);
assert(t2 != NULL_TIME);
auto new_size = n1 + n2;
assert(new_size > 0);
auto c1 = t1.time_since_epoch().count();
auto c2 = t2.time_since_epoch().count();
auto c_ret = c1 / new_size * n1 + c2 / new_size * n2;
return sea_time_point(sea_duration(c_ret));
}
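// get_average_time() divides before multiplying to keep the intermediate
// values in range, at the cost of slight truncation. Worked example:
// t1 = 1000ms with n1 = 1 and t2 = 2000ms with n2 = 3 yield
//   1000/4*1 + 2000/4*3 = 250 + 1500 = 1750ms,
// the weighted average of the two modify times.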
/* description of a new physical extent */
struct extent_t {
extent_types_t type; ///< type of extent
laddr_t addr; ///< laddr of extent (L_ADDR_NULL for non-logical)
ceph::bufferlist bl; ///< payload, bl.length() == length, aligned
};
using extent_version_t = uint32_t;
/* description of a mutation to a physical extent */
struct delta_info_t {
extent_types_t type = extent_types_t::NONE; ///< delta type
paddr_t paddr; ///< physical address
laddr_t laddr = L_ADDR_NULL; ///< logical address
uint32_t prev_crc = 0;
uint32_t final_crc = 0;
extent_len_t length = 0; ///< extent length
extent_version_t pversion; ///< prior version
segment_seq_t ext_seq; ///< seq of the extent's segment
segment_type_t seg_type;
ceph::bufferlist bl; ///< payload
DENC(delta_info_t, v, p) {
DENC_START(1, 1, p);
denc(v.type, p);
denc(v.paddr, p);
denc(v.laddr, p);
denc(v.prev_crc, p);
denc(v.final_crc, p);
denc(v.length, p);
denc(v.pversion, p);
denc(v.ext_seq, p);
denc(v.seg_type, p);
denc(v.bl, p);
DENC_FINISH(p);
}
bool operator==(const delta_info_t &rhs) const {
return (
type == rhs.type &&
paddr == rhs.paddr &&
laddr == rhs.laddr &&
prev_crc == rhs.prev_crc &&
final_crc == rhs.final_crc &&
length == rhs.length &&
pversion == rhs.pversion &&
ext_seq == rhs.ext_seq &&
bl == rhs.bl
);
}
};
std::ostream &operator<<(std::ostream &out, const delta_info_t &delta);
/* contains the latest journal tail information */
struct journal_tail_delta_t {
journal_seq_t alloc_tail;
journal_seq_t dirty_tail;
DENC(journal_tail_delta_t, v, p) {
DENC_START(1, 1, p);
denc(v.alloc_tail, p);
denc(v.dirty_tail, p);
DENC_FINISH(p);
}
};
std::ostream &operator<<(std::ostream &out, const journal_tail_delta_t &delta);
class object_data_t {
laddr_t reserved_data_base = L_ADDR_NULL;
extent_len_t reserved_data_len = 0;
bool dirty = false;
public:
object_data_t(
laddr_t reserved_data_base,
extent_len_t reserved_data_len)
: reserved_data_base(reserved_data_base),
reserved_data_len(reserved_data_len) {}
laddr_t get_reserved_data_base() const {
return reserved_data_base;
}
extent_len_t get_reserved_data_len() const {
return reserved_data_len;
}
bool is_null() const {
return reserved_data_base == L_ADDR_NULL;
}
bool must_update() const {
return dirty;
}
void update_reserved(
laddr_t base,
extent_len_t len) {
dirty = true;
reserved_data_base = base;
reserved_data_len = len;
}
void update_len(
extent_len_t len) {
dirty = true;
reserved_data_len = len;
}
void clear() {
dirty = true;
reserved_data_base = L_ADDR_NULL;
reserved_data_len = 0;
}
};
struct __attribute__((packed)) object_data_le_t {
laddr_le_t reserved_data_base = laddr_le_t(L_ADDR_NULL);
extent_len_le_t reserved_data_len = init_extent_len_le(0);
void update(const object_data_t &nroot) {
reserved_data_base = nroot.get_reserved_data_base();
reserved_data_len = init_extent_len_le(nroot.get_reserved_data_len());
}
object_data_t get() const {
return object_data_t(
reserved_data_base,
reserved_data_len);
}
};
struct omap_root_t {
laddr_t addr = L_ADDR_NULL;
depth_t depth = 0;
laddr_t hint = L_ADDR_MIN;
bool mutated = false;
omap_root_t() = default;
omap_root_t(laddr_t addr, depth_t depth, laddr_t addr_min)
: addr(addr),
depth(depth),
hint(addr_min) {}
omap_root_t(const omap_root_t &o) = default;
omap_root_t(omap_root_t &&o) = default;
omap_root_t &operator=(const omap_root_t &o) = default;
omap_root_t &operator=(omap_root_t &&o) = default;
bool is_null() const {
return addr == L_ADDR_NULL;
}
bool must_update() const {
return mutated;
}
void update(laddr_t _addr, depth_t _depth, laddr_t _hint) {
mutated = true;
addr = _addr;
depth = _depth;
hint = _hint;
}
laddr_t get_location() const {
return addr;
}
depth_t get_depth() const {
return depth;
}
laddr_t get_hint() const {
return hint;
}
};
std::ostream &operator<<(std::ostream &out, const omap_root_t &root);
class __attribute__((packed)) omap_root_le_t {
laddr_le_t addr = laddr_le_t(L_ADDR_NULL);
depth_le_t depth = init_depth_le(0);
public:
omap_root_le_t() = default;
omap_root_le_t(laddr_t addr, depth_t depth)
: addr(addr), depth(init_depth_le(depth)) {}
omap_root_le_t(const omap_root_le_t &o) = default;
omap_root_le_t(omap_root_le_t &&o) = default;
omap_root_le_t &operator=(const omap_root_le_t &o) = default;
omap_root_le_t &operator=(omap_root_le_t &&o) = default;
void update(const omap_root_t &nroot) {
addr = nroot.get_location();
depth = init_depth_le(nroot.get_depth());
}
omap_root_t get(laddr_t hint) const {
return omap_root_t(addr, depth, hint);
}
};
/**
* phy_tree_root_t
*/
class __attribute__((packed)) phy_tree_root_t {
paddr_le_t root_addr;
  depth_le_t depth = init_depth_le(0);
public:
phy_tree_root_t() = default;
phy_tree_root_t(paddr_t addr, depth_t depth)
: root_addr(addr), depth(init_depth_le(depth)) {}
phy_tree_root_t(const phy_tree_root_t &o) = default;
phy_tree_root_t(phy_tree_root_t &&o) = default;
phy_tree_root_t &operator=(const phy_tree_root_t &o) = default;
phy_tree_root_t &operator=(phy_tree_root_t &&o) = default;
paddr_t get_location() const {
return root_addr;
}
void set_location(paddr_t location) {
root_addr = location;
}
depth_t get_depth() const {
return depth;
}
void set_depth(depth_t ndepth) {
depth = ndepth;
}
void adjust_addrs_from_base(paddr_t base) {
paddr_t _root_addr = root_addr;
if (_root_addr.is_relative()) {
root_addr = base.add_record_relative(_root_addr);
}
}
};
class coll_root_t {
laddr_t addr = L_ADDR_NULL;
extent_len_t size = 0;
bool mutated = false;
public:
coll_root_t() = default;
coll_root_t(laddr_t addr, extent_len_t size) : addr(addr), size(size) {}
coll_root_t(const coll_root_t &o) = default;
coll_root_t(coll_root_t &&o) = default;
coll_root_t &operator=(const coll_root_t &o) = default;
coll_root_t &operator=(coll_root_t &&o) = default;
bool must_update() const {
return mutated;
}
void update(laddr_t _addr, extent_len_t _s) {
mutated = true;
addr = _addr;
size = _s;
}
laddr_t get_location() const {
return addr;
}
extent_len_t get_size() const {
return size;
}
};
/**
* coll_root_le_t
*
* Information for locating CollectionManager information, to be embedded
* in root block.
*/
class __attribute__((packed)) coll_root_le_t {
laddr_le_t addr;
extent_len_le_t size = init_extent_len_le(0);
public:
coll_root_le_t() = default;
coll_root_le_t(laddr_t laddr, extent_len_t size)
: addr(laddr), size(init_extent_len_le(size)) {}
coll_root_le_t(const coll_root_le_t &o) = default;
coll_root_le_t(coll_root_le_t &&o) = default;
coll_root_le_t &operator=(const coll_root_le_t &o) = default;
coll_root_le_t &operator=(coll_root_le_t &&o) = default;
void update(const coll_root_t &nroot) {
addr = nroot.get_location();
size = init_extent_len_le(nroot.get_size());
}
coll_root_t get() const {
return coll_root_t(addr, size);
}
};
using lba_root_t = phy_tree_root_t;
using backref_root_t = phy_tree_root_t;
/**
* root_t
*
* Contains information required to find metadata roots.
* TODO: generalize this to permit more than one lba_manager implementation
*/
struct __attribute__((packed)) root_t {
using meta_t = std::map<std::string, std::string>;
static constexpr int MAX_META_LENGTH = 1024;
backref_root_t backref_root;
lba_root_t lba_root;
laddr_le_t onode_root;
coll_root_le_t collection_root;
char meta[MAX_META_LENGTH];
root_t() {
set_meta(meta_t{});
}
void adjust_addrs_from_base(paddr_t base) {
lba_root.adjust_addrs_from_base(base);
backref_root.adjust_addrs_from_base(base);
}
meta_t get_meta() {
bufferlist bl;
bl.append(ceph::buffer::create_static(MAX_META_LENGTH, meta));
meta_t ret;
auto iter = bl.cbegin();
decode(ret, iter);
return ret;
}
void set_meta(const meta_t &m) {
ceph::bufferlist bl;
encode(m, bl);
ceph_assert(bl.length() < MAX_META_LENGTH);
bl.rebuild();
auto &bptr = bl.front();
::memset(meta, 0, MAX_META_LENGTH);
::memcpy(meta, bptr.c_str(), bl.length());
}
};
struct alloc_blk_t {
alloc_blk_t(
paddr_t paddr,
laddr_t laddr,
extent_len_t len,
extent_types_t type)
: paddr(paddr), laddr(laddr), len(len), type(type)
{}
explicit alloc_blk_t() = default;
paddr_t paddr = P_ADDR_NULL;
laddr_t laddr = L_ADDR_NULL;
extent_len_t len = 0;
extent_types_t type = extent_types_t::ROOT;
DENC(alloc_blk_t, v, p) {
DENC_START(1, 1, p);
denc(v.paddr, p);
denc(v.laddr, p);
denc(v.len, p);
denc(v.type, p);
DENC_FINISH(p);
}
};
// use absolute address
struct alloc_delta_t {
enum class op_types_t : uint8_t {
NONE = 0,
SET = 1,
CLEAR = 2
};
std::vector<alloc_blk_t> alloc_blk_ranges;
op_types_t op = op_types_t::NONE;
alloc_delta_t() = default;
DENC(alloc_delta_t, v, p) {
DENC_START(1, 1, p);
denc(v.alloc_blk_ranges, p);
denc(v.op, p);
DENC_FINISH(p);
}
};
struct extent_info_t {
extent_types_t type = extent_types_t::NONE;
laddr_t addr = L_ADDR_NULL;
extent_len_t len = 0;
extent_info_t() = default;
extent_info_t(const extent_t &et)
: type(et.type), addr(et.addr),
len(et.bl.length())
{}
DENC(extent_info_t, v, p) {
DENC_START(1, 1, p);
denc(v.type, p);
denc(v.addr, p);
denc(v.len, p);
DENC_FINISH(p);
}
};
std::ostream &operator<<(std::ostream &out, const extent_info_t &header);
using segment_nonce_t = uint32_t;
/**
* Segment header
*
 * Every segment contains an encoded segment_header_t in its first block.
* Our strategy for finding the journal replay point is:
* 1) Find the segment with the highest journal_segment_seq
* 2) Get dirty_tail and alloc_tail from the segment header
* 3) Scan forward to update tails from journal_tail_delta_t
* 4) Replay from the latest tails
*/
struct segment_header_t {
segment_seq_t segment_seq;
segment_id_t physical_segment_id; // debugging
journal_seq_t dirty_tail;
journal_seq_t alloc_tail;
segment_nonce_t segment_nonce;
segment_type_t type;
data_category_t category;
rewrite_gen_t generation;
segment_type_t get_type() const {
return type;
}
DENC(segment_header_t, v, p) {
DENC_START(1, 1, p);
denc(v.segment_seq, p);
denc(v.physical_segment_id, p);
denc(v.dirty_tail, p);
denc(v.alloc_tail, p);
denc(v.segment_nonce, p);
denc(v.type, p);
denc(v.category, p);
denc(v.generation, p);
DENC_FINISH(p);
}
};
std::ostream &operator<<(std::ostream &out, const segment_header_t &header);
struct segment_tail_t {
segment_seq_t segment_seq;
segment_id_t physical_segment_id; // debugging
segment_nonce_t segment_nonce;
segment_type_t type;
mod_time_point_t modify_time;
std::size_t num_extents;
segment_type_t get_type() const {
return type;
}
DENC(segment_tail_t, v, p) {
DENC_START(1, 1, p);
denc(v.segment_seq, p);
denc(v.physical_segment_id, p);
denc(v.segment_nonce, p);
denc(v.type, p);
denc(v.modify_time, p);
denc(v.num_extents, p);
DENC_FINISH(p);
}
};
std::ostream &operator<<(std::ostream &out, const segment_tail_t &tail);
enum class transaction_type_t : uint8_t {
MUTATE = 0,
READ, // including weak and non-weak read transactions
TRIM_DIRTY,
TRIM_ALLOC,
CLEANER_MAIN,
CLEANER_COLD,
MAX
};
static constexpr auto TRANSACTION_TYPE_NULL = transaction_type_t::MAX;
static constexpr auto TRANSACTION_TYPE_MAX = static_cast<std::size_t>(
transaction_type_t::MAX);
std::ostream &operator<<(std::ostream &os, transaction_type_t type);
constexpr bool is_valid_transaction(transaction_type_t type) {
return type < transaction_type_t::MAX;
}
constexpr bool is_background_transaction(transaction_type_t type) {
return (type >= transaction_type_t::TRIM_DIRTY &&
type < transaction_type_t::MAX);
}
constexpr bool is_trim_transaction(transaction_type_t type) {
return (type == transaction_type_t::TRIM_DIRTY ||
type == transaction_type_t::TRIM_ALLOC);
}
struct record_size_t {
extent_len_t plain_mdlength = 0; // mdlength without the record header
extent_len_t dlength = 0;
extent_len_t get_raw_mdlength() const;
bool is_empty() const {
return plain_mdlength == 0 &&
dlength == 0;
}
void account_extent(extent_len_t extent_len);
void account(const extent_t& extent) {
account_extent(extent.bl.length());
}
void account(const delta_info_t& delta);
bool operator==(const record_size_t &) const = default;
};
std::ostream &operator<<(std::ostream&, const record_size_t&);
struct record_t {
transaction_type_t type = TRANSACTION_TYPE_NULL;
std::vector<extent_t> extents;
std::vector<delta_info_t> deltas;
record_size_t size;
sea_time_point modify_time = NULL_TIME;
record_t(transaction_type_t type) : type{type} { }
// unit test only
record_t() {
type = transaction_type_t::MUTATE;
}
// unit test only
record_t(std::vector<extent_t>&& _extents,
std::vector<delta_info_t>&& _deltas) {
auto modify_time = seastar::lowres_system_clock::now();
for (auto& e: _extents) {
push_back(std::move(e), modify_time);
}
for (auto& d: _deltas) {
push_back(std::move(d));
}
type = transaction_type_t::MUTATE;
}
bool is_empty() const {
return extents.size() == 0 &&
deltas.size() == 0;
}
std::size_t get_delta_size() const {
auto delta_size = std::accumulate(
deltas.begin(), deltas.end(), 0,
[](uint64_t sum, auto& delta) {
return sum + delta.bl.length();
}
);
return delta_size;
}
void push_back(extent_t&& extent, sea_time_point &t) {
ceph_assert(t != NULL_TIME);
if (extents.size() == 0) {
assert(modify_time == NULL_TIME);
modify_time = t;
} else {
modify_time = get_average_time(modify_time, extents.size(), t, 1);
}
size.account(extent);
extents.push_back(std::move(extent));
}
void push_back(delta_info_t&& delta) {
size.account(delta);
deltas.push_back(std::move(delta));
}
};
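// Usage sketch (illustrative):
//   record_t rec(transaction_type_t::MUTATE);
//   auto now = seastar::lowres_system_clock::now();
//   rec.push_back(std::move(extent), now);  // accounts size and modify_time
//   rec.push_back(std::move(delta));        // accounted as metadata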
std::ostream &operator<<(std::ostream&, const record_t&);
struct record_header_t {
transaction_type_t type;
uint32_t deltas; // number of deltas
uint32_t extents; // number of extents
mod_time_point_t modify_time;
DENC(record_header_t, v, p) {
DENC_START(1, 1, p);
denc(v.type, p);
denc(v.deltas, p);
denc(v.extents, p);
denc(v.modify_time, p);
DENC_FINISH(p);
}
};
std::ostream &operator<<(std::ostream&, const record_header_t&);
struct record_group_header_t {
uint32_t records;
extent_len_t mdlength; // block aligned, length of metadata
extent_len_t dlength; // block aligned, length of data
segment_nonce_t segment_nonce;// nonce of containing segment
journal_seq_t committed_to; // records prior to committed_to have been
// fully written, maybe in another segment.
checksum_t data_crc; // crc of data payload
DENC(record_group_header_t, v, p) {
DENC_START(1, 1, p);
denc(v.records, p);
denc(v.mdlength, p);
denc(v.dlength, p);
denc(v.segment_nonce, p);
denc(v.committed_to, p);
denc(v.data_crc, p);
DENC_FINISH(p);
}
};
std::ostream& operator<<(std::ostream&, const record_group_header_t&);
struct record_group_size_t {
extent_len_t plain_mdlength = 0; // mdlength without the group header
extent_len_t dlength = 0;
extent_len_t block_size = 0;
record_group_size_t() = default;
record_group_size_t(
const record_size_t& rsize,
extent_len_t block_size) {
account(rsize, block_size);
}
extent_len_t get_raw_mdlength() const;
extent_len_t get_mdlength() const {
assert(block_size > 0);
return p2roundup(get_raw_mdlength(), block_size);
}
extent_len_t get_encoded_length() const {
assert(block_size > 0);
assert(dlength % block_size == 0);
return get_mdlength() + dlength;
}
record_group_size_t get_encoded_length_after(
const record_size_t& rsize,
extent_len_t block_size) const {
record_group_size_t tmp = *this;
tmp.account(rsize, block_size);
return tmp;
}
double get_fullness() const {
assert(block_size > 0);
return ((double)(get_raw_mdlength() + dlength) /
get_encoded_length());
}
void account(const record_size_t& rsize,
extent_len_t block_size);
bool operator==(const record_group_size_t &) const = default;
};
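// Worked example: with block_size = 4096, a raw metadata length of 5000 bytes
// and dlength = 16384:
//   get_mdlength()       = p2roundup(5000, 4096) = 8192
//   get_encoded_length() = 8192 + 16384          = 24576
//   get_fullness()       = (5000 + 16384) / 24576 ≈ 0.87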
std::ostream& operator<<(std::ostream&, const record_group_size_t&);
struct record_group_t {
std::vector<record_t> records;
record_group_size_t size;
record_group_t() = default;
record_group_t(
record_t&& record,
extent_len_t block_size) {
push_back(std::move(record), block_size);
}
std::size_t get_size() const {
return records.size();
}
void push_back(
record_t&& record,
extent_len_t block_size) {
size.account(record.size, block_size);
records.push_back(std::move(record));
assert(size.get_encoded_length() < SEGMENT_OFF_MAX);
}
void reserve(std::size_t limit) {
records.reserve(limit);
}
void clear() {
records.clear();
size = {};
}
};
std::ostream& operator<<(std::ostream&, const record_group_t&);
ceph::bufferlist encode_record(
record_t&& record,
extent_len_t block_size,
const journal_seq_t& committed_to,
segment_nonce_t current_segment_nonce);
ceph::bufferlist encode_records(
record_group_t& record_group,
const journal_seq_t& committed_to,
segment_nonce_t current_segment_nonce);
std::optional<record_group_header_t>
try_decode_records_header(
const ceph::bufferlist& header_bl,
segment_nonce_t expected_nonce);
bool validate_records_metadata(
const ceph::bufferlist& md_bl);
bool validate_records_data(
const record_group_header_t& header,
const ceph::bufferlist& data_bl);
struct record_extent_infos_t {
record_header_t header;
std::vector<extent_info_t> extent_infos;
};
std::optional<std::vector<record_extent_infos_t> >
try_decode_extent_infos(
const record_group_header_t& header,
const ceph::bufferlist& md_bl);
std::optional<std::vector<record_header_t>>
try_decode_record_headers(
const record_group_header_t& header,
const ceph::bufferlist& md_bl);
struct record_deltas_t {
paddr_t record_block_base;
std::vector<std::pair<sea_time_point, delta_info_t>> deltas;
};
std::optional<std::vector<record_deltas_t> >
try_decode_deltas(
const record_group_header_t& header,
const ceph::bufferlist& md_bl,
paddr_t record_block_base);
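// The read side mirrors encode_records(): try_decode_records_header() on the
// first block recovers the group header (the expected nonce distinguishes
// records belonging to the current use of the segment),
// validate_records_metadata() checks the full metadata region, the per-record
// contents are recovered with try_decode_record_headers() /
// try_decode_extent_infos() and try_decode_deltas(), and
// validate_records_data() checks the data payload against the header's
// data_crc.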
struct write_result_t {
journal_seq_t start_seq;
extent_len_t length;
journal_seq_t get_end_seq() const {
return journal_seq_t{
start_seq.segment_seq,
start_seq.offset.add_offset(length)};
}
};
std::ostream& operator<<(std::ostream&, const write_result_t&);
struct record_locator_t {
paddr_t record_block_base;
write_result_t write_result;
};
std::ostream& operator<<(std::ostream&, const record_locator_t&);
/// scan segment for end incrementally
struct scan_valid_records_cursor {
bool last_valid_header_found = false;
journal_seq_t seq;
journal_seq_t last_committed;
std::size_t num_consumed_records = 0;
struct found_record_group_t {
paddr_t offset;
record_group_header_t header;
bufferlist mdbuffer;
found_record_group_t(
paddr_t offset,
const record_group_header_t &header,
const bufferlist &mdbuffer)
: offset(offset), header(header), mdbuffer(mdbuffer) {}
};
std::deque<found_record_group_t> pending_record_groups;
bool is_complete() const {
return last_valid_header_found && pending_record_groups.empty();
}
segment_id_t get_segment_id() const {
return seq.offset.as_seg_paddr().get_segment_id();
}
segment_off_t get_segment_offset() const {
return seq.offset.as_seg_paddr().get_segment_off();
}
void increment_seq(segment_off_t off) {
auto& seg_addr = seq.offset.as_seg_paddr();
seg_addr.set_segment_off(
seg_addr.get_segment_off() + off);
}
void emplace_record_group(const record_group_header_t&, ceph::bufferlist&&);
void pop_record_group() {
assert(!pending_record_groups.empty());
++num_consumed_records;
pending_record_groups.pop_front();
}
scan_valid_records_cursor(
journal_seq_t seq)
: seq(seq) {}
};
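// The cursor is driven incrementally by the scanner: seq advances via
// increment_seq() as header blocks are consumed, newly discovered groups are
// queued with emplace_record_group(), and fully processed groups are retired
// with pop_record_group() until is_complete() observes that the last valid
// header has been found and no pending groups remain.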
std::ostream& operator<<(std::ostream&, const scan_valid_records_cursor&);
}
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::seastore_meta_t)
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::segment_id_t)
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::paddr_t)
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::journal_seq_t)
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::delta_info_t)
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::journal_tail_delta_t)
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::record_header_t)
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::record_group_header_t)
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::extent_info_t)
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::segment_header_t)
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::alloc_blk_t)
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::alloc_delta_t)
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::segment_tail_t)
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::data_category_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::delta_info_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::device_id_printer_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::extent_types_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::journal_seq_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::journal_tail_delta_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::laddr_list_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::omap_root_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::paddr_list_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::paddr_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::placement_hint_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::device_type_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::record_group_header_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::record_group_size_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::record_header_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::record_locator_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::record_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::rewrite_gen_printer_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::scan_valid_records_cursor> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::sea_time_point_printer_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::segment_header_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::segment_id_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::segment_seq_printer_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::segment_tail_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::segment_type_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::transaction_type_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::write_result_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<ceph::buffer::list> : fmt::ostream_formatter {};
#endif
| 62,336 | 27.980474 | 112 | h |
null | ceph-main/src/crimson/os/seastore/segment_manager.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "crimson/os/seastore/segment_manager.h"
#include "crimson/os/seastore/segment_manager/block.h"
#include "crimson/os/seastore/logging.h"
#ifdef HAVE_ZNS
#include "crimson/os/seastore/segment_manager/zbd.h"
SET_SUBSYS(seastore_device);
#endif
namespace crimson::os::seastore {
std::ostream& operator<<(std::ostream& out, const block_shard_info_t& sf)
{
out << "("
<< "size=" << sf.size
<< ", segments=" <<sf.segments
<< ", tracker_offset=" <<sf.tracker_offset
<< ", first_segment_offset=" <<sf.first_segment_offset
<<")";
return out;
}
std::ostream& operator<<(std::ostream& out, const block_sm_superblock_t& sb)
{
out << "superblock("
<< "shard_num=" << sb.shard_num
<< ", segment_size=" << sb.segment_size
<< ", block_size=" << sb.block_size
<< ", shard_info:";
for (auto &sf : sb.shard_infos) {
out << sf
<< ",";
}
out << "config=" << sb.config
<< ")";
return out;
}
std::ostream& operator<<(std::ostream &out, Segment::segment_state_t s)
{
using state_t = Segment::segment_state_t;
switch (s) {
case state_t::EMPTY:
return out << "EMPTY";
case state_t::OPEN:
return out << "OPEN";
case state_t::CLOSED:
return out << "CLOSED";
default:
return out << "INVALID_SEGMENT_STATE!";
}
}
seastar::future<crimson::os::seastore::SegmentManagerRef>
SegmentManager::get_segment_manager(
const std::string &device, device_type_t dtype)
{
#ifdef HAVE_ZNS
LOG_PREFIX(SegmentManager::get_segment_manager);
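  // Probe the device for zones: a non-zero BLKGETNRZONES result indicates a
  // zoned (ZBD) device and selects the zone-backed segment manager, otherwise
  // the plain block-backed segment manager is used.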
return seastar::do_with(
static_cast<size_t>(0),
[FNAME,
dtype,
device](auto &nr_zones) {
return seastar::open_file_dma(
device + "/block",
seastar::open_flags::rw
).then([FNAME,
dtype,
device,
&nr_zones](auto file) {
return seastar::do_with(
file,
[&nr_zones](auto &f) -> seastar::future<int> {
ceph_assert(f);
return f.ioctl(BLKGETNRZONES, (void *)&nr_zones);
});
}).then([FNAME,
dtype,
device,
&nr_zones](auto ret) -> crimson::os::seastore::SegmentManagerRef {
crimson::os::seastore::SegmentManagerRef sm;
INFO("Found {} zones.", nr_zones);
if (nr_zones != 0) {
return std::make_unique<
segment_manager::zbd::ZBDSegmentManager
>(device + "/block");
} else {
return std::make_unique<
segment_manager::block::BlockSegmentManager
>(device + "/block", dtype);
}
});
});
#else
return seastar::make_ready_future<crimson::os::seastore::SegmentManagerRef>(
std::make_unique<
segment_manager::block::BlockSegmentManager
>(device + "/block", dtype));
#endif
}
}
| 2,743 | 24.407407 | 78 | cc |
null | ceph-main/src/crimson/os/seastore/segment_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iosfwd>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <boost/iterator/counting_iterator.hpp>
#include <seastar/core/future.hh>
#include "include/buffer_fwd.h"
#include "include/ceph_assert.h"
#include "crimson/common/config_proxy.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/osd/exceptions.h"
#include "device.h"
namespace crimson::os::seastore {
using std::vector;
struct block_shard_info_t {
std::size_t size;
std::size_t segments;
uint64_t tracker_offset;
uint64_t first_segment_offset;
DENC(block_shard_info_t, v, p) {
DENC_START(1, 1, p);
denc(v.size, p);
denc(v.segments, p);
denc(v.tracker_offset, p);
denc(v.first_segment_offset, p);
DENC_FINISH(p);
}
};
struct block_sm_superblock_t {
unsigned int shard_num = 0;
size_t segment_size = 0;
size_t block_size = 0;
std::vector<block_shard_info_t> shard_infos;
device_config_t config;
DENC(block_sm_superblock_t, v, p) {
DENC_START(1, 1, p);
denc(v.shard_num, p);
denc(v.segment_size, p);
denc(v.block_size, p);
denc(v.shard_infos, p);
denc(v.config, p);
DENC_FINISH(p);
}
void validate() const {
ceph_assert(shard_num == seastar::smp::count);
ceph_assert(block_size > 0);
ceph_assert(segment_size > 0 &&
segment_size % block_size == 0);
ceph_assert_always(segment_size <= SEGMENT_OFF_MAX);
for (unsigned int i = 0; i < seastar::smp::count; i ++) {
ceph_assert(shard_infos[i].size > segment_size &&
shard_infos[i].size % block_size == 0);
ceph_assert_always(shard_infos[i].size <= DEVICE_OFF_MAX);
ceph_assert(shard_infos[i].segments > 0);
ceph_assert_always(shard_infos[i].segments <= DEVICE_SEGMENT_ID_MAX);
ceph_assert(shard_infos[i].tracker_offset > 0 &&
shard_infos[i].tracker_offset % block_size == 0);
ceph_assert(shard_infos[i].first_segment_offset > shard_infos[i].tracker_offset &&
shard_infos[i].first_segment_offset % block_size == 0);
}
ceph_assert(config.spec.magic != 0);
ceph_assert(get_default_backend_of_device(config.spec.dtype) ==
backend_type_t::SEGMENTED);
ceph_assert(config.spec.id <= DEVICE_ID_MAX_VALID);
if (!config.major_dev) {
ceph_assert(config.secondary_devices.size() == 0);
}
for (const auto& [k, v] : config.secondary_devices) {
ceph_assert(k != config.spec.id);
ceph_assert(k <= DEVICE_ID_MAX_VALID);
ceph_assert(k == v.id);
ceph_assert(v.magic != 0);
ceph_assert(v.dtype > device_type_t::NONE);
ceph_assert(v.dtype < device_type_t::NUM_TYPES);
}
}
};
std::ostream& operator<<(std::ostream&, const block_shard_info_t&);
std::ostream& operator<<(std::ostream&, const block_sm_superblock_t&);
class Segment : public boost::intrusive_ref_counter<
Segment,
boost::thread_unsafe_counter>{
public:
enum class segment_state_t : uint8_t {
EMPTY = 0,
OPEN = 1,
CLOSED = 2
};
/**
* get_segment_id
*/
virtual segment_id_t get_segment_id() const = 0;
/**
* min next write location
*/
virtual segment_off_t get_write_ptr() const = 0;
/**
* max capacity
*/
virtual segment_off_t get_write_capacity() const = 0;
/**
* close
*
* Closes segment for writes. Won't complete until
* outstanding writes to this segment are complete.
*/
using close_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::enoent>;
virtual close_ertr::future<> close() = 0;
/**
* write
*
   * @param offset offset of write, must be block aligned and >= the current
   *        write pointer; advances the write pointer
* @param bl buffer to write, will be padded if not aligned
*/
using write_ertr = crimson::errorator<
crimson::ct_error::input_output_error, // media error or corruption
crimson::ct_error::invarg, // if offset is < write pointer or misaligned
crimson::ct_error::ebadf, // segment closed
crimson::ct_error::enospc // write exceeds segment size
>;
virtual write_ertr::future<> write(
segment_off_t offset, ceph::bufferlist bl) = 0;
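  //
  // Typical write path (error handling elided): open the segment through its
  // SegmentManager, write block-aligned bufferlists at get_write_ptr() in
  // strictly increasing offsets, and close() once done; offsets below the
  // write pointer or misaligned offsets fail with invarg, and writes past
  // get_write_capacity() fail with enospc.
  //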
/**
* advance_wp
*
   * advance the segment write pointer; needed when writing at the write
   * pointer is strictly enforced, e.g. for ZBD-backed segments
   * @param offset: advance the write pointer up to the given offset
*/
virtual write_ertr::future<> advance_wp(
segment_off_t offset) = 0;
virtual ~Segment() {}
};
using SegmentRef = boost::intrusive_ptr<Segment>;
std::ostream& operator<<(std::ostream& out, Segment::segment_state_t);
constexpr size_t PADDR_SIZE = sizeof(paddr_t);
class SegmentManager;
using SegmentManagerRef = std::unique_ptr<SegmentManager>;
class SegmentManager : public Device {
public:
backend_type_t get_backend_type() const final {
return backend_type_t::SEGMENTED;
}
using open_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::enoent>;
virtual open_ertr::future<SegmentRef> open(segment_id_t id) = 0;
using release_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::enoent>;
virtual release_ertr::future<> release(segment_id_t id) = 0;
  /* Methods for discovering device geometry, segment id set, etc */
virtual segment_off_t get_segment_size() const = 0;
virtual device_segment_id_t get_num_segments() const {
ceph_assert(get_available_size() % get_segment_size() == 0);
return ((device_segment_id_t)(get_available_size() / get_segment_size()));
}
virtual ~SegmentManager() {}
static seastar::future<SegmentManagerRef>
get_segment_manager(const std::string &device, device_type_t dtype);
};
}
WRITE_CLASS_DENC(
crimson::os::seastore::block_shard_info_t
)
WRITE_CLASS_DENC(
crimson::os::seastore::block_sm_superblock_t
)
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::block_shard_info_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::block_sm_superblock_t> : fmt::ostream_formatter {};
#endif
| 6,414 | 28.562212 | 108 | h |
null | ceph-main/src/crimson/os/seastore/segment_manager_group.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#include "crimson/os/seastore/segment_manager_group.h"
#include "crimson/os/seastore/logging.h"
SET_SUBSYS(seastore_journal);
namespace crimson::os::seastore {
SegmentManagerGroup::read_segment_tail_ret
SegmentManagerGroup::read_segment_tail(segment_id_t segment)
{
assert(has_device(segment.device_id()));
auto& segment_manager = *segment_managers[segment.device_id()];
return segment_manager.read(
paddr_t::make_seg_paddr(
segment,
segment_manager.get_segment_size() - get_rounded_tail_length()),
get_rounded_tail_length()
).handle_error(
read_segment_header_ertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error in SegmentManagerGroup::read_segment_tail"
}
).safe_then([=, &segment_manager](bufferptr bptr) -> read_segment_tail_ret {
LOG_PREFIX(SegmentManagerGroup::read_segment_tail);
DEBUG("segment {} bptr size {}", segment, bptr.length());
segment_tail_t tail;
bufferlist bl;
bl.push_back(bptr);
DEBUG("segment {} block crc {}",
segment,
bl.begin().crc32c(segment_manager.get_block_size(), 0));
auto bp = bl.cbegin();
try {
decode(tail, bp);
} catch (ceph::buffer::error &e) {
DEBUG("segment {} unable to decode tail, skipping -- {}",
segment, e.what());
return crimson::ct_error::enodata::make();
}
DEBUG("segment {} tail {}", segment, tail);
return read_segment_tail_ret(
read_segment_tail_ertr::ready_future_marker{},
tail);
});
}
SegmentManagerGroup::read_segment_header_ret
SegmentManagerGroup::read_segment_header(segment_id_t segment)
{
assert(has_device(segment.device_id()));
auto& segment_manager = *segment_managers[segment.device_id()];
return segment_manager.read(
paddr_t::make_seg_paddr(segment, 0),
get_rounded_header_length()
).handle_error(
read_segment_header_ertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error in SegmentManagerGroup::read_segment_header"
}
).safe_then([=, &segment_manager](bufferptr bptr) -> read_segment_header_ret {
LOG_PREFIX(SegmentManagerGroup::read_segment_header);
DEBUG("segment {} bptr size {}", segment, bptr.length());
segment_header_t header;
bufferlist bl;
bl.push_back(bptr);
DEBUG("segment {} block crc {}",
segment,
bl.begin().crc32c(segment_manager.get_block_size(), 0));
auto bp = bl.cbegin();
try {
decode(header, bp);
} catch (ceph::buffer::error &e) {
DEBUG("segment {} unable to decode header, skipping -- {}",
segment, e.what());
return crimson::ct_error::enodata::make();
}
DEBUG("segment {} header {}", segment, header);
return read_segment_header_ret(
read_segment_header_ertr::ready_future_marker{},
header);
});
}
SegmentManagerGroup::scan_valid_records_ret
SegmentManagerGroup::scan_valid_records(
scan_valid_records_cursor &cursor,
segment_nonce_t nonce,
size_t budget,
found_record_handler_t &handler)
{
LOG_PREFIX(SegmentManagerGroup::scan_valid_records);
assert(has_device(cursor.get_segment_id().device_id()));
auto& segment_manager =
*segment_managers[cursor.get_segment_id().device_id()];
if (cursor.get_segment_offset() == 0) {
INFO("start to scan segment {}", cursor.get_segment_id());
cursor.increment_seq(segment_manager.get_block_size());
}
DEBUG("starting at {}, budget={}", cursor, budget);
auto retref = std::make_unique<size_t>(0);
auto &budget_used = *retref;
return crimson::repeat(
[=, &cursor, &budget_used, &handler, this]() mutable
-> scan_valid_records_ertr::future<seastar::stop_iteration> {
return [=, &handler, &cursor, &budget_used, this] {
if (!cursor.last_valid_header_found) {
return read_validate_record_metadata(cursor.seq.offset, nonce
).safe_then([=, &cursor](auto md) {
if (!md) {
cursor.last_valid_header_found = true;
if (cursor.is_complete()) {
INFO("complete at {}, invalid record group metadata",
cursor);
} else {
DEBUG("found invalid record group metadata at {}, "
"processing {} pending record groups",
cursor.seq,
cursor.pending_record_groups.size());
}
return scan_valid_records_ertr::now();
} else {
auto& [header, md_bl] = *md;
DEBUG("found valid {} at {}", header, cursor.seq);
cursor.emplace_record_group(header, std::move(md_bl));
return scan_valid_records_ertr::now();
}
}).safe_then([=, &cursor, &budget_used, &handler, this] {
DEBUG("processing committed record groups until {}, {} pending",
cursor.last_committed,
cursor.pending_record_groups.size());
return crimson::repeat(
[=, &budget_used, &cursor, &handler, this] {
if (cursor.pending_record_groups.empty()) {
            /* This is only possible if the segment is empty.
             * A record's committed_to must be prior to the record's own
             * location, since the record cannot have been committed by the
             * time it was submitted. Thus, the most recently read record
             * must always fall after cursor.last_committed */
return scan_valid_records_ertr::make_ready_future<
seastar::stop_iteration>(seastar::stop_iteration::yes);
}
auto &next = cursor.pending_record_groups.front();
journal_seq_t next_seq = {cursor.seq.segment_seq, next.offset};
if (cursor.last_committed == JOURNAL_SEQ_NULL ||
next_seq > cursor.last_committed) {
return scan_valid_records_ertr::make_ready_future<
seastar::stop_iteration>(seastar::stop_iteration::yes);
}
return consume_next_records(cursor, handler, budget_used
).safe_then([] {
return scan_valid_records_ertr::make_ready_future<
seastar::stop_iteration>(seastar::stop_iteration::no);
});
});
});
} else {
assert(!cursor.pending_record_groups.empty());
auto &next = cursor.pending_record_groups.front();
return read_validate_data(next.offset, next.header
).safe_then([this, FNAME, &budget_used, &cursor, &handler, &next](auto valid) {
if (!valid) {
INFO("complete at {}, invalid record group data at {}, {}",
cursor, next.offset, next.header);
cursor.pending_record_groups.clear();
return scan_valid_records_ertr::now();
}
return consume_next_records(cursor, handler, budget_used);
});
}
}().safe_then([=, &budget_used, &cursor] {
if (cursor.is_complete() || budget_used >= budget) {
DEBUG("finish at {}, budget_used={}, budget={}",
cursor, budget_used, budget);
return seastar::stop_iteration::yes;
} else {
return seastar::stop_iteration::no;
}
});
}).safe_then([retref=std::move(retref)]() mutable -> scan_valid_records_ret {
return scan_valid_records_ret(
scan_valid_records_ertr::ready_future_marker{},
std::move(*retref));
});
}
SegmentManagerGroup::read_validate_record_metadata_ret
SegmentManagerGroup::read_validate_record_metadata(
paddr_t start,
segment_nonce_t nonce)
{
LOG_PREFIX(SegmentManagerGroup::read_validate_record_metadata);
auto& seg_addr = start.as_seg_paddr();
assert(has_device(seg_addr.get_segment_id().device_id()));
auto& segment_manager = *segment_managers[seg_addr.get_segment_id().device_id()];
auto block_size = segment_manager.get_block_size();
auto segment_size = static_cast<int64_t>(segment_manager.get_segment_size());
if (seg_addr.get_segment_off() + block_size > segment_size) {
DEBUG("failed -- record group header block {}~4096 > segment_size {}", start, segment_size);
return read_validate_record_metadata_ret(
read_validate_record_metadata_ertr::ready_future_marker{},
std::nullopt);
}
TRACE("reading record group header block {}~4096", start);
return segment_manager.read(start, block_size
).safe_then([=, &segment_manager](bufferptr bptr) mutable
-> read_validate_record_metadata_ret {
auto block_size = segment_manager.get_block_size();
bufferlist bl;
bl.append(bptr);
auto maybe_header = try_decode_records_header(bl, nonce);
if (!maybe_header.has_value()) {
return read_validate_record_metadata_ret(
read_validate_record_metadata_ertr::ready_future_marker{},
std::nullopt);
}
auto& seg_addr = start.as_seg_paddr();
auto& header = *maybe_header;
if (header.mdlength < block_size ||
header.mdlength % block_size != 0 ||
header.dlength % block_size != 0 ||
(header.committed_to != JOURNAL_SEQ_NULL &&
header.committed_to.offset.as_seg_paddr().get_segment_off() % block_size != 0) ||
(seg_addr.get_segment_off() + header.mdlength + header.dlength > segment_size)) {
ERROR("failed, invalid record group header {}", start);
return crimson::ct_error::input_output_error::make();
}
if (header.mdlength == block_size) {
return read_validate_record_metadata_ret(
read_validate_record_metadata_ertr::ready_future_marker{},
std::make_pair(std::move(header), std::move(bl))
);
}
auto rest_start = paddr_t::make_seg_paddr(
seg_addr.get_segment_id(),
seg_addr.get_segment_off() + block_size
);
auto rest_len = header.mdlength - block_size;
TRACE("reading record group header rest {}~{}", rest_start, rest_len);
return segment_manager.read(rest_start, rest_len
).safe_then([header=std::move(header), bl=std::move(bl)
](auto&& bptail) mutable {
bl.push_back(bptail);
return read_validate_record_metadata_ret(
read_validate_record_metadata_ertr::ready_future_marker{},
std::make_pair(std::move(header), std::move(bl)));
});
}).safe_then([](auto p) {
if (p && validate_records_metadata(p->second)) {
return read_validate_record_metadata_ret(
read_validate_record_metadata_ertr::ready_future_marker{},
std::move(*p)
);
} else {
return read_validate_record_metadata_ret(
read_validate_record_metadata_ertr::ready_future_marker{},
std::nullopt);
}
});
}
SegmentManagerGroup::read_validate_data_ret
SegmentManagerGroup::read_validate_data(
paddr_t record_base,
const record_group_header_t &header)
{
LOG_PREFIX(SegmentManagerGroup::read_validate_data);
assert(has_device(record_base.get_device_id()));
auto& segment_manager = *segment_managers[record_base.get_device_id()];
auto data_addr = record_base.add_offset(header.mdlength);
TRACE("reading record group data blocks {}~{}", data_addr, header.dlength);
return segment_manager.read(
data_addr,
header.dlength
).safe_then([=, &header](auto bptr) {
bufferlist bl;
bl.append(bptr);
return validate_records_data(header, bl);
});
}
SegmentManagerGroup::consume_record_group_ertr::future<>
SegmentManagerGroup::consume_next_records(
scan_valid_records_cursor& cursor,
found_record_handler_t& handler,
std::size_t& budget_used)
{
LOG_PREFIX(SegmentManagerGroup::consume_next_records);
auto& next = cursor.pending_record_groups.front();
auto total_length = next.header.dlength + next.header.mdlength;
budget_used += total_length;
auto locator = record_locator_t{
next.offset.add_offset(next.header.mdlength),
write_result_t{
journal_seq_t{
cursor.seq.segment_seq,
next.offset
},
total_length
}
};
DEBUG("processing {} at {}, budget_used={}",
next.header, locator, budget_used);
return handler(
locator,
next.header,
next.mdbuffer
).safe_then([FNAME, &cursor] {
cursor.pop_record_group();
if (cursor.is_complete()) {
INFO("complete at {}, no more record group", cursor);
}
});
}
SegmentManagerGroup::find_journal_segment_headers_ret
SegmentManagerGroup::find_journal_segment_headers()
{
return seastar::do_with(
get_segment_managers(),
find_journal_segment_headers_ret_bare{},
[this](auto &sms, auto& ret) -> find_journal_segment_headers_ret
{
return crimson::do_for_each(sms,
[this, &ret](SegmentManager *sm)
{
LOG_PREFIX(SegmentManagerGroup::find_journal_segment_headers);
auto device_id = sm->get_device_id();
auto num_segments = sm->get_num_segments();
DEBUG("processing {} with {} segments",
device_id_printer_t{device_id}, num_segments);
return crimson::do_for_each(
boost::counting_iterator<device_segment_id_t>(0),
boost::counting_iterator<device_segment_id_t>(num_segments),
[this, &ret, device_id](device_segment_id_t d_segment_id)
{
segment_id_t segment_id{device_id, d_segment_id};
return read_segment_header(segment_id
).safe_then([segment_id, &ret](auto &&header) {
if (header.get_type() == segment_type_t::JOURNAL) {
ret.emplace_back(std::make_pair(segment_id, std::move(header)));
}
}).handle_error(
crimson::ct_error::enoent::handle([](auto) {
return find_journal_segment_headers_ertr::now();
}),
crimson::ct_error::enodata::handle([](auto) {
return find_journal_segment_headers_ertr::now();
}),
crimson::ct_error::input_output_error::pass_further{}
);
});
}).safe_then([&ret]() mutable {
return find_journal_segment_headers_ret{
find_journal_segment_headers_ertr::ready_future_marker{},
std::move(ret)};
});
});
}
} // namespace crimson::os::seastore
| 13,652 | 35.701613 | 96 | cc |
null | ceph-main/src/crimson/os/seastore/segment_manager_group.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <set>
#include "crimson/common/errorator.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/segment_manager.h"
namespace crimson::os::seastore {
class SegmentManagerGroup {
public:
SegmentManagerGroup() {
segment_managers.resize(DEVICE_ID_MAX, nullptr);
}
const std::set<device_id_t>& get_device_ids() const {
return device_ids;
}
std::vector<SegmentManager*> get_segment_managers() const {
assert(device_ids.size());
std::vector<SegmentManager*> ret;
for (auto& device_id : device_ids) {
auto segment_manager = segment_managers[device_id];
assert(segment_manager->get_device_id() == device_id);
ret.emplace_back(segment_manager);
}
return ret;
}
void add_segment_manager(SegmentManager* segment_manager) {
auto device_id = segment_manager->get_device_id();
ceph_assert(!has_device(device_id));
if (!device_ids.empty()) {
auto existing_id = *device_ids.begin();
ceph_assert(segment_managers[existing_id]->get_device_type()
== segment_manager->get_device_type());
}
segment_managers[device_id] = segment_manager;
device_ids.insert(device_id);
}
void reset() {
segment_managers.clear();
segment_managers.resize(DEVICE_ID_MAX, nullptr);
device_ids.clear();
}
/**
* get device info
*
   * Assumes that all segment managers share the following information.
*/
extent_len_t get_block_size() const {
assert(device_ids.size());
return segment_managers[*device_ids.begin()]->get_block_size();
}
segment_off_t get_segment_size() const {
assert(device_ids.size());
return segment_managers[*device_ids.begin()]->get_segment_size();
}
const seastore_meta_t &get_meta() const {
assert(device_ids.size());
return segment_managers[*device_ids.begin()]->get_meta();
}
std::size_t get_rounded_header_length() const {
return p2roundup(
ceph::encoded_sizeof_bounded<segment_header_t>(),
(std::size_t)get_block_size());
}
std::size_t get_rounded_tail_length() const {
return p2roundup(
ceph::encoded_sizeof_bounded<segment_tail_t>(),
(std::size_t)get_block_size());
}
using read_segment_header_ertr = crimson::errorator<
crimson::ct_error::enoent,
crimson::ct_error::enodata,
crimson::ct_error::input_output_error
>;
using read_segment_header_ret = read_segment_header_ertr::future<
segment_header_t>;
read_segment_header_ret read_segment_header(segment_id_t segment);
using read_segment_tail_ertr = read_segment_header_ertr;
using read_segment_tail_ret = read_segment_tail_ertr::future<
segment_tail_t>;
read_segment_tail_ret read_segment_tail(segment_id_t segment);
using read_ertr = SegmentManager::read_ertr;
using scan_valid_records_ertr = read_ertr;
using scan_valid_records_ret = scan_valid_records_ertr::future<
size_t>;
using found_record_handler_t = std::function<
scan_valid_records_ertr::future<>(
record_locator_t record_locator,
// callee may assume header and bl will remain valid until
// returned future resolves
const record_group_header_t &header,
const bufferlist &mdbuf)>;
scan_valid_records_ret scan_valid_records(
scan_valid_records_cursor &cursor, ///< [in, out] cursor, updated during call
segment_nonce_t nonce, ///< [in] nonce for segment
size_t budget, ///< [in] max budget to use
found_record_handler_t &handler ///< [in] handler for records
); ///< @return used budget
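  // A minimal usage sketch with illustrative names, assuming `smg` is a
  // populated SegmentManagerGroup, `start` is the journal_seq_t of the first
  // record in a journal segment, and `nonce` comes from that segment's header:
  //
  //   scan_valid_records_cursor cursor{start};
  //   found_record_handler_t handler(
  //     [](record_locator_t locator,
  //        const record_group_header_t &header,
  //        const bufferlist &mdbuf) {
  //       // replay or index the record group here
  //       return scan_valid_records_ertr::now();
  //     });
  //   return smg.scan_valid_records(cursor, nonce, budget, handler);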
/*
* read journal segment headers
*/
using find_journal_segment_headers_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
using find_journal_segment_headers_ret_bare = std::vector<
std::pair<segment_id_t, segment_header_t>>;
using find_journal_segment_headers_ret = find_journal_segment_headers_ertr::future<
find_journal_segment_headers_ret_bare>;
find_journal_segment_headers_ret find_journal_segment_headers();
using open_ertr = SegmentManager::open_ertr;
open_ertr::future<SegmentRef> open(segment_id_t id) {
assert(has_device(id.device_id()));
return segment_managers[id.device_id()]->open(id);
}
using release_ertr = SegmentManager::release_ertr;
release_ertr::future<> release_segment(segment_id_t id) {
assert(has_device(id.device_id()));
return segment_managers[id.device_id()]->release(id);
}
private:
bool has_device(device_id_t id) const {
assert(id <= DEVICE_ID_MAX_VALID);
return device_ids.count(id) >= 1;
}
/// read record metadata for record starting at start
using read_validate_record_metadata_ertr = read_ertr;
using read_validate_record_metadata_ret =
read_validate_record_metadata_ertr::future<
std::optional<std::pair<record_group_header_t, bufferlist>>
>;
read_validate_record_metadata_ret read_validate_record_metadata(
paddr_t start,
segment_nonce_t nonce);
/// read and validate data
using read_validate_data_ertr = read_ertr;
using read_validate_data_ret = read_validate_data_ertr::future<bool>;
read_validate_data_ret read_validate_data(
paddr_t record_base,
const record_group_header_t &header ///< caller must ensure lifetime through
/// future resolution
);
using consume_record_group_ertr = scan_valid_records_ertr;
consume_record_group_ertr::future<> consume_next_records(
scan_valid_records_cursor& cursor,
found_record_handler_t& handler,
std::size_t& budget_used);
std::vector<SegmentManager*> segment_managers;
std::set<device_id_t> device_ids;
};
using SegmentManagerGroupRef = std::unique_ptr<SegmentManagerGroup>;
} // namespace crimson::os::seastore
| 5,917 | 32.247191 | 85 | h |
null | ceph-main/src/crimson/os/seastore/segment_seq_allocator.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/logging.h"
#include "crimson/os/seastore/seastore_types.h"
namespace crimson::os::seastore {
class AsyncCleaner;
}
namespace crimson::os::seastore::journal {
class SegmentedJournal;
}
namespace crimson::os::seastore {
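/**
 * SegmentSeqAllocator
 *
 * Hands out monotonically increasing segment sequence numbers for newly
 * opened segments of a given segment_type_t; set_next_segment_seq() lets the
 * journal and cleaner advance the counter past sequences already seen on
 * disk.
 */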
class SegmentSeqAllocator {
public:
SegmentSeqAllocator(segment_type_t type)
: type(type) {}
segment_seq_t get_and_inc_next_segment_seq() {
return next_segment_seq++;
}
private:
void set_next_segment_seq(segment_seq_t seq) {
LOG_PREFIX(SegmentSeqAllocator::set_next_segment_seq);
SUBDEBUG(
seastore_journal,
"{}, next={}, cur={}",
type,
segment_seq_printer_t{seq},
segment_seq_printer_t{next_segment_seq});
assert(type == segment_type_t::JOURNAL
? seq >= next_segment_seq
: true);
if (seq > next_segment_seq)
next_segment_seq = seq;
}
segment_seq_t next_segment_seq = 0;
segment_type_t type = segment_type_t::NULL_SEG;
friend class journal::SegmentedJournal;
friend class SegmentCleaner;
};
using SegmentSeqAllocatorRef =
std::unique_ptr<SegmentSeqAllocator>;
};
| 1,222 | 22.980392 | 70 | h |
null | ceph-main/src/crimson/os/seastore/transaction.cc | #include "transaction.h"
#include "crimson/common/interruptible_future.h"
namespace crimson::interruptible {
template
thread_local interrupt_cond_t<::crimson::os::seastore::TransactionConflictCondition>
interrupt_cond<::crimson::os::seastore::TransactionConflictCondition>;
}
| 277 | 29.888889 | 84 | cc |
null | ceph-main/src/crimson/os/seastore/transaction.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iostream>
#include <boost/intrusive/list.hpp>
#include "crimson/common/log.h"
#include "crimson/os/seastore/logging.h"
#include "crimson/os/seastore/ordering_handle.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/cached_extent.h"
#include "crimson/os/seastore/root_block.h"
namespace crimson::os::seastore {
class SeaStore;
class Transaction;
struct io_stat_t {
uint64_t num = 0;
uint64_t bytes = 0;
bool is_clear() const {
return (num == 0 && bytes == 0);
}
void increment(uint64_t _bytes) {
++num;
bytes += _bytes;
}
void increment_stat(const io_stat_t& stat) {
num += stat.num;
bytes += stat.bytes;
}
};
inline std::ostream& operator<<(std::ostream& out, const io_stat_t& stat) {
return out << stat.num << "(" << stat.bytes << "B)";
}
struct version_stat_t {
uint64_t num = 0;
uint64_t version = 0;
bool is_clear() const {
return (num == 0 && version == 0);
}
void increment(extent_version_t v) {
++num;
version += v;
}
void increment_stat(const version_stat_t& stat) {
num += stat.num;
version += stat.version;
}
};
/**
* Transaction
*
* Representation of in-progress mutation. Used exclusively through Cache methods.
*
* Transaction log levels:
* seastore_t
* - DEBUG: transaction create, conflict, commit events
* - TRACE: DEBUG details
* - seastore_cache logs
*/
class Transaction {
public:
using Ref = std::unique_ptr<Transaction>;
using on_destruct_func_t = std::function<void(Transaction&)>;
enum class get_extent_ret {
PRESENT,
ABSENT,
RETIRED
};
get_extent_ret get_extent(paddr_t addr, CachedExtentRef *out) {
LOG_PREFIX(Transaction::get_extent);
    // It's possible that both write_set and retired_set contain
    // this addr at the same time when addr is absolute and the
    // corresponding extent is used to map an existing extent on disk,
    // so search write_set first.
if (auto iter = write_set.find_offset(addr);
iter != write_set.end()) {
if (out)
*out = CachedExtentRef(&*iter);
SUBTRACET(seastore_cache, "{} is present in write_set -- {}",
*this, addr, *iter);
assert((*out)->is_valid());
return get_extent_ret::PRESENT;
} else if (retired_set.count(addr)) {
return get_extent_ret::RETIRED;
} else if (
auto iter = read_set.find(addr);
iter != read_set.end()) {
      // a placeholder in the read-set must also be in the
      // retired-set at the same time.
assert(iter->ref->get_type() != extent_types_t::RETIRED_PLACEHOLDER);
if (out)
*out = iter->ref;
SUBTRACET(seastore_cache, "{} is present in read_set -- {}",
*this, addr, *(iter->ref));
return get_extent_ret::PRESENT;
} else {
return get_extent_ret::ABSENT;
}
}
void add_to_retired_set(CachedExtentRef ref) {
ceph_assert(!is_weak());
if (ref->is_exist_clean() ||
ref->is_exist_mutation_pending()) {
existing_block_stats.dec(ref);
ref->set_invalid(*this);
write_set.erase(*ref);
} else if (ref->is_initial_pending()) {
ref->set_invalid(*this);
write_set.erase(*ref);
} else if (ref->is_mutation_pending()) {
ref->set_invalid(*this);
write_set.erase(*ref);
assert(ref->prior_instance);
retired_set.insert(ref->prior_instance);
assert(read_set.count(ref->prior_instance->get_paddr()));
ref->prior_instance.reset();
} else {
// && retired_set.count(ref->get_paddr()) == 0
// If it's already in the set, insert here will be a noop,
// which is what we want.
retired_set.insert(ref);
}
}
void add_to_read_set(CachedExtentRef ref) {
if (is_weak()) return;
assert(ref->is_valid());
auto it = ref->transactions.lower_bound(
this, read_set_item_t<Transaction>::trans_cmp_t());
if (it != ref->transactions.end() && it->t == this) return;
auto [iter, inserted] = read_set.emplace(this, ref);
ceph_assert(inserted);
ref->transactions.insert_before(
it, const_cast<read_set_item_t<Transaction>&>(*iter));
}
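  // Fresh extents fall into four cases: extents remapping existing on-disk
  // blocks (exist_clean), logical extents with delayed allocation decided at
  // submission time, extents with pre-allocated absolute addresses, and
  // inline extents given record-relative addresses within the journal record.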
void add_fresh_extent(
CachedExtentRef ref) {
ceph_assert(!is_weak());
if (ref->is_exist_clean()) {
existing_block_stats.inc(ref);
existing_block_list.push_back(ref);
} else if (ref->get_paddr().is_delayed()) {
assert(ref->get_paddr() == make_delayed_temp_paddr(0));
assert(ref->is_logical());
ref->set_paddr(make_delayed_temp_paddr(delayed_temp_offset));
delayed_temp_offset += ref->get_length();
delayed_alloc_list.emplace_back(ref->cast<LogicalCachedExtent>());
fresh_block_stats.increment(ref->get_length());
} else if (ref->get_paddr().is_absolute()) {
pre_alloc_list.emplace_back(ref->cast<LogicalCachedExtent>());
fresh_block_stats.increment(ref->get_length());
} else {
if (likely(ref->get_paddr() == make_record_relative_paddr(0))) {
ref->set_paddr(make_record_relative_paddr(offset));
} else {
ceph_assert(ref->get_paddr().is_fake());
}
offset += ref->get_length();
inline_block_list.push_back(ref);
fresh_block_stats.increment(ref->get_length());
}
write_set.insert(*ref);
if (is_backref_node(ref->get_type()))
fresh_backref_extents++;
}
uint64_t get_num_fresh_backref() const {
return fresh_backref_extents;
}
void mark_delayed_extent_inline(LogicalCachedExtentRef& ref) {
write_set.erase(*ref);
assert(ref->get_paddr().is_delayed());
ref->set_paddr(make_record_relative_paddr(offset),
/* need_update_mapping: */ true);
offset += ref->get_length();
inline_block_list.push_back(ref);
write_set.insert(*ref);
}
void mark_delayed_extent_ool(LogicalCachedExtentRef& ref) {
written_ool_block_list.push_back(ref);
}
void update_delayed_ool_extent_addr(LogicalCachedExtentRef& ref,
paddr_t final_addr) {
write_set.erase(*ref);
assert(ref->get_paddr().is_delayed());
ref->set_paddr(final_addr, /* need_update_mapping: */ true);
assert(!ref->get_paddr().is_null());
assert(!ref->is_inline());
write_set.insert(*ref);
}
void mark_allocated_extent_ool(LogicalCachedExtentRef& ref) {
assert(ref->get_paddr().is_absolute());
assert(!ref->is_inline());
written_ool_block_list.push_back(ref);
}
void add_mutated_extent(CachedExtentRef ref) {
ceph_assert(!is_weak());
assert(ref->is_exist_mutation_pending() ||
read_set.count(ref->prior_instance->get_paddr()));
mutated_block_list.push_back(ref);
if (!ref->is_exist_mutation_pending()) {
write_set.insert(*ref);
} else {
assert(write_set.find_offset(ref->get_paddr()) !=
write_set.end());
}
}
void replace_placeholder(CachedExtent& placeholder, CachedExtent& extent) {
ceph_assert(!is_weak());
assert(placeholder.get_type() == extent_types_t::RETIRED_PLACEHOLDER);
assert(extent.get_type() != extent_types_t::RETIRED_PLACEHOLDER);
assert(extent.get_type() != extent_types_t::ROOT);
assert(extent.get_paddr() == placeholder.get_paddr());
{
auto where = read_set.find(placeholder.get_paddr());
assert(where != read_set.end());
assert(where->ref.get() == &placeholder);
where = read_set.erase(where);
auto it = read_set.emplace_hint(where, this, &extent);
extent.transactions.insert(const_cast<read_set_item_t<Transaction>&>(*it));
}
{
auto where = retired_set.find(&placeholder);
assert(where != retired_set.end());
assert(where->get() == &placeholder);
where = retired_set.erase(where);
retired_set.emplace_hint(where, &extent);
}
}
auto get_delayed_alloc_list() {
std::list<LogicalCachedExtentRef> ret;
for (auto& extent : delayed_alloc_list) {
// delayed extents may be invalidated
if (extent->is_valid()) {
ret.push_back(std::move(extent));
} else {
++num_delayed_invalid_extents;
}
}
delayed_alloc_list.clear();
return ret;
}
auto get_valid_pre_alloc_list() {
std::list<LogicalCachedExtentRef> ret;
assert(num_allocated_invalid_extents == 0);
for (auto& extent : pre_alloc_list) {
if (extent->is_valid()) {
ret.push_back(extent);
} else {
++num_allocated_invalid_extents;
}
}
return ret;
}
const auto &get_inline_block_list() {
return inline_block_list;
}
const auto &get_mutated_block_list() {
return mutated_block_list;
}
const auto &get_existing_block_list() {
return existing_block_list;
}
const auto &get_retired_set() {
return retired_set;
}
bool is_retired(paddr_t paddr, extent_len_t len) {
if (retired_set.empty()) {
return false;
}
auto iter = retired_set.lower_bound(paddr);
if (iter == retired_set.end() ||
(*iter)->get_paddr() > paddr) {
assert(iter != retired_set.begin());
--iter;
}
auto retired_paddr = (*iter)->get_paddr();
auto retired_length = (*iter)->get_length();
return retired_paddr <= paddr &&
retired_paddr.add_offset(retired_length) >= paddr.add_offset(len);
}
template <typename F>
auto for_each_fresh_block(F &&f) const {
std::for_each(written_ool_block_list.begin(), written_ool_block_list.end(), f);
std::for_each(inline_block_list.begin(), inline_block_list.end(), f);
}
const io_stat_t& get_fresh_block_stats() const {
return fresh_block_stats;
}
using src_t = transaction_type_t;
src_t get_src() const {
return src;
}
bool is_weak() const {
return weak;
}
void test_set_conflict() {
conflicted = true;
}
bool is_conflicted() const {
return conflicted;
}
auto &get_handle() {
return handle;
}
Transaction(
OrderingHandle &&handle,
bool weak,
src_t src,
journal_seq_t initiated_after,
on_destruct_func_t&& f,
transaction_id_t trans_id
) : weak(weak),
handle(std::move(handle)),
on_destruct(std::move(f)),
src(src),
trans_id(trans_id)
{}
void invalidate_clear_write_set() {
for (auto &&i: write_set) {
i.set_invalid(*this);
}
write_set.clear();
}
~Transaction() {
on_destruct(*this);
invalidate_clear_write_set();
}
friend class crimson::os::seastore::SeaStore;
friend class TransactionConflictCondition;
void reset_preserve_handle(journal_seq_t initiated_after) {
root.reset();
offset = 0;
delayed_temp_offset = 0;
read_set.clear();
fresh_backref_extents = 0;
invalidate_clear_write_set();
mutated_block_list.clear();
fresh_block_stats = {};
num_delayed_invalid_extents = 0;
num_allocated_invalid_extents = 0;
delayed_alloc_list.clear();
inline_block_list.clear();
written_ool_block_list.clear();
pre_alloc_list.clear();
retired_set.clear();
existing_block_list.clear();
existing_block_stats = {};
onode_tree_stats = {};
omap_tree_stats = {};
lba_tree_stats = {};
backref_tree_stats = {};
ool_write_stats = {};
rewrite_version_stats = {};
conflicted = false;
if (!has_reset) {
has_reset = true;
}
}
bool did_reset() const {
return has_reset;
}
struct tree_stats_t {
uint64_t depth = 0;
uint64_t num_inserts = 0;
uint64_t num_erases = 0;
uint64_t num_updates = 0;
int64_t extents_num_delta = 0;
bool is_clear() const {
return (depth == 0 &&
num_inserts == 0 &&
num_erases == 0 &&
num_updates == 0 &&
extents_num_delta == 0);
}
};
tree_stats_t& get_onode_tree_stats() {
return onode_tree_stats;
}
tree_stats_t& get_omap_tree_stats() {
return omap_tree_stats;
}
tree_stats_t& get_lba_tree_stats() {
return lba_tree_stats;
}
tree_stats_t& get_backref_tree_stats() {
return backref_tree_stats;
}
struct ool_write_stats_t {
io_stat_t extents;
uint64_t md_bytes = 0;
uint64_t num_records = 0;
uint64_t get_data_bytes() const {
return extents.bytes;
}
bool is_clear() const {
return (extents.is_clear() &&
md_bytes == 0 &&
num_records == 0);
}
};
ool_write_stats_t& get_ool_write_stats() {
return ool_write_stats;
}
version_stat_t& get_rewrite_version_stats() {
return rewrite_version_stats;
}
struct existing_block_stats_t {
uint64_t valid_num = 0;
uint64_t clean_num = 0;
uint64_t mutated_num = 0;
void inc(const CachedExtentRef &ref) {
valid_num++;
if (ref->is_exist_clean()) {
clean_num++;
} else {
mutated_num++;
}
}
void dec(const CachedExtentRef &ref) {
valid_num--;
if (ref->is_exist_clean()) {
clean_num--;
} else {
mutated_num--;
}
}
};
existing_block_stats_t& get_existing_block_stats() {
return existing_block_stats;
}
transaction_id_t get_trans_id() const {
return trans_id;
}
private:
friend class Cache;
friend Ref make_test_transaction();
/**
* If set, *this may not be used to perform writes and will not provide
   * consistency, allowing operations using it to avoid maintaining a read_set.
*/
const bool weak;
RootBlockRef root; ///< ref to root if read or written by transaction
device_off_t offset = 0; ///< relative offset of next block
device_off_t delayed_temp_offset = 0;
/**
* read_set
*
* Holds a reference (with a refcount) to every extent read via *this.
* Submitting a transaction mutating any contained extent/addr will
* invalidate *this.
*/
read_set_t<Transaction> read_set; ///< set of extents read by paddr
uint64_t fresh_backref_extents = 0; // counter of new backref extents
/**
* write_set
*
* Contains a reference (without a refcount) to every extent mutated
* as part of *this. No contained extent may be referenced outside
* of *this. Every contained extent will be in one of inline_block_list,
* written_ool_block_list or/and pre_alloc_list, mutated_block_list,
* or delayed_alloc_list.
*/
ExtentIndex write_set;
/**
* lists of fresh blocks, holds refcounts, subset of write_set
*/
io_stat_t fresh_block_stats;
uint64_t num_delayed_invalid_extents = 0;
uint64_t num_allocated_invalid_extents = 0;
/// blocks that will be committed with journal record inline
std::list<CachedExtentRef> inline_block_list;
/// blocks that will be committed with out-of-line record
std::list<CachedExtentRef> written_ool_block_list;
/// blocks with delayed allocation, may become inline or ool above
std::list<LogicalCachedExtentRef> delayed_alloc_list;
/// Extents with pre-allocated addresses,
/// will be added to written_ool_block_list after write
std::list<LogicalCachedExtentRef> pre_alloc_list;
/// list of mutated blocks, holds refcounts, subset of write_set
std::list<CachedExtentRef> mutated_block_list;
/// partial blocks of extents on disk, with data and refcounts
std::list<CachedExtentRef> existing_block_list;
existing_block_stats_t existing_block_stats;
/**
* retire_set
*
* Set of extents retired by *this.
*/
pextent_set_t retired_set;
/// stats to collect when commit or invalidate
tree_stats_t onode_tree_stats;
tree_stats_t omap_tree_stats; // exclude omap tree depth
tree_stats_t lba_tree_stats;
tree_stats_t backref_tree_stats;
ool_write_stats_t ool_write_stats;
version_stat_t rewrite_version_stats;
bool conflicted = false;
bool has_reset = false;
OrderingHandle handle;
on_destruct_func_t on_destruct;
const src_t src;
transaction_id_t trans_id = TRANS_ID_NULL;
};
using TransactionRef = Transaction::Ref;
/// Should only be used with dummy staged-fltree node extent manager
inline TransactionRef make_test_transaction() {
static transaction_id_t next_id = 0;
return std::make_unique<Transaction>(
get_dummy_ordering_handle(),
false,
Transaction::src_t::MUTATE,
JOURNAL_SEQ_NULL,
[](Transaction&) {},
++next_id
);
}
struct TransactionConflictCondition {
class transaction_conflict final : public std::exception {
public:
const char* what() const noexcept final {
return "transaction conflict detected";
}
};
public:
TransactionConflictCondition(Transaction &t) : t(t) {}
template <typename Fut>
std::optional<Fut> may_interrupt() {
if (t.conflicted) {
return seastar::futurize<Fut>::make_exception_future(
transaction_conflict());
} else {
return std::optional<Fut>();
}
}
template <typename T>
static constexpr bool is_interruption_v =
std::is_same_v<T, transaction_conflict>;
static bool is_interruption(std::exception_ptr& eptr) {
return *eptr.__cxa_exception_type() == typeid(transaction_conflict);
}
private:
Transaction &t;
};
using trans_intr = crimson::interruptible::interruptor<
TransactionConflictCondition
>;
template <typename E>
using trans_iertr =
crimson::interruptible::interruptible_errorator<
TransactionConflictCondition,
E
>;
template <typename F, typename... Args>
auto with_trans_intr(Transaction &t, F &&f, Args&&... args) {
return trans_intr::with_interruption_to_error<crimson::ct_error::eagain>(
std::move(f),
TransactionConflictCondition(t),
t,
std::forward<Args>(args)...);
}
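// A minimal usage sketch: wrapping an interruptible operation converts a
// transaction conflict into crimson::ct_error::eagain on the returned
// errorated future instead of an escaping exception, e.g.
//
//   return with_trans_intr(*tref, [this](auto &t) {
//     return do_interruptible_work(t);   // illustrative placeholder
//   });  // the resulting future's error set is extended with ct_error::eagain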
template <typename T>
using with_trans_ertr = typename T::base_ertr::template extend<crimson::ct_error::eagain>;
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::io_stat_t> : fmt::ostream_formatter {};
#endif
| 17,815 | 26.24159 | 96 | h |
null | ceph-main/src/crimson/os/seastore/transaction_manager.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab expandtab
#include "include/denc.h"
#include "include/intarith.h"
#include "crimson/os/seastore/logging.h"
#include "crimson/os/seastore/transaction_manager.h"
#include "crimson/os/seastore/journal.h"
#include "crimson/os/seastore/journal/circular_bounded_journal.h"
#include "crimson/os/seastore/lba_manager/btree/lba_btree_node.h"
#include "crimson/os/seastore/random_block_manager/rbm_device.h"
/*
* TransactionManager logs
*
* levels:
* - INFO: major initiation, closing operations
* - DEBUG: major extent related operations, INFO details
* - TRACE: DEBUG details
* - seastore_t logs
*/
SET_SUBSYS(seastore_tm);
namespace crimson::os::seastore {
TransactionManager::TransactionManager(
JournalRef _journal,
CacheRef _cache,
LBAManagerRef _lba_manager,
ExtentPlacementManagerRef &&_epm,
BackrefManagerRef&& _backref_manager)
: cache(std::move(_cache)),
lba_manager(std::move(_lba_manager)),
journal(std::move(_journal)),
epm(std::move(_epm)),
backref_manager(std::move(_backref_manager))
{
epm->set_extent_callback(this);
journal->set_write_pipeline(&write_pipeline);
}
TransactionManager::mkfs_ertr::future<> TransactionManager::mkfs()
{
LOG_PREFIX(TransactionManager::mkfs);
INFO("enter");
return epm->mount(
).safe_then([this] {
return journal->open_for_mkfs();
}).safe_then([this](auto start_seq) {
journal->get_trimmer().update_journal_tails(start_seq, start_seq);
journal->get_trimmer().set_journal_head(start_seq);
return epm->open_for_write();
}).safe_then([this, FNAME]() {
return with_transaction_intr(
Transaction::src_t::MUTATE,
"mkfs_tm",
[this, FNAME](auto& t)
{
cache->init();
return cache->mkfs(t
).si_then([this, &t] {
return lba_manager->mkfs(t);
}).si_then([this, &t] {
return backref_manager->mkfs(t);
}).si_then([this, FNAME, &t] {
INFOT("submitting mkfs transaction", t);
return submit_transaction_direct(t);
});
}).handle_error(
crimson::ct_error::eagain::handle([] {
ceph_assert(0 == "eagain impossible");
return mkfs_ertr::now();
}),
mkfs_ertr::pass_further{}
);
}).safe_then([this] {
return close();
}).safe_then([FNAME] {
INFO("completed");
});
}
TransactionManager::mount_ertr::future<> TransactionManager::mount()
{
LOG_PREFIX(TransactionManager::mount);
INFO("enter");
cache->init();
return epm->mount(
).safe_then([this] {
return journal->replay(
[this](
const auto &offsets,
const auto &e,
const journal_seq_t &dirty_tail,
const journal_seq_t &alloc_tail,
sea_time_point modify_time)
{
auto start_seq = offsets.write_result.start_seq;
return cache->replay_delta(
start_seq,
offsets.record_block_base,
e,
dirty_tail,
alloc_tail,
modify_time);
});
}).safe_then([this] {
return journal->open_for_mount();
}).safe_then([this](auto start_seq) {
journal->get_trimmer().set_journal_head(start_seq);
return with_transaction_weak(
"mount",
[this](auto &t)
{
return cache->init_cached_extents(t, [this](auto &t, auto &e) {
if (is_backref_node(e->get_type())) {
return backref_manager->init_cached_extent(t, e);
} else {
return lba_manager->init_cached_extent(t, e);
}
}).si_then([this, &t] {
epm->start_scan_space();
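        // Rebuild space accounting from the backref tree: backref tree nodes
        // and live logical extents mark their space used, while entries with
        // a null laddr mark previously allocated space free.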
return backref_manager->scan_mapped_space(
t,
[this](
paddr_t paddr,
paddr_t backref_key,
extent_len_t len,
extent_types_t type,
laddr_t laddr) {
if (is_backref_node(type)) {
assert(laddr == L_ADDR_NULL);
assert(backref_key != P_ADDR_NULL);
backref_manager->cache_new_backref_extent(paddr, backref_key, type);
cache->update_tree_extents_num(type, 1);
epm->mark_space_used(paddr, len);
} else if (laddr == L_ADDR_NULL) {
assert(backref_key == P_ADDR_NULL);
cache->update_tree_extents_num(type, -1);
epm->mark_space_free(paddr, len);
} else {
assert(backref_key == P_ADDR_NULL);
cache->update_tree_extents_num(type, 1);
epm->mark_space_used(paddr, len);
}
});
});
});
}).safe_then([this] {
return epm->open_for_write();
}).safe_then([FNAME, this] {
epm->start_background();
INFO("completed");
}).handle_error(
mount_ertr::pass_further{},
crimson::ct_error::all_same_way([] {
ceph_assert(0 == "unhandled error");
return mount_ertr::now();
})
);
}
TransactionManager::close_ertr::future<> TransactionManager::close() {
LOG_PREFIX(TransactionManager::close);
INFO("enter");
return epm->stop_background(
).then([this] {
return cache->close();
}).safe_then([this] {
cache->dump_contents();
return journal->close();
}).safe_then([this] {
return epm->close();
}).safe_then([FNAME] {
INFO("completed");
return seastar::now();
});
}
TransactionManager::ref_ret TransactionManager::inc_ref(
Transaction &t,
LogicalCachedExtentRef &ref)
{
LOG_PREFIX(TransactionManager::inc_ref);
TRACET("{}", t, *ref);
return lba_manager->incref_extent(t, ref->get_laddr()
).si_then([FNAME, ref, &t](auto result) {
DEBUGT("extent refcount is incremented to {} -- {}",
t, result.refcount, *ref);
return result.refcount;
}).handle_error_interruptible(
ref_iertr::pass_further{},
ct_error::all_same_way([](auto e) {
ceph_assert(0 == "unhandled error, TODO");
}));
}
TransactionManager::ref_ret TransactionManager::inc_ref(
Transaction &t,
laddr_t offset)
{
LOG_PREFIX(TransactionManager::inc_ref);
TRACET("{}", t, offset);
return lba_manager->incref_extent(t, offset
).si_then([FNAME, offset, &t](auto result) {
DEBUGT("extent refcount is incremented to {} -- {}~{}, {}",
t, result.refcount, offset, result.length, result.addr);
return result.refcount;
});
}
TransactionManager::ref_ret TransactionManager::dec_ref(
Transaction &t,
LogicalCachedExtentRef &ref)
{
LOG_PREFIX(TransactionManager::dec_ref);
TRACET("{}", t, *ref);
return lba_manager->decref_extent(t, ref->get_laddr()
).si_then([this, FNAME, &t, ref](auto result) {
DEBUGT("extent refcount is decremented to {} -- {}",
t, result.refcount, *ref);
if (result.refcount == 0) {
cache->retire_extent(t, ref);
}
return result.refcount;
});
}
TransactionManager::ref_ret TransactionManager::dec_ref(
Transaction &t,
laddr_t offset)
{
LOG_PREFIX(TransactionManager::dec_ref);
TRACET("{}", t, offset);
return lba_manager->decref_extent(t, offset
).si_then([this, FNAME, offset, &t](auto result) -> ref_ret {
DEBUGT("extent refcount is decremented to {} -- {}~{}, {}",
t, result.refcount, offset, result.length, result.addr);
if (result.refcount == 0 && !result.addr.is_zero()) {
return cache->retire_extent_addr(
t, result.addr, result.length
).si_then([] {
return ref_ret(
interruptible::ready_future_marker{},
0);
});
} else {
return ref_ret(
interruptible::ready_future_marker{},
result.refcount);
}
});
}
TransactionManager::refs_ret TransactionManager::dec_ref(
Transaction &t,
std::vector<laddr_t> offsets)
{
LOG_PREFIX(TransactionManager::dec_ref);
DEBUG("{} offsets", offsets.size());
return seastar::do_with(std::move(offsets), std::vector<unsigned>(),
[this, &t] (auto &&offsets, auto &refcnt) {
return trans_intr::do_for_each(offsets.begin(), offsets.end(),
[this, &t, &refcnt] (auto &laddr) {
return this->dec_ref(t, laddr).si_then([&refcnt] (auto ref) {
refcnt.push_back(ref);
return ref_iertr::now();
});
}).si_then([&refcnt] {
return ref_iertr::make_ready_future<std::vector<unsigned>>(std::move(refcnt));
});
});
}
TransactionManager::submit_transaction_iertr::future<>
TransactionManager::submit_transaction(
Transaction &t)
{
LOG_PREFIX(TransactionManager::submit_transaction);
SUBTRACET(seastore_t, "start", t);
return trans_intr::make_interruptible(
t.get_handle().enter(write_pipeline.reserve_projected_usage)
).then_interruptible([this, FNAME, &t] {
auto dispatch_result = epm->dispatch_delayed_extents(t);
auto projected_usage = dispatch_result.usage;
SUBTRACET(seastore_t, "waiting for projected_usage: {}", t, projected_usage);
return trans_intr::make_interruptible(
epm->reserve_projected_usage(projected_usage)
).then_interruptible([this, &t, dispatch_result = std::move(dispatch_result)] {
return do_submit_transaction(t, std::move(dispatch_result));
}).finally([this, FNAME, projected_usage, &t] {
SUBTRACET(seastore_t, "releasing projected_usage: {}", t, projected_usage);
epm->release_projected_usage(projected_usage);
});
});
}
TransactionManager::submit_transaction_direct_ret
TransactionManager::submit_transaction_direct(
Transaction &tref,
std::optional<journal_seq_t> trim_alloc_to)
{
return do_submit_transaction(
tref,
epm->dispatch_delayed_extents(tref),
trim_alloc_to);
}
TransactionManager::submit_transaction_direct_ret
TransactionManager::do_submit_transaction(
Transaction &tref,
ExtentPlacementManager::dispatch_result_t dispatch_result,
std::optional<journal_seq_t> trim_alloc_to)
{
LOG_PREFIX(TransactionManager::do_submit_transaction);
SUBTRACET(seastore_t, "start", tref);
return trans_intr::make_interruptible(
tref.get_handle().enter(write_pipeline.ool_writes)
).then_interruptible([this, FNAME, &tref,
dispatch_result = std::move(dispatch_result)] {
return seastar::do_with(std::move(dispatch_result),
[this, FNAME, &tref](auto &dispatch_result) {
return epm->write_delayed_ool_extents(tref, dispatch_result.alloc_map
).si_then([this, FNAME, &tref, &dispatch_result] {
SUBTRACET(seastore_t, "update delayed extent mappings", tref);
return lba_manager->update_mappings(tref, dispatch_result.delayed_extents);
}).handle_error_interruptible(
crimson::ct_error::input_output_error::pass_further(),
crimson::ct_error::assert_all("invalid error")
);
});
}).si_then([this, FNAME, &tref] {
auto allocated_extents = tref.get_valid_pre_alloc_list();
auto num_extents = allocated_extents.size();
SUBTRACET(seastore_t, "process {} allocated extents", tref, num_extents);
return epm->write_preallocated_ool_extents(tref, allocated_extents
).handle_error_interruptible(
crimson::ct_error::input_output_error::pass_further(),
crimson::ct_error::assert_all("invalid error")
);
}).si_then([this, FNAME, &tref] {
SUBTRACET(seastore_t, "about to prepare", tref);
return tref.get_handle().enter(write_pipeline.prepare);
}).si_then([this, FNAME, &tref, trim_alloc_to=std::move(trim_alloc_to)]() mutable
-> submit_transaction_iertr::future<> {
if (trim_alloc_to && *trim_alloc_to != JOURNAL_SEQ_NULL) {
cache->trim_backref_bufs(*trim_alloc_to);
}
auto record = cache->prepare_record(
tref,
journal->get_trimmer().get_journal_head(),
journal->get_trimmer().get_dirty_tail());
tref.get_handle().maybe_release_collection_lock();
SUBTRACET(seastore_t, "about to submit to journal", tref);
return journal->submit_record(std::move(record), tref.get_handle()
).safe_then([this, FNAME, &tref](auto submit_result) mutable {
SUBDEBUGT(seastore_t, "committed with {}", tref, submit_result);
auto start_seq = submit_result.write_result.start_seq;
journal->get_trimmer().set_journal_head(start_seq);
cache->complete_commit(
tref,
submit_result.record_block_base,
start_seq);
std::vector<CachedExtentRef> lba_to_clear;
std::vector<CachedExtentRef> backref_to_clear;
lba_to_clear.reserve(tref.get_retired_set().size());
backref_to_clear.reserve(tref.get_retired_set().size());
for (auto &e: tref.get_retired_set()) {
if (e->is_logical() || is_lba_node(e->get_type()))
lba_to_clear.push_back(e);
else if (is_backref_node(e->get_type()))
backref_to_clear.push_back(e);
}
journal->get_trimmer().update_journal_tails(
cache->get_oldest_dirty_from().value_or(start_seq),
cache->get_oldest_backref_dirty_from().value_or(start_seq));
return journal->finish_commit(tref.get_src()
).then([&tref] {
return tref.get_handle().complete();
});
}).handle_error(
submit_transaction_iertr::pass_further{},
crimson::ct_error::all_same_way([](auto e) {
ceph_assert(0 == "Hit error submitting to journal");
})
);
}).finally([&tref]() {
tref.get_handle().exit();
});
}
seastar::future<> TransactionManager::flush(OrderingHandle &handle)
{
LOG_PREFIX(TransactionManager::flush);
SUBDEBUG(seastore_t, "H{} start", (void*)&handle);
return handle.enter(write_pipeline.reserve_projected_usage
).then([this, &handle] {
return handle.enter(write_pipeline.ool_writes);
}).then([this, &handle] {
return handle.enter(write_pipeline.prepare);
}).then([this, &handle] {
handle.maybe_release_collection_lock();
return journal->flush(handle);
}).then([FNAME, &handle] {
SUBDEBUG(seastore_t, "H{} completed", (void*)&handle);
});
}
TransactionManager::get_next_dirty_extents_ret
TransactionManager::get_next_dirty_extents(
Transaction &t,
journal_seq_t seq,
size_t max_bytes)
{
LOG_PREFIX(TransactionManager::get_next_dirty_extents);
DEBUGT("max_bytes={}B, seq={}", t, max_bytes, seq);
return cache->get_next_dirty_extents(t, seq, max_bytes);
}
TransactionManager::rewrite_extent_ret
TransactionManager::rewrite_logical_extent(
Transaction& t,
LogicalCachedExtentRef extent)
{
LOG_PREFIX(TransactionManager::rewrite_logical_extent);
if (extent->has_been_invalidated()) {
ERRORT("extent has been invalidated -- {}", t, *extent);
ceph_abort();
}
TRACET("rewriting extent -- {}", t, *extent);
auto lextent = extent->cast<LogicalCachedExtent>();
cache->retire_extent(t, extent);
auto nlextent = cache->alloc_new_extent_by_type(
t,
lextent->get_type(),
lextent->get_length(),
lextent->get_user_hint(),
// get target rewrite generation
lextent->get_rewrite_generation())->cast<LogicalCachedExtent>();
lextent->get_bptr().copy_out(
0,
lextent->get_length(),
nlextent->get_bptr().c_str());
nlextent->set_laddr(lextent->get_laddr());
nlextent->set_modify_time(lextent->get_modify_time());
DEBUGT("rewriting logical extent -- {} to {}", t, *lextent, *nlextent);
/* This update_mapping is, strictly speaking, unnecessary for delayed_alloc
* extents since we're going to do it again once we either do the ool write
* or allocate a relative inline addr. TODO: refactor AsyncCleaner to
* avoid this complication. */
return lba_manager->update_mapping(
t,
lextent->get_laddr(),
lextent->get_paddr(),
nlextent->get_paddr(),
nlextent.get());
}
TransactionManager::rewrite_extent_ret TransactionManager::rewrite_extent(
Transaction &t,
CachedExtentRef extent,
rewrite_gen_t target_generation,
sea_time_point modify_time)
{
LOG_PREFIX(TransactionManager::rewrite_extent);
{
auto updated = cache->update_extent_from_transaction(t, extent);
if (!updated) {
DEBUGT("extent is already retired, skipping -- {}", t, *extent);
return rewrite_extent_iertr::now();
}
extent = updated;
ceph_assert(!extent->is_pending_io());
}
assert(extent->is_valid() && !extent->is_initial_pending());
if (extent->is_dirty()) {
extent->set_target_rewrite_generation(INIT_GENERATION);
} else {
extent->set_target_rewrite_generation(target_generation);
ceph_assert(modify_time != NULL_TIME);
extent->set_modify_time(modify_time);
}
t.get_rewrite_version_stats().increment(extent->get_version());
if (is_backref_node(extent->get_type())) {
DEBUGT("rewriting backref extent -- {}", t, *extent);
return backref_manager->rewrite_extent(t, extent);
}
if (extent->get_type() == extent_types_t::ROOT) {
DEBUGT("rewriting root extent -- {}", t, *extent);
cache->duplicate_for_write(t, extent);
return rewrite_extent_iertr::now();
}
if (extent->is_logical()) {
return rewrite_logical_extent(t, extent->cast<LogicalCachedExtent>());
} else {
DEBUGT("rewriting physical extent -- {}", t, *extent);
return lba_manager->rewrite_extent(t, extent);
}
}
TransactionManager::get_extents_if_live_ret
TransactionManager::get_extents_if_live(
Transaction &t,
extent_types_t type,
paddr_t paddr,
laddr_t laddr,
extent_len_t len)
{
  LOG_PREFIX(TransactionManager::get_extents_if_live);
TRACET("{} {}~{} {}", t, type, laddr, len, paddr);
// This only works with segments to check if alive,
// as parallel transactions may split the extent at the same time.
ceph_assert(paddr.get_addr_type() == paddr_types_t::SEGMENT);
return cache->get_extent_if_cached(t, paddr, type
).si_then([=, this, &t](auto extent)
-> get_extents_if_live_ret {
if (extent && extent->get_length() == len) {
DEBUGT("{} {}~{} {} is live in cache -- {}",
t, type, laddr, len, paddr, *extent);
std::list<CachedExtentRef> res;
res.emplace_back(std::move(extent));
return get_extents_if_live_ret(
interruptible::ready_future_marker{},
res);
}
if (is_logical_type(type)) {
return lba_manager->get_mappings(
t,
laddr,
len
).si_then([=, this, &t](lba_pin_list_t pin_list) {
return seastar::do_with(
std::list<CachedExtentRef>(),
[=, this, &t, pin_list=std::move(pin_list)](
std::list<CachedExtentRef> &list) mutable
{
auto paddr_seg_id = paddr.as_seg_paddr().get_segment_id();
return trans_intr::parallel_for_each(
pin_list,
[=, this, &list, &t](
LBAMappingRef &pin) -> Cache::get_extent_iertr::future<>
{
auto pin_paddr = pin->get_val();
auto &pin_seg_paddr = pin_paddr.as_seg_paddr();
auto pin_paddr_seg_id = pin_seg_paddr.get_segment_id();
auto pin_len = pin->get_length();
if (pin_paddr_seg_id != paddr_seg_id) {
return seastar::now();
}
// Only extent split can happen during the lookup
ceph_assert(pin_seg_paddr >= paddr &&
pin_seg_paddr.add_offset(pin_len) <= paddr.add_offset(len));
return read_pin_by_type(t, std::move(pin), type
).si_then([&list](auto ret) {
list.emplace_back(std::move(ret));
return seastar::now();
});
}).si_then([&list] {
return get_extents_if_live_ret(
interruptible::ready_future_marker{},
std::move(list));
});
});
}).handle_error_interruptible(crimson::ct_error::enoent::handle([] {
return get_extents_if_live_ret(
interruptible::ready_future_marker{},
std::list<CachedExtentRef>());
}), crimson::ct_error::pass_further_all{});
} else {
return lba_manager->get_physical_extent_if_live(
t,
type,
paddr,
laddr,
len
).si_then([=, &t](auto ret) {
std::list<CachedExtentRef> res;
if (ret) {
DEBUGT("{} {}~{} {} is live as physical extent -- {}",
t, type, laddr, len, paddr, *ret);
res.emplace_back(std::move(ret));
} else {
DEBUGT("{} {}~{} {} is not live as physical extent",
t, type, laddr, len, paddr);
}
return get_extents_if_live_ret(
interruptible::ready_future_marker{},
std::move(res));
});
}
});
}
TransactionManager::~TransactionManager() {}
TransactionManagerRef make_transaction_manager(
Device *primary_device,
const std::vector<Device*> &secondary_devices,
bool is_test)
{
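  /*
   * Wire up the backend bottom-up: group the devices into segmented (sms) and
   * random-block (rbs) backends, derive the journal geometry from the primary
   * device, build the cleaner(s) and journal trimmer, and finally hand
   * everything to the TransactionManager constructed at the end.
   */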
auto epm = std::make_unique<ExtentPlacementManager>();
auto cache = std::make_unique<Cache>(*epm);
auto lba_manager = lba_manager::create_lba_manager(*cache);
auto sms = std::make_unique<SegmentManagerGroup>();
auto rbs = std::make_unique<RBMDeviceGroup>();
auto backref_manager = create_backref_manager(*cache);
SegmentManagerGroupRef cold_sms = nullptr;
std::vector<SegmentProvider*> segment_providers_by_id{DEVICE_ID_MAX, nullptr};
auto p_backend_type = primary_device->get_backend_type();
if (p_backend_type == backend_type_t::SEGMENTED) {
auto dtype = primary_device->get_device_type();
ceph_assert(dtype != device_type_t::HDD &&
dtype != device_type_t::EPHEMERAL_COLD);
sms->add_segment_manager(static_cast<SegmentManager*>(primary_device));
} else {
auto rbm = std::make_unique<BlockRBManager>(
static_cast<RBMDevice*>(primary_device), "", is_test);
rbs->add_rb_manager(std::move(rbm));
}
for (auto &p_dev : secondary_devices) {
if (p_dev->get_backend_type() == backend_type_t::SEGMENTED) {
if (p_dev->get_device_type() == primary_device->get_device_type()) {
sms->add_segment_manager(static_cast<SegmentManager*>(p_dev));
} else {
if (!cold_sms) {
cold_sms = std::make_unique<SegmentManagerGroup>();
}
cold_sms->add_segment_manager(static_cast<SegmentManager*>(p_dev));
}
} else {
auto rbm = std::make_unique<BlockRBManager>(
static_cast<RBMDevice*>(p_dev), "", is_test);
rbs->add_rb_manager(std::move(rbm));
}
}
auto journal_type = p_backend_type;
device_off_t roll_size;
device_off_t roll_start;
if (journal_type == journal_type_t::SEGMENTED) {
roll_size = static_cast<SegmentManager*>(primary_device)->get_segment_size();
roll_start = 0;
} else {
roll_size = static_cast<random_block_device::RBMDevice*>(primary_device)
->get_journal_size() - primary_device->get_block_size();
// see CircularBoundedJournal::get_records_start()
roll_start = static_cast<random_block_device::RBMDevice*>(primary_device)
->get_shard_journal_start() + primary_device->get_block_size();
ceph_assert_always(roll_size <= DEVICE_OFF_MAX);
ceph_assert_always((std::size_t)roll_size + roll_start <=
primary_device->get_available_size());
}
ceph_assert(roll_size % primary_device->get_block_size() == 0);
ceph_assert(roll_start % primary_device->get_block_size() == 0);
bool cleaner_is_detailed;
SegmentCleaner::config_t cleaner_config;
JournalTrimmerImpl::config_t trimmer_config;
if (is_test) {
cleaner_is_detailed = true;
cleaner_config = SegmentCleaner::config_t::get_test();
trimmer_config = JournalTrimmerImpl::config_t::get_test(
roll_size, journal_type);
} else {
cleaner_is_detailed = false;
cleaner_config = SegmentCleaner::config_t::get_default();
trimmer_config = JournalTrimmerImpl::config_t::get_default(
roll_size, journal_type);
}
auto journal_trimmer = JournalTrimmerImpl::create(
*backref_manager, trimmer_config,
journal_type, roll_start, roll_size);
AsyncCleanerRef cleaner;
JournalRef journal;
SegmentCleanerRef cold_segment_cleaner = nullptr;
if (cold_sms) {
cold_segment_cleaner = SegmentCleaner::create(
cleaner_config,
std::move(cold_sms),
*backref_manager,
epm->get_ool_segment_seq_allocator(),
cleaner_is_detailed,
/* is_cold = */ true);
if (journal_type == journal_type_t::SEGMENTED) {
for (auto id : cold_segment_cleaner->get_device_ids()) {
segment_providers_by_id[id] =
static_cast<SegmentProvider*>(cold_segment_cleaner.get());
}
}
}
if (journal_type == journal_type_t::SEGMENTED) {
cleaner = SegmentCleaner::create(
cleaner_config,
std::move(sms),
*backref_manager,
epm->get_ool_segment_seq_allocator(),
cleaner_is_detailed);
auto segment_cleaner = static_cast<SegmentCleaner*>(cleaner.get());
for (auto id : segment_cleaner->get_device_ids()) {
segment_providers_by_id[id] =
static_cast<SegmentProvider*>(segment_cleaner);
}
segment_cleaner->set_journal_trimmer(*journal_trimmer);
journal = journal::make_segmented(
*segment_cleaner,
*journal_trimmer);
} else {
cleaner = RBMCleaner::create(
std::move(rbs),
*backref_manager,
cleaner_is_detailed);
journal = journal::make_circularbounded(
*journal_trimmer,
static_cast<random_block_device::RBMDevice*>(primary_device),
"");
}
cache->set_segment_providers(std::move(segment_providers_by_id));
epm->init(std::move(journal_trimmer),
std::move(cleaner),
std::move(cold_segment_cleaner));
epm->set_primary_device(primary_device);
return std::make_unique<TransactionManager>(
std::move(journal),
std::move(cache),
std::move(lba_manager),
std::move(epm),
std::move(backref_manager));
}
}
| 25,265 | 32.244737 | 86 | cc |
null | ceph-main/src/crimson/os/seastore/transaction_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iostream>
#include <optional>
#include <vector>
#include <utility>
#include <functional>
#include <boost/intrusive_ptr.hpp>
#include <boost/iterator/counting_iterator.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/future.hh>
#include "include/ceph_assert.h"
#include "include/buffer.h"
#include "crimson/osd/exceptions.h"
#include "crimson/os/seastore/logging.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/lba_manager.h"
#include "crimson/os/seastore/backref_manager.h"
#include "crimson/os/seastore/journal.h"
#include "crimson/os/seastore/extent_placement_manager.h"
#include "crimson/os/seastore/device.h"
namespace crimson::os::seastore {
class Journal;
template <typename F>
auto repeat_eagain(F &&f) {
return seastar::do_with(
std::forward<F>(f),
[](auto &f)
{
return crimson::repeat([&f] {
return std::invoke(f
).safe_then([] {
return seastar::stop_iteration::yes;
}).handle_error(
[](const crimson::ct_error::eagain &e) {
return seastar::stop_iteration::no;
},
crimson::ct_error::pass_further_all{}
);
});
});
}
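/*
 * Illustrative sketch (not part of the upstream tree): retry a step that may
 * fail with crimson::ct_error::eagain, e.g. a transaction invalidated by a
 * concurrent commit.
 *
 *   return repeat_eagain([&] {
 *     return run_transaction_once();  // hypothetical errorator-returning step
 *   });
 */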
/**
* TransactionManager
*
* Abstraction hiding reading and writing to persistence.
* Exposes transaction based interface with read isolation.
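 *
 * Typical use (illustrative sketch only; error handling and transaction
 * lifetime management are elided): create a transaction with
 * create_transaction(), read/allocate/mutate extents through the methods
 * below, then commit with submit_transaction(), retrying on eagain
 * (e.g. via repeat_eagain above).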
*/
class TransactionManager : public ExtentCallbackInterface {
public:
TransactionManager(
JournalRef journal,
CacheRef cache,
LBAManagerRef lba_manager,
ExtentPlacementManagerRef &&epm,
BackrefManagerRef&& backref_manager);
/// Writes initial metadata to disk
using mkfs_ertr = base_ertr;
mkfs_ertr::future<> mkfs();
/// Reads initial metadata from disk
using mount_ertr = base_ertr;
mount_ertr::future<> mount();
/// Closes transaction_manager
using close_ertr = base_ertr;
close_ertr::future<> close();
/// Resets transaction
void reset_transaction_preserve_handle(Transaction &t) {
return cache->reset_transaction_preserve_handle(t);
}
/**
* get_pin
*
* Get the logical pin at offset
*/
using get_pin_iertr = LBAManager::get_mapping_iertr;
using get_pin_ret = LBAManager::get_mapping_iertr::future<LBAMappingRef>;
get_pin_ret get_pin(
Transaction &t,
laddr_t offset) {
LOG_PREFIX(TransactionManager::get_pin);
SUBTRACET(seastore_tm, "{}", t, offset);
return lba_manager->get_mapping(t, offset);
}
/**
* get_pins
*
* Get logical pins overlapping offset~length
*/
using get_pins_iertr = LBAManager::get_mappings_iertr;
using get_pins_ret = get_pins_iertr::future<lba_pin_list_t>;
get_pins_ret get_pins(
Transaction &t,
laddr_t offset,
extent_len_t length) {
LOG_PREFIX(TransactionManager::get_pins);
SUBDEBUGT(seastore_tm, "{}~{}", t, offset, length);
return lba_manager->get_mappings(
t, offset, length);
}
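  /*
   * Illustrative sketch (`laddr` and `len` are assumed to describe an
   * existing mapping; `t` is an open Transaction):
   *
   *   return get_pins(t, laddr, len
   *   ).si_then([](lba_pin_list_t pins) {
   *     for (auto &pin : pins) {
   *       // pin->get_key(), pin->get_val() and pin->get_length() describe
   *       // one laddr -> paddr mapping overlapping the queried range
   *     }
   *   });
   */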
/**
* read_extent
*
* Read extent of type T at offset~length
*/
using read_extent_iertr = get_pin_iertr;
template <typename T>
using read_extent_ret = read_extent_iertr::future<
TCachedExtentRef<T>>;
template <typename T>
read_extent_ret<T> read_extent(
Transaction &t,
laddr_t offset,
extent_len_t length) {
LOG_PREFIX(TransactionManager::read_extent);
SUBTRACET(seastore_tm, "{}~{}", t, offset, length);
return get_pin(
t, offset
).si_then([this, FNAME, &t, offset, length] (auto pin)
-> read_extent_ret<T> {
if (length != pin->get_length() || !pin->get_val().is_real()) {
SUBERRORT(seastore_tm,
"offset {} len {} got wrong pin {}",
t, offset, length, *pin);
ceph_assert(0 == "Should be impossible");
}
return this->read_pin<T>(t, std::move(pin));
});
}
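  /*
   * Illustrative sketch (`SomeExtent` stands in for a concrete logical
   * extent type mapped at `laddr` with length `len`):
   *
   *   return read_extent<SomeExtent>(t, laddr, len
   *   ).si_then([](TCachedExtentRef<SomeExtent> extent) {
   *     // extent->get_bptr() holds the fully loaded contents
   *   });
   */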
/**
* read_extent
*
* Read extent of type T at offset
*/
template <typename T>
read_extent_ret<T> read_extent(
Transaction &t,
laddr_t offset) {
LOG_PREFIX(TransactionManager::read_extent);
SUBTRACET(seastore_tm, "{}", t, offset);
return get_pin(
t, offset
).si_then([this, FNAME, &t, offset] (auto pin)
-> read_extent_ret<T> {
if (!pin->get_val().is_real()) {
SUBERRORT(seastore_tm,
"offset {} got wrong pin {}",
t, offset, *pin);
ceph_assert(0 == "Should be impossible");
}
return this->read_pin<T>(t, std::move(pin));
});
}
template <typename T>
base_iertr::future<TCachedExtentRef<T>> read_pin(
Transaction &t,
LBAMappingRef pin)
{
auto v = pin->get_logical_extent(t);
if (v.has_child()) {
return v.get_child_fut().safe_then([](auto extent) {
return extent->template cast<T>();
});
} else {
return pin_to_extent<T>(t, std::move(pin));
}
}
base_iertr::future<LogicalCachedExtentRef> read_pin_by_type(
Transaction &t,
LBAMappingRef pin,
extent_types_t type)
{
auto v = pin->get_logical_extent(t);
if (v.has_child()) {
return std::move(v.get_child_fut());
} else {
return pin_to_extent_by_type(t, std::move(pin), type);
}
}
/// Obtain mutable copy of extent
LogicalCachedExtentRef get_mutable_extent(Transaction &t, LogicalCachedExtentRef ref) {
LOG_PREFIX(TransactionManager::get_mutable_extent);
auto ret = cache->duplicate_for_write(
t,
ref)->cast<LogicalCachedExtent>();
if (!ret->has_laddr()) {
SUBDEBUGT(seastore_tm,
"duplicating extent for write -- {} -> {}",
t,
*ref,
*ret);
ret->set_laddr(ref->get_laddr());
} else {
SUBTRACET(seastore_tm,
"extent is already duplicated -- {}",
t,
*ref);
assert(ref->is_mutable());
assert(&*ref == &*ret);
}
return ret;
}
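  /*
   * Note: extents obtained from read_extent()/read_pin() must not be modified
   * directly; get_mutable_extent() returns the transaction-local writable
   * copy. Illustrative sketch (`data` is a hypothetical bufferlist payload):
   *
   *   auto mut = get_mutable_extent(t, extent);
   *   mut->get_bptr().copy_in(0, data.length(), data.c_str());
   */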
using ref_iertr = LBAManager::ref_iertr;
using ref_ret = ref_iertr::future<unsigned>;
/// Add refcount for ref
ref_ret inc_ref(
Transaction &t,
LogicalCachedExtentRef &ref);
/// Add refcount for offset
ref_ret inc_ref(
Transaction &t,
laddr_t offset);
/// Remove refcount for ref
ref_ret dec_ref(
Transaction &t,
LogicalCachedExtentRef &ref);
/// Remove refcount for offset
ref_ret dec_ref(
Transaction &t,
laddr_t offset);
/// remove refcount for list of offset
using refs_ret = ref_iertr::future<std::vector<unsigned>>;
refs_ret dec_ref(
Transaction &t,
std::vector<laddr_t> offsets);
/**
* alloc_extent
*
   * Allocates a new block of type T of length len, mapped to the lowest
   * available laddr range at or above laddr_hint.
*/
using alloc_extent_iertr = LBAManager::alloc_extent_iertr;
template <typename T>
using alloc_extent_ret = alloc_extent_iertr::future<TCachedExtentRef<T>>;
template <typename T>
alloc_extent_ret<T> alloc_extent(
Transaction &t,
laddr_t laddr_hint,
extent_len_t len,
placement_hint_t placement_hint = placement_hint_t::HOT) {
LOG_PREFIX(TransactionManager::alloc_extent);
SUBTRACET(seastore_tm, "{} len={}, placement_hint={}, laddr_hint={}",
t, T::TYPE, len, placement_hint, laddr_hint);
ceph_assert(is_aligned(laddr_hint, epm->get_block_size()));
auto ext = cache->alloc_new_extent<T>(
t,
len,
placement_hint,
INIT_GENERATION);
return lba_manager->alloc_extent(
t,
laddr_hint,
len,
ext->get_paddr(),
ext.get()
).si_then([ext=std::move(ext), laddr_hint, &t](auto &&) mutable {
LOG_PREFIX(TransactionManager::alloc_extent);
SUBDEBUGT(seastore_tm, "new extent: {}, laddr_hint: {}", t, *ext, laddr_hint);
return alloc_extent_iertr::make_ready_future<TCachedExtentRef<T>>(
std::move(ext));
});
}
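  /*
   * Illustrative sketch (`SomeExtent` is a placeholder logical extent type;
   * `hint` is a block-aligned laddr):
   *
   *   return alloc_extent<SomeExtent>(t, hint, length
   *   ).si_then([](TCachedExtentRef<SomeExtent> extent) {
   *     // extent is initial-pending; fill extent->get_bptr() before the
   *     // transaction is submitted
   *   });
   */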
/**
* remap_pin
*
   * Remap the original extent into one or more new extents and return the
   * pins of the new extents.
*/
struct remap_entry {
extent_len_t offset;
extent_len_t len;
remap_entry(extent_len_t _offset, extent_len_t _len) {
offset = _offset;
len = _len;
}
};
using remap_pin_iertr = base_iertr;
template <std::size_t N>
using remap_pin_ret = remap_pin_iertr::future<std::array<LBAMappingRef, N>>;
template <typename T, std::size_t N>
remap_pin_ret<N> remap_pin(
Transaction &t,
LBAMappingRef &&pin,
std::array<remap_entry, N> remaps) {
#ifndef NDEBUG
std::sort(remaps.begin(), remaps.end(),
[](remap_entry x, remap_entry y) {
return x.offset < y.offset;
});
auto original_len = pin->get_length();
extent_len_t total_remap_len = 0;
extent_len_t last_offset = 0;
extent_len_t last_len = 0;
for (auto &remap : remaps) {
auto remap_offset = remap.offset;
auto remap_len = remap.len;
total_remap_len += remap.len;
ceph_assert(remap_offset >= (last_offset + last_len));
last_offset = remap_offset;
last_len = remap_len;
}
ceph_assert(total_remap_len < original_len);
#endif
// FIXME: paddr can be absolute and pending
ceph_assert(pin->get_val().is_absolute());
return cache->get_extent_if_cached(
t, pin->get_val(), T::TYPE
).si_then([this, &t, remaps,
original_laddr = pin->get_key(),
original_paddr = pin->get_val(),
original_len = pin->get_length()](auto ext) {
std::optional<ceph::bufferptr> original_bptr;
LOG_PREFIX(TransactionManager::remap_pin);
SUBDEBUGT(seastore_tm,
"original laddr: {}, original paddr: {}, original length: {},"
" remap to {} extents",
t, original_laddr, original_paddr, original_len, remaps.size());
if (ext) {
// FIXME: cannot and will not remap a dirty extent for now.
ceph_assert(!ext->is_dirty());
ceph_assert(!ext->is_mutable());
ceph_assert(ext->get_length() == original_len);
original_bptr = ext->get_bptr();
}
return seastar::do_with(
std::array<LBAMappingRef, N>(),
0,
std::move(original_bptr),
std::vector<remap_entry>(remaps.begin(), remaps.end()),
[this, &t, original_laddr, original_paddr, original_len]
(auto &ret, auto &count, auto &original_bptr, auto &remaps) {
return dec_ref(t, original_laddr
).si_then([this, &t, &original_bptr, &ret, &count, &remaps,
original_laddr, original_paddr, original_len](auto) {
return trans_intr::do_for_each(
remaps.begin(),
remaps.end(),
[this, &t, &original_bptr, &ret, &count,
original_laddr, original_paddr, original_len](auto &remap) {
LOG_PREFIX(TransactionManager::remap_pin);
auto remap_offset = remap.offset;
auto remap_len = remap.len;
auto remap_laddr = original_laddr + remap_offset;
auto remap_paddr = original_paddr.add_offset(remap_offset);
ceph_assert(remap_len < original_len);
ceph_assert(remap_offset + remap_len <= original_len);
ceph_assert(remap_len != 0);
ceph_assert(remap_offset % cache->get_block_size() == 0);
ceph_assert(remap_len % cache->get_block_size() == 0);
SUBDEBUGT(seastore_tm,
"remap laddr: {}, remap paddr: {}, remap length: {}", t,
remap_laddr, remap_paddr, remap_len);
return alloc_remapped_extent<T>(
t,
remap_laddr,
remap_paddr,
remap_len,
original_laddr,
std::move(original_bptr)
).si_then([&ret, &count, remap_laddr](auto &&npin) {
ceph_assert(npin->get_key() == remap_laddr);
ret[count++] = std::move(npin);
});
});
}).handle_error_interruptible(
remap_pin_iertr::pass_further{},
crimson::ct_error::assert_all{
"TransactionManager::remap_pin hit invalid error"
}
).si_then([&ret, &count] {
ceph_assert(count == N);
return remap_pin_iertr::make_ready_future<
std::array<LBAMappingRef, N>>(std::move(ret));
});
});
});
}
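  /*
   * Illustrative sketch: keep the head and tail of an existing mapping and
   * drop the middle (all names besides remap_entry are placeholders).
   *
   *   return remap_pin<SomeExtent, 2>(
   *     t, std::move(pin),
   *     std::array{
   *       remap_entry(0, head_len),
   *       remap_entry(tail_off, tail_len)
   *     }
   *   ).si_then([](std::array<LBAMappingRef, 2> pins) {
   *     // pins[0] and pins[1] map the retained head and tail ranges
   *   });
   */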
using reserve_extent_iertr = alloc_extent_iertr;
using reserve_extent_ret = reserve_extent_iertr::future<LBAMappingRef>;
reserve_extent_ret reserve_region(
Transaction &t,
laddr_t hint,
extent_len_t len) {
LOG_PREFIX(TransactionManager::reserve_region);
SUBDEBUGT(seastore_tm, "len={}, laddr_hint={}", t, len, hint);
ceph_assert(is_aligned(hint, epm->get_block_size()));
return lba_manager->alloc_extent(
t,
hint,
len,
P_ADDR_ZERO,
nullptr);
}
/* alloc_extents
*
   * Allocates num new blocks of type T.
*/
using alloc_extents_iertr = alloc_extent_iertr;
template<class T>
alloc_extents_iertr::future<std::vector<TCachedExtentRef<T>>>
alloc_extents(
Transaction &t,
laddr_t hint,
extent_len_t len,
int num) {
LOG_PREFIX(TransactionManager::alloc_extents);
SUBDEBUGT(seastore_tm, "len={}, laddr_hint={}, num={}",
t, len, hint, num);
return seastar::do_with(std::vector<TCachedExtentRef<T>>(),
[this, &t, hint, len, num] (auto &extents) {
return trans_intr::do_for_each(
boost::make_counting_iterator(0),
boost::make_counting_iterator(num),
[this, &t, len, hint, &extents] (auto i) {
return alloc_extent<T>(t, hint, len).si_then(
[&extents](auto &&node) {
extents.push_back(node);
});
}).si_then([&extents] {
return alloc_extents_iertr::make_ready_future
<std::vector<TCachedExtentRef<T>>>(std::move(extents));
});
});
}
/**
* submit_transaction
*
* Atomically submits transaction to persistence
*/
using submit_transaction_iertr = base_iertr;
submit_transaction_iertr::future<> submit_transaction(Transaction &);
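  /*
   * Illustrative sketch (transaction lifetime management via seastar::do_with
   * and the interruptible-future plumbing are elided):
   *
   *   return repeat_eagain([this] {
   *     auto t = create_transaction(Transaction::src_t::MUTATE, "example");
   *     return mutate_something(*t)        // hypothetical mutation step
   *     .si_then([this, &t] {
   *       return submit_transaction(*t);
   *     });
   *   });
   */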
/**
* flush
*
* Block until all outstanding IOs on handle are committed.
* Note, flush() machinery must go through the same pipeline
* stages and locks as submit_transaction.
*/
seastar::future<> flush(OrderingHandle &handle);
/*
* ExtentCallbackInterface
*/
/// weak transaction should be type READ
TransactionRef create_transaction(
Transaction::src_t src,
const char* name,
bool is_weak=false) final {
return cache->create_transaction(src, name, is_weak);
}
using ExtentCallbackInterface::submit_transaction_direct_ret;
submit_transaction_direct_ret submit_transaction_direct(
Transaction &t,
std::optional<journal_seq_t> seq_to_trim = std::nullopt) final;
using ExtentCallbackInterface::get_next_dirty_extents_ret;
get_next_dirty_extents_ret get_next_dirty_extents(
Transaction &t,
journal_seq_t seq,
size_t max_bytes) final;
using ExtentCallbackInterface::rewrite_extent_ret;
rewrite_extent_ret rewrite_extent(
Transaction &t,
CachedExtentRef extent,
rewrite_gen_t target_generation,
sea_time_point modify_time) final;
using ExtentCallbackInterface::get_extents_if_live_ret;
get_extents_if_live_ret get_extents_if_live(
Transaction &t,
extent_types_t type,
paddr_t paddr,
laddr_t laddr,
extent_len_t len) final;
/**
* read_root_meta
*
* Read root block meta entry for key.
*/
using read_root_meta_iertr = base_iertr;
using read_root_meta_bare = std::optional<std::string>;
using read_root_meta_ret = read_root_meta_iertr::future<
read_root_meta_bare>;
read_root_meta_ret read_root_meta(
Transaction &t,
const std::string &key) {
return cache->get_root(
t
).si_then([&key, &t](auto root) {
LOG_PREFIX(TransactionManager::read_root_meta);
auto meta = root->root.get_meta();
auto iter = meta.find(key);
if (iter == meta.end()) {
SUBDEBUGT(seastore_tm, "{} -> nullopt", t, key);
return seastar::make_ready_future<read_root_meta_bare>(std::nullopt);
} else {
SUBDEBUGT(seastore_tm, "{} -> {}", t, key, iter->second);
return seastar::make_ready_future<read_root_meta_bare>(iter->second);
}
});
}
/**
* update_root_meta
*
* Update root block meta entry for key to value.
*/
using update_root_meta_iertr = base_iertr;
using update_root_meta_ret = update_root_meta_iertr::future<>;
update_root_meta_ret update_root_meta(
Transaction& t,
const std::string& key,
const std::string& value) {
LOG_PREFIX(TransactionManager::update_root_meta);
    SUBDEBUGT(seastore_tm, "{} -> {}", t, key, value);
return cache->get_root(
t
).si_then([this, &t, &key, &value](RootBlockRef root) {
root = cache->duplicate_for_write(t, root)->cast<RootBlock>();
auto meta = root->root.get_meta();
meta[key] = value;
root->root.set_meta(meta);
return seastar::now();
});
}
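  /*
   * Illustrative sketch: root meta acts as a small string -> string map
   * persisted in the root block ("example_key"/"example_value" are
   * placeholders).
   *
   *   return update_root_meta(t, "example_key", "example_value"
   *   ).si_then([this, &t] {
   *     return read_root_meta(t, "example_key");
   *   }).si_then([](auto maybe_value) {
   *     // maybe_value holds "example_value" on success
   *   });
   */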
/**
* read_onode_root
*
* Get onode-tree root logical address
*/
using read_onode_root_iertr = base_iertr;
using read_onode_root_ret = read_onode_root_iertr::future<laddr_t>;
read_onode_root_ret read_onode_root(Transaction &t) {
return cache->get_root(t).si_then([&t](auto croot) {
LOG_PREFIX(TransactionManager::read_onode_root);
laddr_t ret = croot->get_root().onode_root;
SUBTRACET(seastore_tm, "{}", t, ret);
return ret;
});
}
/**
* write_onode_root
*
* Write onode-tree root logical address, must be called after read.
*/
void write_onode_root(Transaction &t, laddr_t addr) {
LOG_PREFIX(TransactionManager::write_onode_root);
SUBDEBUGT(seastore_tm, "{}", t, addr);
auto croot = cache->get_root_fast(t);
croot = cache->duplicate_for_write(t, croot)->cast<RootBlock>();
croot->get_root().onode_root = addr;
}
/**
* read_collection_root
*
* Get collection root addr
*/
using read_collection_root_iertr = base_iertr;
using read_collection_root_ret = read_collection_root_iertr::future<
coll_root_t>;
read_collection_root_ret read_collection_root(Transaction &t) {
return cache->get_root(t).si_then([&t](auto croot) {
LOG_PREFIX(TransactionManager::read_collection_root);
auto ret = croot->get_root().collection_root.get();
SUBTRACET(seastore_tm, "{}~{}",
t, ret.get_location(), ret.get_size());
return ret;
});
}
/**
* write_collection_root
*
* Update collection root addr
*/
void write_collection_root(Transaction &t, coll_root_t cmroot) {
LOG_PREFIX(TransactionManager::write_collection_root);
SUBDEBUGT(seastore_tm, "{}~{}",
t, cmroot.get_location(), cmroot.get_size());
auto croot = cache->get_root_fast(t);
croot = cache->duplicate_for_write(t, croot)->cast<RootBlock>();
croot->get_root().collection_root.update(cmroot);
}
extent_len_t get_block_size() const {
return epm->get_block_size();
}
store_statfs_t store_stat() const {
return epm->get_stat();
}
~TransactionManager();
private:
friend class Transaction;
CacheRef cache;
LBAManagerRef lba_manager;
JournalRef journal;
ExtentPlacementManagerRef epm;
BackrefManagerRef backref_manager;
WritePipeline write_pipeline;
rewrite_extent_ret rewrite_logical_extent(
Transaction& t,
LogicalCachedExtentRef extent);
submit_transaction_direct_ret do_submit_transaction(
Transaction &t,
ExtentPlacementManager::dispatch_result_t dispatch_result,
std::optional<journal_seq_t> seq_to_trim = std::nullopt);
/**
* pin_to_extent
*
* Get extent mapped at pin.
*/
using pin_to_extent_iertr = base_iertr;
template <typename T>
using pin_to_extent_ret = pin_to_extent_iertr::future<
TCachedExtentRef<T>>;
template <typename T>
pin_to_extent_ret<T> pin_to_extent(
Transaction &t,
LBAMappingRef pin) {
LOG_PREFIX(TransactionManager::pin_to_extent);
SUBTRACET(seastore_tm, "getting extent {}", t, *pin);
static_assert(is_logical_type(T::TYPE));
using ret = pin_to_extent_ret<T>;
auto &pref = *pin;
return cache->get_absent_extent<T>(
t,
pref.get_val(),
pref.get_length(),
[pin=std::move(pin)]
(T &extent) mutable {
assert(!extent.has_laddr());
assert(!extent.has_been_invalidated());
assert(!pin->has_been_invalidated());
assert(pin->get_parent());
pin->link_child(&extent);
extent.set_laddr(pin->get_key());
}
).si_then([FNAME, &t](auto ref) mutable -> ret {
SUBTRACET(seastore_tm, "got extent -- {}", t, *ref);
assert(ref->is_fully_loaded());
return pin_to_extent_ret<T>(
interruptible::ready_future_marker{},
std::move(ref));
});
}
/**
* pin_to_extent_by_type
*
* Get extent mapped at pin.
*/
using pin_to_extent_by_type_ret = pin_to_extent_iertr::future<
LogicalCachedExtentRef>;
pin_to_extent_by_type_ret pin_to_extent_by_type(
Transaction &t,
LBAMappingRef pin,
extent_types_t type)
{
LOG_PREFIX(TransactionManager::pin_to_extent_by_type);
SUBTRACET(seastore_tm, "getting extent {} type {}", t, *pin, type);
assert(is_logical_type(type));
auto &pref = *pin;
return cache->get_absent_extent_by_type(
t,
type,
pref.get_val(),
pref.get_key(),
pref.get_length(),
[pin=std::move(pin)](CachedExtent &extent) mutable {
auto &lextent = static_cast<LogicalCachedExtent&>(extent);
assert(!lextent.has_laddr());
assert(!lextent.has_been_invalidated());
assert(!pin->has_been_invalidated());
assert(pin->get_parent());
assert(!pin->get_parent()->is_pending());
pin->link_child(&lextent);
lextent.set_laddr(pin->get_key());
}
).si_then([FNAME, &t](auto ref) {
SUBTRACET(seastore_tm, "got extent -- {}", t, *ref);
assert(ref->is_fully_loaded());
return pin_to_extent_by_type_ret(
interruptible::ready_future_marker{},
std::move(ref->template cast<LogicalCachedExtent>()));
});
}
/**
* alloc_remapped_extent
*
   * Allocates a new extent at the given remap_paddr, which must be absolute,
   * and fills it from the provided buffer if one exists; otherwise the new
   * extent is left unfilled and no disk read is issued.
   * Returns the mapping of the new extent.
   *
   * Callers must ensure the end laddr of the remapped extent does not exceed
   * the end laddr of the original extent.
*/
using alloc_remapped_extent_iertr =
alloc_extent_iertr::extend_ertr<Device::read_ertr>;
using alloc_remapped_extent_ret =
alloc_remapped_extent_iertr::future<LBAMappingRef>;
template <typename T>
alloc_remapped_extent_ret alloc_remapped_extent(
Transaction &t,
laddr_t remap_laddr,
paddr_t remap_paddr,
extent_len_t remap_length,
laddr_t original_laddr,
std::optional<ceph::bufferptr> &&original_bptr) {
LOG_PREFIX(TransactionManager::alloc_remapped_extent);
SUBDEBUG(seastore_tm, "alloc remapped extent: remap_laddr: {}, "
"remap_paddr: {}, remap_length: {}, has data in cache: {} ",
remap_laddr, remap_paddr, remap_length,
original_bptr.has_value() ? "true":"false");
auto ext = cache->alloc_remapped_extent<T>(
t,
remap_laddr,
remap_paddr,
remap_length,
original_laddr,
std::move(original_bptr));
return lba_manager->alloc_extent(
t,
remap_laddr,
remap_length,
remap_paddr,
ext.get()
).si_then([remap_laddr, remap_length, remap_paddr](auto &&ref) {
assert(ref->get_key() == remap_laddr);
assert(ref->get_val() == remap_paddr);
assert(ref->get_length() == remap_length);
return alloc_remapped_extent_iertr::make_ready_future
<LBAMappingRef>(std::move(ref));
});
}
public:
// Testing interfaces
auto get_epm() {
return epm.get();
}
auto get_lba_manager() {
return lba_manager.get();
}
auto get_backref_manager() {
return backref_manager.get();
}
auto get_cache() {
return cache.get();
}
auto get_journal() {
return journal.get();
}
};
using TransactionManagerRef = std::unique_ptr<TransactionManager>;
TransactionManagerRef make_transaction_manager(
Device *primary_device,
const std::vector<Device*> &secondary_devices,
bool is_test);
}
| 24,436 | 28.764921 | 89 | h |
null | ceph-main/src/crimson/os/seastore/backref/backref_tree_node.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "crimson/os/seastore/backref/backref_tree_node.h"
namespace crimson::os::seastore::backref {
std::ostream& operator<<(std::ostream &out, const backref_map_val_t& val) {
return out << "backref_map_val_t("
<< val.laddr
<< "~" << val.len << ")";
}
} // namespace crimson::os::seastore::backref
| 416 | 26.8 | 75 | cc |
null | ceph-main/src/crimson/os/seastore/backref/backref_tree_node.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/btree/fixed_kv_node.h"
namespace crimson::os::seastore::backref {
using backref_node_meta_t = fixed_kv_node_meta_t<paddr_t>;
using backref_node_meta_le_t = fixed_kv_node_meta_le_t<paddr_t>;
constexpr size_t INTERNAL_NODE_CAPACITY = 254;
constexpr size_t LEAF_NODE_CAPACITY = 169;
using BackrefNode = FixedKVNode<paddr_t>;
struct backref_map_val_t {
extent_len_t len = 0; ///< length of extents
laddr_t laddr = 0; ///< logical address of extents
extent_types_t type = extent_types_t::ROOT;
backref_map_val_t() = default;
backref_map_val_t(
extent_len_t len,
laddr_t laddr,
extent_types_t type)
: len(len), laddr(laddr), type(type) {}
bool operator==(const backref_map_val_t& rhs) const noexcept {
return len == rhs.len && laddr == rhs.laddr;
}
};
std::ostream& operator<<(std::ostream &out, const backref_map_val_t& val);
struct backref_map_val_le_t {
extent_len_le_t len = init_extent_len_le(0);
laddr_le_t laddr = laddr_le_t(0);
extent_types_le_t type = 0;
backref_map_val_le_t() = default;
backref_map_val_le_t(const backref_map_val_le_t &) = default;
explicit backref_map_val_le_t(const backref_map_val_t &val)
: len(init_extent_len_le(val.len)),
laddr(val.laddr),
type(extent_types_le_t(val.type)) {}
operator backref_map_val_t() const {
return backref_map_val_t{len, laddr, (extent_types_t)type};
}
};
class BackrefInternalNode
: public FixedKVInternalNode<
INTERNAL_NODE_CAPACITY,
paddr_t, paddr_le_t,
BACKREF_NODE_SIZE,
BackrefInternalNode> {
public:
template <typename... T>
BackrefInternalNode(T&&... t) :
FixedKVInternalNode(std::forward<T>(t)...) {}
static constexpr extent_types_t TYPE = extent_types_t::BACKREF_INTERNAL;
extent_types_t get_type() const final {
return TYPE;
}
};
using BackrefInternalNodeRef = BackrefInternalNode::Ref;
class BackrefLeafNode
: public FixedKVLeafNode<
LEAF_NODE_CAPACITY,
paddr_t, paddr_le_t,
backref_map_val_t, backref_map_val_le_t,
BACKREF_NODE_SIZE,
BackrefLeafNode,
false> {
public:
template <typename... T>
BackrefLeafNode(T&&... t) :
FixedKVLeafNode(std::forward<T>(t)...) {}
static constexpr extent_types_t TYPE = extent_types_t::BACKREF_LEAF;
extent_types_t get_type() const final {
return TYPE;
}
const_iterator insert(
const_iterator iter,
paddr_t key,
backref_map_val_t val,
LogicalCachedExtent*) final {
journal_insert(
iter,
key,
val,
maybe_get_delta_buffer());
return iter;
}
void update(
const_iterator iter,
backref_map_val_t val,
LogicalCachedExtent*) final {
return journal_update(
iter,
val,
maybe_get_delta_buffer());
}
void remove(const_iterator iter) final {
return journal_remove(
iter,
maybe_get_delta_buffer());
}
// backref leaf nodes don't have to resolve relative addresses
void resolve_relative_addrs(paddr_t base) final {}
void node_resolve_vals(iterator from, iterator to) const final {}
void node_unresolve_vals(iterator from, iterator to) const final {}
};
using BackrefLeafNodeRef = BackrefLeafNode::Ref;
} // namespace crimson::os::seastore::backref
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::backref::backref_map_val_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::backref::BackrefInternalNode> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::backref::BackrefLeafNode> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::backref::backref_node_meta_t> : fmt::ostream_formatter {};
#endif
| 3,877 | 27.101449 | 115 | h |
null | ceph-main/src/crimson/os/seastore/backref/btree_backref_manager.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "crimson/os/seastore/backref/btree_backref_manager.h"
SET_SUBSYS(seastore_backref);
namespace crimson::os::seastore {
template<>
Transaction::tree_stats_t& get_tree_stats<
crimson::os::seastore::backref::BackrefBtree>(Transaction &t) {
return t.get_backref_tree_stats();
}
template<>
phy_tree_root_t& get_phy_tree_root<
crimson::os::seastore::backref::BackrefBtree>(root_t &r) {
return r.backref_root;
}
template<>
const get_phy_tree_root_node_ret get_phy_tree_root_node<
crimson::os::seastore::backref::BackrefBtree>(
const RootBlockRef &root_block, op_context_t<paddr_t> c) {
auto backref_root = root_block->backref_root_node;
if (backref_root) {
ceph_assert(backref_root->is_initial_pending()
== root_block->is_pending());
return {true,
trans_intr::make_interruptible(
c.cache.get_extent_viewable_by_trans(c.trans, backref_root))};
} else if (root_block->is_pending()) {
auto &prior = static_cast<RootBlock&>(*root_block->get_prior_instance());
backref_root = prior.backref_root_node;
if (backref_root) {
return {true,
trans_intr::make_interruptible(
c.cache.get_extent_viewable_by_trans(c.trans, backref_root))};
} else {
return {false,
trans_intr::make_interruptible(
Cache::get_extent_ertr::make_ready_future<
CachedExtentRef>())};
}
} else {
return {false,
trans_intr::make_interruptible(
Cache::get_extent_ertr::make_ready_future<
CachedExtentRef>())};
}
}
template <typename ROOT>
void link_phy_tree_root_node(RootBlockRef &root_block, ROOT* backref_root) {
root_block->backref_root_node = backref_root;
ceph_assert(backref_root != nullptr);
backref_root->root_block = root_block;
}
template void link_phy_tree_root_node(
RootBlockRef &root_block, backref::BackrefInternalNode* backref_root);
template void link_phy_tree_root_node(
RootBlockRef &root_block, backref::BackrefLeafNode* backref_root);
template void link_phy_tree_root_node(
RootBlockRef &root_block, backref::BackrefNode* backref_root);
template <>
void unlink_phy_tree_root_node<paddr_t>(RootBlockRef &root_block) {
root_block->backref_root_node = nullptr;
}
}
namespace crimson::os::seastore::backref {
BtreeBackrefManager::mkfs_ret
BtreeBackrefManager::mkfs(
Transaction &t)
{
LOG_PREFIX(BtreeBackrefManager::mkfs);
INFOT("start", t);
return cache.get_root(t).si_then([this, &t](auto croot) {
assert(croot->is_mutation_pending());
croot->get_root().backref_root = BackrefBtree::mkfs(croot, get_context(t));
return mkfs_iertr::now();
}).handle_error_interruptible(
mkfs_iertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error in BtreeBackrefManager::mkfs"
}
);
}
BtreeBackrefManager::get_mapping_ret
BtreeBackrefManager::get_mapping(
Transaction &t,
paddr_t offset)
{
LOG_PREFIX(BtreeBackrefManager::get_mapping);
TRACET("{}", t, offset);
auto c = get_context(t);
return with_btree_ret<BackrefBtree, BackrefMappingRef>(
cache,
c,
[c, offset](auto &btree) {
return btree.lower_bound(
c, offset
).si_then([offset, c](auto iter) -> get_mapping_ret {
LOG_PREFIX(BtreeBackrefManager::get_mapping);
if (iter.is_end() || iter.get_key() != offset) {
ERRORT("{} doesn't exist", c.trans, offset);
return crimson::ct_error::enoent::make();
} else {
TRACET("{} got {}, {}",
c.trans, offset, iter.get_key(), iter.get_val());
auto e = iter.get_pin(c);
return get_mapping_ret(
interruptible::ready_future_marker{},
std::move(e));
}
});
});
}
BtreeBackrefManager::get_mappings_ret
BtreeBackrefManager::get_mappings(
Transaction &t,
paddr_t offset,
paddr_t end)
{
LOG_PREFIX(BtreeBackrefManager::get_mappings);
TRACET("{}~{}", t, offset, end);
auto c = get_context(t);
return with_btree_state<BackrefBtree, backref_pin_list_t>(
cache,
c,
[c, offset, end](auto &btree, auto &ret) {
return BackrefBtree::iterate_repeat(
c,
btree.upper_bound_right(c, offset),
[&ret, offset, end, c](auto &pos) {
LOG_PREFIX(BtreeBackrefManager::get_mappings);
if (pos.is_end() || pos.get_key() >= end) {
TRACET("{}~{} done with {} results",
c.trans, offset, end, ret.size());
return BackrefBtree::iterate_repeat_ret_inner(
interruptible::ready_future_marker{},
seastar::stop_iteration::yes);
}
TRACET("{}~{} got {}, {}, repeat ...",
c.trans, offset, end, pos.get_key(), pos.get_val());
ceph_assert((pos.get_key().add_offset(pos.get_val().len)) > offset);
ret.push_back(pos.get_pin(c));
return BackrefBtree::iterate_repeat_ret_inner(
interruptible::ready_future_marker{},
seastar::stop_iteration::no);
});
});
}
BtreeBackrefManager::new_mapping_ret
BtreeBackrefManager::new_mapping(
Transaction &t,
paddr_t key,
extent_len_t len,
laddr_t addr,
extent_types_t type)
{
ceph_assert(
is_aligned(
key.get_addr_type() == paddr_types_t::SEGMENT ?
key.as_seg_paddr().get_segment_off() :
key.as_blk_paddr().get_device_off(),
cache.get_block_size()));
struct state_t {
paddr_t last_end;
std::optional<BackrefBtree::iterator> insert_iter;
std::optional<BackrefBtree::iterator> ret;
state_t(paddr_t hint) : last_end(hint) {}
};
LOG_PREFIX(BtreeBackrefManager::new_mapping);
DEBUGT("{}~{}, paddr={}", t, addr, len, key);
backref_map_val_t val{len, addr, type};
auto c = get_context(t);
//++stats.num_alloc_extents;
//auto lookup_attempts = stats.num_alloc_extents_iter_nexts;
return crimson::os::seastore::with_btree_state<BackrefBtree, state_t>(
cache,
c,
key,
[val, c, key, len, addr, /*lookup_attempts,*/ &t]
(auto &btree, auto &state) {
return BackrefBtree::iterate_repeat(
c,
btree.upper_bound_right(c, key),
[&state, len, addr, &t, key/*, lookup_attempts*/](auto &pos) {
LOG_PREFIX(BtreeBackrefManager::new_mapping);
//++stats.num_alloc_extents_iter_nexts;
if (pos.is_end()) {
DEBUGT("{}~{}, paddr={}, state: end, insert at {}",
t, addr, len, key,
//stats.num_alloc_extents_iter_nexts - lookup_attempts,
state.last_end);
state.insert_iter = pos;
return BackrefBtree::iterate_repeat_ret_inner(
interruptible::ready_future_marker{},
seastar::stop_iteration::yes);
} else if (pos.get_key() >= (state.last_end.add_offset(len))) {
DEBUGT("{}~{}, paddr={}, state: {}~{}, "
"insert at {} -- {}",
t, addr, len, key,
pos.get_key(), pos.get_val().len,
//stats.num_alloc_extents_iter_nexts - lookup_attempts,
state.last_end,
pos.get_val());
state.insert_iter = pos;
return BackrefBtree::iterate_repeat_ret_inner(
interruptible::ready_future_marker{},
seastar::stop_iteration::yes);
} else {
ERRORT("{}~{}, paddr={}, state: {}~{}, repeat ... -- {}",
t, addr, len, key,
pos.get_key(), pos.get_val().len,
pos.get_val());
ceph_abort("not possible for the backref tree");
return BackrefBtree::iterate_repeat_ret_inner(
interruptible::ready_future_marker{},
seastar::stop_iteration::no);
}
}).si_then([c, addr, len, key, &btree, &state, val] {
return btree.insert(
c,
*state.insert_iter,
state.last_end,
val,
nullptr
).si_then([&state, c, addr, len, key](auto &&p) {
LOG_PREFIX(BtreeBackrefManager::new_mapping);
auto [iter, inserted] = std::move(p);
TRACET("{}~{}, paddr={}, inserted at {}, leaf {}",
c.trans, addr, len, key, state.last_end, *iter.get_leaf_node());
ceph_assert(inserted);
state.ret = iter;
});
});
}).si_then([c](auto &&state) {
return state.ret->get_pin(c);
});
}
BtreeBackrefManager::merge_cached_backrefs_ret
BtreeBackrefManager::merge_cached_backrefs(
Transaction &t,
const journal_seq_t &limit,
const uint64_t max)
{
LOG_PREFIX(BtreeBackrefManager::merge_cached_backrefs);
DEBUGT("insert up to {}", t, limit);
return seastar::do_with(
limit,
JOURNAL_SEQ_NULL,
[this, &t, max](auto &limit, auto &inserted_to) {
auto &backref_entryrefs_by_seq = cache.get_backref_entryrefs_by_seq();
return seastar::do_with(
backref_entryrefs_by_seq.begin(),
JOURNAL_SEQ_NULL,
[this, &t, &limit, &backref_entryrefs_by_seq, max](auto &iter, auto &inserted_to) {
return trans_intr::repeat(
[&iter, this, &t, &limit, &backref_entryrefs_by_seq, max, &inserted_to]()
-> merge_cached_backrefs_iertr::future<seastar::stop_iteration> {
if (iter == backref_entryrefs_by_seq.end()) {
return seastar::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::yes);
}
auto &seq = iter->first;
auto &backref_entry_refs = iter->second;
LOG_PREFIX(BtreeBackrefManager::merge_cached_backrefs);
DEBUGT("seq {}, limit {}, num_fresh_backref {}"
, t, seq, limit, t.get_num_fresh_backref());
if (seq <= limit && t.get_num_fresh_backref() * BACKREF_NODE_SIZE < max) {
inserted_to = seq;
return trans_intr::do_for_each(
backref_entry_refs,
[this, &t](auto &backref_entry_ref) {
LOG_PREFIX(BtreeBackrefManager::merge_cached_backrefs);
auto &backref_entry = *backref_entry_ref;
if (backref_entry.laddr != L_ADDR_NULL) {
DEBUGT("new mapping: {}~{} -> {}",
t,
backref_entry.paddr,
backref_entry.len,
backref_entry.laddr);
return new_mapping(
t,
backref_entry.paddr,
backref_entry.len,
backref_entry.laddr,
backref_entry.type).si_then([](auto &&pin) {
return seastar::now();
});
} else {
DEBUGT("remove mapping: {}", t, backref_entry.paddr);
return remove_mapping(
t,
backref_entry.paddr
).si_then([](auto&&) {
return seastar::now();
}).handle_error_interruptible(
crimson::ct_error::input_output_error::pass_further(),
crimson::ct_error::assert_all("no enoent possible")
);
}
}).si_then([&iter] {
iter++;
return seastar::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::no);
});
}
return seastar::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::yes);
}).si_then([&inserted_to] {
return seastar::make_ready_future<journal_seq_t>(
std::move(inserted_to));
});
});
});
}
BtreeBackrefManager::check_child_trackers_ret
BtreeBackrefManager::check_child_trackers(
Transaction &t) {
auto c = get_context(t);
return with_btree<BackrefBtree>(
cache, c,
[c](auto &btree) {
return btree.check_child_trackers(c);
});
}
BtreeBackrefManager::scan_mapped_space_ret
BtreeBackrefManager::scan_mapped_space(
Transaction &t,
BtreeBackrefManager::scan_mapped_space_func_t &&f)
{
LOG_PREFIX(BtreeBackrefManager::scan_mapped_space);
DEBUGT("scan backref tree", t);
auto c = get_context(t);
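  // The scan feeds the visitor in three passes:
  // 1. committed entries in the backref tree leaves,
  // 2. backref deltas still cached in memory (including frees),
  // 3. the backref tree nodes themselves (internal traversal).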
return seastar::do_with(
std::move(f),
[this, c, FNAME](auto &scan_visitor)
{
auto block_size = cache.get_block_size();
// traverse leaf-node entries
return with_btree<BackrefBtree>(
cache, c,
[c, &scan_visitor, block_size, FNAME](auto &btree)
{
return BackrefBtree::iterate_repeat(
c,
btree.lower_bound(
c,
P_ADDR_MIN),
[c, &scan_visitor, block_size, FNAME](auto &pos) {
if (pos.is_end()) {
return BackrefBtree::iterate_repeat_ret_inner(
interruptible::ready_future_marker{},
seastar::stop_iteration::yes);
}
TRACET("tree value {}~{} {}~{} {} used",
c.trans,
pos.get_key(),
pos.get_val().len,
pos.get_val().laddr,
pos.get_val().len,
pos.get_val().type);
ceph_assert(pos.get_key().is_absolute());
ceph_assert(pos.get_val().len > 0 &&
pos.get_val().len % block_size == 0);
ceph_assert(!is_backref_node(pos.get_val().type));
ceph_assert(pos.get_val().laddr != L_ADDR_NULL);
scan_visitor(
pos.get_key(),
P_ADDR_NULL,
pos.get_val().len,
pos.get_val().type,
pos.get_val().laddr);
return BackrefBtree::iterate_repeat_ret_inner(
interruptible::ready_future_marker{},
seastar::stop_iteration::no);
}
);
}).si_then([this, &scan_visitor, c, FNAME, block_size] {
// traverse alloc-deltas in order
auto &backref_entryrefs = cache.get_backref_entryrefs_by_seq();
for (auto &[seq, refs] : backref_entryrefs) {
boost::ignore_unused(seq);
DEBUGT("scan {} backref entries", c.trans, refs.size());
for (auto &backref_entry : refs) {
if (backref_entry->laddr == L_ADDR_NULL) {
TRACET("backref entry {}~{} {} free",
c.trans,
backref_entry->paddr,
backref_entry->len,
backref_entry->type);
} else {
TRACET("backref entry {}~{} {}~{} {} used",
c.trans,
backref_entry->paddr,
backref_entry->len,
backref_entry->laddr,
backref_entry->len,
backref_entry->type);
}
ceph_assert(backref_entry->paddr.is_absolute());
ceph_assert(backref_entry->len > 0 &&
backref_entry->len % block_size == 0);
ceph_assert(!is_backref_node(backref_entry->type));
scan_visitor(
backref_entry->paddr,
P_ADDR_NULL,
backref_entry->len,
backref_entry->type,
backref_entry->laddr);
}
}
}).si_then([this, &scan_visitor, block_size, c, FNAME] {
BackrefBtree::mapped_space_visitor_t f =
[&scan_visitor, block_size, FNAME, c](
paddr_t paddr, paddr_t key, extent_len_t len,
depth_t depth, extent_types_t type, BackrefBtree::iterator&) {
TRACET("tree node {}~{} {}, depth={} used",
c.trans, paddr, len, type, depth);
ceph_assert(paddr.is_absolute());
ceph_assert(len > 0 && len % block_size == 0);
ceph_assert(depth >= 1);
ceph_assert(is_backref_node(type));
return scan_visitor(paddr, key, len, type, L_ADDR_NULL);
};
return seastar::do_with(
std::move(f),
[this, c](auto &tree_visitor)
{
// traverse internal-node entries
return with_btree<BackrefBtree>(
cache, c,
[c, &tree_visitor](auto &btree)
{
return BackrefBtree::iterate_repeat(
c,
btree.lower_bound(
c,
P_ADDR_MIN,
&tree_visitor),
[](auto &pos) {
if (pos.is_end()) {
return BackrefBtree::iterate_repeat_ret_inner(
interruptible::ready_future_marker{},
seastar::stop_iteration::yes);
}
return BackrefBtree::iterate_repeat_ret_inner(
interruptible::ready_future_marker{},
seastar::stop_iteration::no);
},
&tree_visitor
);
});
});
});
});
}
BtreeBackrefManager::base_iertr::future<> _init_cached_extent(
op_context_t<paddr_t> c,
const CachedExtentRef &e,
BackrefBtree &btree,
bool &ret)
{
return btree.init_cached_extent(c, e
).si_then([&ret](bool is_alive) {
ret = is_alive;
});
}
BtreeBackrefManager::init_cached_extent_ret BtreeBackrefManager::init_cached_extent(
Transaction &t,
CachedExtentRef e)
{
LOG_PREFIX(BtreeBackrefManager::init_cached_extent);
TRACET("{}", t, *e);
return seastar::do_with(bool(), [this, e, &t](bool &ret) {
auto c = get_context(t);
return with_btree<BackrefBtree>(cache, c, [c, e, &ret](auto &btree)
-> base_iertr::future<> {
LOG_PREFIX(BtreeBackrefManager::init_cached_extent);
DEBUGT("extent {}", c.trans, *e);
return _init_cached_extent(c, e, btree, ret);
}).si_then([&ret] { return ret; });
});
}
BtreeBackrefManager::rewrite_extent_ret
BtreeBackrefManager::rewrite_extent(
Transaction &t,
CachedExtentRef extent)
{
auto c = get_context(t);
return with_btree<BackrefBtree>(
cache,
c,
[c, extent](auto &btree) mutable {
return btree.rewrite_extent(c, extent);
});
}
BtreeBackrefManager::remove_mapping_ret
BtreeBackrefManager::remove_mapping(
Transaction &t,
paddr_t addr)
{
auto c = get_context(t);
return with_btree_ret<BackrefBtree, remove_mapping_result_t>(
cache,
c,
[c, addr](auto &btree) mutable {
return btree.lower_bound(
c, addr
).si_then([&btree, c, addr](auto iter)
-> remove_mapping_ret {
if (iter.is_end() || iter.get_key() != addr) {
LOG_PREFIX(BtreeBackrefManager::remove_mapping);
WARNT("paddr={} doesn't exist, state: {}, leaf {}",
c.trans, addr, iter.get_key(), *iter.get_leaf_node());
return remove_mapping_iertr::make_ready_future<
remove_mapping_result_t>(remove_mapping_result_t());
}
auto ret = remove_mapping_result_t{
iter.get_key(),
iter.get_val().len,
iter.get_val().laddr};
return btree.remove(
c,
iter
).si_then([ret] {
return ret;
});
});
});
}
Cache::backref_entry_query_mset_t
BtreeBackrefManager::get_cached_backref_entries_in_range(
paddr_t start,
paddr_t end)
{
return cache.get_backref_entries_in_range(start, end);
}
void BtreeBackrefManager::cache_new_backref_extent(
paddr_t paddr,
paddr_t key,
extent_types_t type)
{
return cache.add_backref_extent(paddr, key, type);
}
BtreeBackrefManager::retrieve_backref_extents_in_range_ret
BtreeBackrefManager::retrieve_backref_extents_in_range(
Transaction &t,
paddr_t start,
paddr_t end)
{
auto backref_extents = cache.get_backref_extents_in_range(start, end);
return seastar::do_with(
std::vector<CachedExtentRef>(),
std::move(backref_extents),
[this, &t](auto &extents, auto &backref_extents) {
return trans_intr::parallel_for_each(
backref_extents,
[this, &extents, &t](auto &ent) {
      // only the single gc fiber can rewrite backref extents, so the
      // extent must still be alive here
assert(is_backref_node(ent.type));
LOG_PREFIX(BtreeBackrefManager::retrieve_backref_extents_in_range);
DEBUGT("getting backref extent of type {} at {}, key {}",
t,
ent.type,
ent.paddr,
ent.key);
auto c = get_context(t);
return with_btree_ret<BackrefBtree, CachedExtentRef>(
cache,
c,
[c, &ent](auto &btree) {
if (ent.type == extent_types_t::BACKREF_INTERNAL) {
return btree.get_internal_if_live(
c, ent.paddr, ent.key, BACKREF_NODE_SIZE);
} else {
assert(ent.type == extent_types_t::BACKREF_LEAF);
return btree.get_leaf_if_live(
c, ent.paddr, ent.key, BACKREF_NODE_SIZE);
}
}).si_then([&extents](auto ext) {
ceph_assert(ext);
extents.emplace_back(std::move(ext));
});
}).si_then([&extents] {
return std::move(extents);
});
});
}
} // namespace crimson::os::seastore::backref
| 19,198 | 29.916264 | 89 | cc |
null | ceph-main/src/crimson/os/seastore/backref/btree_backref_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/backref_manager.h"
#include "crimson/os/seastore/backref/backref_tree_node.h"
#include "crimson/os/seastore/btree/fixed_kv_btree.h"
namespace crimson::os::seastore::backref {
constexpr size_t BACKREF_BLOCK_SIZE = 4096;
class BtreeBackrefMapping : public BtreeNodeMapping<paddr_t, laddr_t> {
extent_types_t type;
public:
BtreeBackrefMapping(op_context_t<paddr_t> ctx)
: BtreeNodeMapping(ctx) {}
BtreeBackrefMapping(
op_context_t<paddr_t> ctx,
CachedExtentRef parent,
uint16_t pos,
backref_map_val_t &val,
backref_node_meta_t &&meta)
: BtreeNodeMapping(
ctx,
parent,
pos,
val.laddr,
val.len,
std::forward<backref_node_meta_t>(meta)),
type(val.type)
{}
extent_types_t get_type() const final {
return type;
}
};
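// The backref tree maps physical addresses (paddr_t) to backref_map_val_t
// entries carrying the logical address, length and extent type; the final
// template argument (leaf_has_children) is false, so its leaf nodes do not
// track child extent pointers.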
using BackrefBtree = FixedKVBtree<
paddr_t, backref_map_val_t, BackrefInternalNode,
BackrefLeafNode, BtreeBackrefMapping, BACKREF_BLOCK_SIZE, false>;
class BtreeBackrefManager : public BackrefManager {
public:
BtreeBackrefManager(Cache &cache)
: cache(cache)
{}
mkfs_ret mkfs(
Transaction &t) final;
get_mapping_ret get_mapping(
Transaction &t,
paddr_t offset) final;
get_mappings_ret get_mappings(
Transaction &t,
paddr_t offset,
paddr_t end) final;
new_mapping_ret new_mapping(
Transaction &t,
paddr_t key,
extent_len_t len,
laddr_t val,
extent_types_t type) final;
merge_cached_backrefs_ret merge_cached_backrefs(
Transaction &t,
const journal_seq_t &limit,
const uint64_t max) final;
remove_mapping_ret remove_mapping(
Transaction &t,
paddr_t offset) final;
check_child_trackers_ret check_child_trackers(Transaction &t) final;
scan_mapped_space_ret scan_mapped_space(
Transaction &t,
scan_mapped_space_func_t &&f) final;
init_cached_extent_ret init_cached_extent(
Transaction &t,
CachedExtentRef e) final;
rewrite_extent_ret rewrite_extent(
Transaction &t,
CachedExtentRef extent) final;
Cache::backref_entry_query_mset_t
get_cached_backref_entries_in_range(
paddr_t start,
paddr_t end) final;
retrieve_backref_extents_in_range_ret
retrieve_backref_extents_in_range(
Transaction &t,
paddr_t start,
paddr_t end) final;
void cache_new_backref_extent(
paddr_t paddr,
paddr_t key,
extent_types_t type) final;
private:
Cache &cache;
op_context_t<paddr_t> get_context(Transaction &t) {
return op_context_t<paddr_t>{cache, t};
}
};
} // namespace crimson::os::seastore::backref
| 2,677 | 21.888889 | 71 | h |
null | ceph-main/src/crimson/os/seastore/btree/btree_range_pin.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "crimson/os/seastore/btree/btree_range_pin.h"
#include "crimson/os/seastore/btree/fixed_kv_node.h"
namespace crimson::os::seastore {
template <typename key_t, typename val_t>
get_child_ret_t<LogicalCachedExtent>
BtreeNodeMapping<key_t, val_t>::get_logical_extent(
Transaction &t)
{
assert(parent);
assert(parent->is_valid());
assert(pos != std::numeric_limits<uint16_t>::max());
auto &p = (FixedKVNode<key_t>&)*parent;
auto v = p.get_logical_child(ctx, pos);
if (!v.has_child()) {
this->child_pos = v.get_child_pos();
}
return v;
}
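// Explicit instantiations for the two mappings used by SeaStore:
// laddr_t -> paddr_t (LBA tree) and paddr_t -> laddr_t (backref tree).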
template class BtreeNodeMapping<laddr_t, paddr_t>;
template class BtreeNodeMapping<paddr_t, laddr_t>;
} // namespace crimson::os::seastore
| 804 | 27.75 | 70 | cc |
null | ceph-main/src/crimson/os/seastore/btree/btree_range_pin.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <boost/intrusive/set.hpp>
#include "crimson/common/log.h"
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/cached_extent.h"
#include "crimson/os/seastore/seastore_types.h"
namespace crimson::os::seastore {
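// op_context_t bundles the Cache and the Transaction a tree operation runs
// in; it is passed by value through every tree call.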
template <typename node_key_t>
struct op_context_t {
Cache &cache;
Transaction &trans;
};
constexpr uint16_t MAX_FIXEDKVBTREE_DEPTH = 8;
template <typename T>
struct min_max_t {};
template <>
struct min_max_t<laddr_t> {
static constexpr laddr_t max = L_ADDR_MAX;
static constexpr laddr_t min = L_ADDR_MIN;
};
template <>
struct min_max_t<paddr_t> {
static constexpr paddr_t max = P_ADDR_MAX;
static constexpr paddr_t min = P_ADDR_MIN;
};
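/**
 * fixed_kv_node_meta_t
 *
 * Describes the key range [begin, end) covered by a btree node and the
 * node's depth within the tree (leaves are depth 1; the root covers the
 * full [min, max) range).
 */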
template <typename bound_t>
struct fixed_kv_node_meta_t {
bound_t begin = min_max_t<bound_t>::min;
bound_t end = min_max_t<bound_t>::min;
depth_t depth = 0;
bool is_parent_of(const fixed_kv_node_meta_t &other) const {
return (depth == other.depth + 1) &&
(begin <= other.begin) &&
(end > other.begin);
}
bool is_in_range(const bound_t key) const {
return begin <= key && end > key;
}
std::pair<fixed_kv_node_meta_t, fixed_kv_node_meta_t> split_into(bound_t pivot) const {
return std::make_pair(
fixed_kv_node_meta_t{begin, pivot, depth},
fixed_kv_node_meta_t{pivot, end, depth});
}
static fixed_kv_node_meta_t merge_from(
const fixed_kv_node_meta_t &lhs, const fixed_kv_node_meta_t &rhs) {
ceph_assert(lhs.depth == rhs.depth);
return fixed_kv_node_meta_t{lhs.begin, rhs.end, lhs.depth};
}
static std::pair<fixed_kv_node_meta_t, fixed_kv_node_meta_t>
rebalance(const fixed_kv_node_meta_t &lhs, const fixed_kv_node_meta_t &rhs, bound_t pivot) {
ceph_assert(lhs.depth == rhs.depth);
return std::make_pair(
fixed_kv_node_meta_t{lhs.begin, pivot, lhs.depth},
fixed_kv_node_meta_t{pivot, rhs.end, lhs.depth});
}
bool is_root() const {
return begin == min_max_t<bound_t>::min && end == min_max_t<bound_t>::max;
}
};
template <typename bound_t>
inline std::ostream &operator<<(
std::ostream &lhs,
const fixed_kv_node_meta_t<bound_t> &rhs)
{
return lhs << "btree_node_meta_t("
<< "begin=" << rhs.begin
<< ", end=" << rhs.end
<< ", depth=" << rhs.depth
<< ")";
}
/**
* fixed_kv_node_meta_le_t
*
* On disk layout for fixed_kv_node_meta_t
*/
template <typename bound_le_t>
struct fixed_kv_node_meta_le_t {
bound_le_t begin = bound_le_t(0);
bound_le_t end = bound_le_t(0);
depth_le_t depth = init_depth_le(0);
fixed_kv_node_meta_le_t() = default;
fixed_kv_node_meta_le_t(
const fixed_kv_node_meta_le_t<bound_le_t> &) = default;
explicit fixed_kv_node_meta_le_t(
const fixed_kv_node_meta_t<typename bound_le_t::orig_type> &val)
: begin(val.begin),
end(val.end),
depth(init_depth_le(val.depth)) {}
operator fixed_kv_node_meta_t<typename bound_le_t::orig_type>() const {
return fixed_kv_node_meta_t<typename bound_le_t::orig_type>{
begin, end, depth };
}
};
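/**
 * BtreeNodeMapping
 *
 * Generic mapping/pin returned by tree lookups: records the parent (leaf)
 * node, the position within it, the mapped value, its length and the key
 * range it covers.
 */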
template <typename key_t, typename val_t>
class BtreeNodeMapping : public PhysicalNodeMapping<key_t, val_t> {
op_context_t<key_t> ctx;
/**
* parent
*
* populated until link_extent is called to ensure cache residence
* until add_pin is called.
*/
CachedExtentRef parent;
val_t value;
extent_len_t len;
fixed_kv_node_meta_t<key_t> range;
uint16_t pos = std::numeric_limits<uint16_t>::max();
public:
using val_type = val_t;
BtreeNodeMapping(op_context_t<key_t> ctx) : ctx(ctx) {}
BtreeNodeMapping(
op_context_t<key_t> ctx,
CachedExtentRef parent,
uint16_t pos,
val_t &value,
extent_len_t len,
fixed_kv_node_meta_t<key_t> &&meta)
: ctx(ctx),
parent(parent),
value(value),
len(len),
range(std::move(meta)),
pos(pos)
{
if (!parent->is_pending()) {
this->child_pos = {parent, pos};
}
}
CachedExtentRef get_parent() const final {
return parent;
}
CachedExtentRef get_parent() {
return parent;
}
void set_parent(CachedExtentRef ext) {
parent = ext;
}
uint16_t get_pos() const final {
return pos;
}
extent_len_t get_length() const final {
ceph_assert(range.end > range.begin);
return len;
}
extent_types_t get_type() const override {
ceph_abort("should never happen");
return extent_types_t::ROOT;
}
val_t get_val() const final {
return value;
}
key_t get_key() const final {
return range.begin;
}
PhysicalNodeMappingRef<key_t, val_t> duplicate() const final {
auto ret = std::unique_ptr<BtreeNodeMapping<key_t, val_t>>(
new BtreeNodeMapping<key_t, val_t>(ctx));
ret->range = range;
ret->value = value;
ret->parent = parent;
ret->len = len;
ret->pos = pos;
return ret;
}
bool has_been_invalidated() const final {
return parent->has_been_invalidated();
}
get_child_ret_t<LogicalCachedExtent> get_logical_extent(Transaction&) final;
};
}
| 5,120 | 23.270142 | 94 | h |
null | ceph-main/src/crimson/os/seastore/btree/fixed_kv_btree.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <boost/container/static_vector.hpp>
#include <sys/mman.h>
#include <memory>
#include <string.h>
#include "crimson/os/seastore/logging.h"
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/btree/btree_range_pin.h"
#include "crimson/os/seastore/root_block.h"
#define RESERVATION_PTR reinterpret_cast<ChildableCachedExtent*>(0x1)
namespace crimson::os::seastore::lba_manager::btree {
struct lba_map_val_t;
}
namespace crimson::os::seastore {
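// The free functions below are per-tree customization points: each concrete
// tree (LBA, backref) supplies specializations to access its root in root_t,
// link/unlink the in-memory root node and account per-transaction tree stats.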
bool is_valid_child_ptr(ChildableCachedExtent* child);
template <typename T>
phy_tree_root_t& get_phy_tree_root(root_t& r);
using get_child_iertr =
::crimson::interruptible::interruptible_errorator<
typename trans_intr::condition,
get_child_ertr>;
using get_phy_tree_root_node_ret =
std::pair<bool, get_child_iertr::future<CachedExtentRef>>;
template <typename T, typename key_t>
const get_phy_tree_root_node_ret get_phy_tree_root_node(
const RootBlockRef &root_block,
op_context_t<key_t> c);
template <typename ROOT_T>
void link_phy_tree_root_node(RootBlockRef &root_block, ROOT_T* root_node);
template <typename T>
void unlink_phy_tree_root_node(RootBlockRef &root_block);
template <typename T>
Transaction::tree_stats_t& get_tree_stats(Transaction &t);
template <
typename node_key_t,
typename node_val_t,
typename internal_node_t,
typename leaf_node_t,
typename pin_t,
size_t node_size,
bool leaf_has_children>
class FixedKVBtree {
static constexpr size_t MAX_DEPTH = 16;
using self_type = FixedKVBtree<
node_key_t,
node_val_t,
internal_node_t,
leaf_node_t,
pin_t,
node_size,
leaf_has_children>;
public:
using InternalNodeRef = TCachedExtentRef<internal_node_t>;
using LeafNodeRef = TCachedExtentRef<leaf_node_t>;
using base_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
using base_iertr = trans_iertr<base_ertr>;
class iterator;
using iterator_fut = base_iertr::future<iterator>;
using mapped_space_visitor_t = std::function<
void(paddr_t, node_key_t, extent_len_t, depth_t, extent_types_t, iterator&)>;
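  /**
   * iterator
   *
   * Cursor into the tree: internal[] records the node and position visited
   * at each internal depth (stored at index depth - 2), while leaf records
   * the position within the leaf node; leaf.pos == leaf.node->get_size()
   * marks a boundary (end) position.
   */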
class iterator {
public:
iterator(const iterator &rhs) noexcept :
internal(rhs.internal), leaf(rhs.leaf) {}
iterator(iterator &&rhs) noexcept :
internal(std::move(rhs.internal)), leaf(std::move(rhs.leaf)) {}
iterator &operator=(const iterator &) = default;
iterator &operator=(iterator &&) = default;
iterator_fut next(
op_context_t<node_key_t> c,
mapped_space_visitor_t *visitor=nullptr) const
{
assert_valid();
assert(!is_end());
auto ret = *this;
ret.leaf.pos++;
if (ret.at_boundary()) {
return seastar::do_with(
ret,
[c, visitor](auto &ret) mutable {
return ret.handle_boundary(
c, visitor
).si_then([&ret] {
return std::move(ret);
});
});
} else {
return iterator_fut(
interruptible::ready_future_marker{},
ret);
}
}
iterator_fut prev(op_context_t<node_key_t> c) const
{
assert_valid();
assert(!is_begin());
auto ret = *this;
if (ret.leaf.pos > 0) {
ret.leaf.pos--;
return iterator_fut(
interruptible::ready_future_marker{},
ret);
}
depth_t depth_with_space = 2;
for (; depth_with_space <= get_depth(); ++depth_with_space) {
if (ret.get_internal(depth_with_space).pos > 0) {
break;
}
}
assert(depth_with_space <= ret.get_depth()); // must not be begin()
return seastar::do_with(
std::move(ret),
[](const internal_node_t &internal) { return --internal.end(); },
[](const leaf_node_t &leaf) { return --leaf.end(); },
[c, depth_with_space](auto &ret, auto &li, auto &ll) {
for (depth_t depth = 2; depth < depth_with_space; ++depth) {
ret.get_internal(depth).reset();
}
ret.leaf.reset();
ret.get_internal(depth_with_space).pos--;
// note, cannot result in at_boundary() by construction
return lookup_depth_range(
c, ret, depth_with_space - 1, 0, li, ll, nullptr
).si_then([&ret] {
assert(!ret.at_boundary());
return std::move(ret);
});
});
}
void assert_valid() const {
assert(leaf.node);
assert(leaf.pos <= leaf.node->get_size());
for (auto &i: internal) {
(void)i;
assert(i.node);
assert(i.pos < i.node->get_size());
}
}
depth_t get_depth() const {
return internal.size() + 1;
}
auto &get_internal(depth_t depth) {
assert(depth > 1);
assert((depth - 2) < internal.size());
return internal[depth - 2];
}
const auto &get_internal(depth_t depth) const {
assert(depth > 1);
assert((depth - 2) < internal.size());
return internal[depth - 2];
}
node_key_t get_key() const {
assert(!is_end());
return leaf.node->iter_idx(leaf.pos).get_key();
}
node_val_t get_val() const {
assert(!is_end());
auto ret = leaf.node->iter_idx(leaf.pos).get_val();
if constexpr (
std::is_same_v<crimson::os::seastore::lba_manager::btree::lba_map_val_t,
node_val_t>) {
ret.paddr = ret.paddr.maybe_relative_to(leaf.node->get_paddr());
}
return ret;
}
bool is_end() const {
// external methods may only resolve at a boundary if at end
return at_boundary();
}
bool is_begin() const {
for (auto &i: internal) {
if (i.pos != 0)
return false;
}
return leaf.pos == 0;
}
PhysicalNodeMappingRef<node_key_t, typename pin_t::val_type>
get_pin(op_context_t<node_key_t> ctx) const {
assert(!is_end());
auto val = get_val();
auto key = get_key();
return std::make_unique<pin_t>(
ctx,
leaf.node,
leaf.pos,
val,
fixed_kv_node_meta_t<node_key_t>{ key, key + val.len, 0 });
}
typename leaf_node_t::Ref get_leaf_node() {
return leaf.node;
}
uint16_t get_leaf_pos() {
return leaf.pos;
}
private:
iterator() noexcept {}
iterator(depth_t depth) noexcept : internal(depth - 1) {}
friend class FixedKVBtree;
static constexpr uint16_t INVALID = std::numeric_limits<uint16_t>::max();
template <typename NodeType>
struct node_position_t {
typename NodeType::Ref node;
uint16_t pos = INVALID;
node_position_t() = default;
node_position_t(
typename NodeType::Ref node,
uint16_t pos)
: node(node), pos(pos) {}
void reset() {
*this = node_position_t{};
}
auto get_iter() {
assert(pos != INVALID);
assert(pos < node->get_size());
return node->iter_idx(pos);
}
};
boost::container::static_vector<
node_position_t<internal_node_t>, MAX_DEPTH> internal;
node_position_t<leaf_node_t> leaf;
bool at_boundary() const {
assert(leaf.pos <= leaf.node->get_size());
return leaf.pos == leaf.node->get_size();
}
using handle_boundary_ertr = base_iertr;
using handle_boundary_ret = handle_boundary_ertr::future<>;
handle_boundary_ret handle_boundary(
op_context_t<node_key_t> c,
mapped_space_visitor_t *visitor)
{
assert(at_boundary());
depth_t depth_with_space = 2;
for (; depth_with_space <= get_depth(); ++depth_with_space) {
if ((get_internal(depth_with_space).pos + 1) <
get_internal(depth_with_space).node->get_size()) {
break;
}
}
if (depth_with_space <= get_depth()) {
return seastar::do_with(
[](const internal_node_t &internal) { return internal.begin(); },
[](const leaf_node_t &leaf) { return leaf.begin(); },
[this, c, depth_with_space, visitor](auto &li, auto &ll) {
for (depth_t depth = 2; depth < depth_with_space; ++depth) {
get_internal(depth).reset();
}
leaf.reset();
get_internal(depth_with_space).pos++;
// note, cannot result in at_boundary() by construction
return lookup_depth_range(
c, *this, depth_with_space - 1, 0, li, ll, visitor
);
});
} else {
// end
return seastar::now();
}
}
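    // check_split: returns 0 if the leaf still has room, otherwise the
    // lowest depth whose parent can absorb an extra entry; returning
    // get_depth() means even the root is full and the tree must grow by one
    // level (see handle_split()).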
depth_t check_split() const {
if (!leaf.node->at_max_capacity()) {
return 0;
}
for (depth_t split_from = 1; split_from < get_depth(); ++split_from) {
if (!get_internal(split_from + 1).node->at_max_capacity())
return split_from;
}
return get_depth();
}
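    // check_merge: counterpart of check_split(); returns 0 if the leaf is
    // not below minimum capacity, otherwise the lowest depth whose parent is
    // not below minimum, and get_depth() if merging may propagate all the
    // way to the root.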
depth_t check_merge() const {
if (!leaf.node->below_min_capacity()) {
return 0;
}
for (depth_t merge_from = 1; merge_from < get_depth(); ++merge_from) {
if (!get_internal(merge_from + 1).node->below_min_capacity())
return merge_from;
}
return get_depth();
}
};
FixedKVBtree(RootBlockRef &root_block) : root_block(root_block) {}
auto& get_root() {
return get_phy_tree_root<self_type>(root_block->get_root());
}
auto& get_root() const {
return get_phy_tree_root<self_type>(root_block->get_root());
}
template <typename T>
void set_root_node(const TCachedExtentRef<T> &root_node) {
static_assert(std::is_base_of_v<typename internal_node_t::base_t, T>);
link_phy_tree_root_node(root_block, root_node.get());
}
auto get_root_node(op_context_t<node_key_t> c) const {
return get_phy_tree_root_node<self_type>(root_block, c);
}
/// mkfs
using mkfs_ret = phy_tree_root_t;
static mkfs_ret mkfs(RootBlockRef &root_block, op_context_t<node_key_t> c) {
assert(root_block->is_mutation_pending());
auto root_leaf = c.cache.template alloc_new_extent<leaf_node_t>(
c.trans,
node_size,
placement_hint_t::HOT,
INIT_GENERATION);
root_leaf->set_size(0);
fixed_kv_node_meta_t<node_key_t> meta{min_max_t<node_key_t>::min, min_max_t<node_key_t>::max, 1};
root_leaf->set_meta(meta);
root_leaf->range = meta;
get_tree_stats<self_type>(c.trans).depth = 1u;
get_tree_stats<self_type>(c.trans).extents_num_delta++;
link_phy_tree_root_node(root_block, root_leaf.get());
return phy_tree_root_t{root_leaf->get_paddr(), 1u};
}
/**
* lower_bound
*
* @param c [in] context
   * @param addr [in] addr
* @return least iterator >= key
*/
iterator_fut lower_bound(
op_context_t<node_key_t> c,
node_key_t addr,
mapped_space_visitor_t *visitor=nullptr,
depth_t min_depth = 1) const
{
LOG_PREFIX(FixedKVBtree::lower_bound);
return lookup(
c,
[addr](const internal_node_t &internal) {
assert(internal.get_size() > 0);
auto iter = internal.upper_bound(addr);
assert(iter != internal.begin());
--iter;
return iter;
},
[FNAME, c, addr](const leaf_node_t &leaf) {
auto ret = leaf.lower_bound(addr);
SUBTRACET(
seastore_fixedkv_tree,
"leaf addr {}, got ret offset {}, size {}, end {}",
c.trans,
addr,
ret.get_offset(),
leaf.get_size(),
ret == leaf.end());
return ret;
},
min_depth,
visitor
).si_then([FNAME, c, min_depth](auto &&ret) {
SUBTRACET(
seastore_fixedkv_tree,
"ret.leaf.pos {}",
c.trans,
ret.leaf.pos);
if (min_depth == 1) {
ret.assert_valid();
}
return std::move(ret);
});
}
/**
* upper_bound
*
* @param c [in] context
   * @param addr [in] addr
* @return least iterator > key
*/
iterator_fut upper_bound(
op_context_t<node_key_t> c,
node_key_t addr
) const {
return lower_bound(
c, addr
).si_then([c, addr](auto iter) {
if (!iter.is_end() && iter.get_key() == addr) {
return iter.next(c);
} else {
return iterator_fut(
interruptible::ready_future_marker{},
iter);
}
});
}
/**
* upper_bound_right
*
* @param c [in] context
* @param addr [in] addr
* @return least iterator i s.t. i.get_key() + i.get_val().len > key
*/
iterator_fut upper_bound_right(
op_context_t<node_key_t> c,
node_key_t addr) const
{
return lower_bound(
c, addr
).si_then([c, addr](auto iter) {
if (iter.is_begin()) {
return iterator_fut(
interruptible::ready_future_marker{},
iter);
} else {
return iter.prev(
c
).si_then([iter, addr](auto prev) {
if ((prev.get_key() + prev.get_val().len) > addr) {
return iterator_fut(
interruptible::ready_future_marker{},
prev);
} else {
return iterator_fut(
interruptible::ready_future_marker{},
iter);
}
});
}
});
}
iterator_fut begin(op_context_t<node_key_t> c) const {
return lower_bound(c, 0);
}
iterator_fut end(op_context_t<node_key_t> c) const {
return upper_bound(c, min_max_t<node_key_t>::max);
}
template <typename child_node_t, typename node_t>
void check_node(
op_context_t<node_key_t> c,
TCachedExtentRef<node_t> node)
{
for (auto i : *node) {
CachedExtentRef child_node;
Transaction::get_extent_ret ret;
if constexpr (std::is_base_of_v<typename internal_node_t::base_t, child_node_t>) {
ret = c.trans.get_extent(
i->get_val().maybe_relative_to(node->get_paddr()),
&child_node);
} else {
if constexpr (leaf_has_children) {
ret = c.trans.get_extent(
i->get_val().paddr.maybe_relative_to(node->get_paddr()),
&child_node);
}
}
if (ret == Transaction::get_extent_ret::PRESENT) {
if (child_node->is_stable()) {
assert(child_node->is_valid());
auto cnode = child_node->template cast<child_node_t>();
assert(cnode->has_parent_tracker());
if (node->is_pending()) {
auto &n = node->get_stable_for_key(i->get_key());
assert(cnode->get_parent_node().get() == &n);
auto pos = n.lower_bound_offset(i->get_key());
assert(pos < n.get_node_size());
assert(n.children[pos] == cnode.get());
} else {
assert(cnode->get_parent_node().get() == node.get());
assert(node->children[i->get_offset()] == cnode.get());
}
} else if (child_node->is_pending()) {
if (child_node->is_mutation_pending()) {
auto &prior = (child_node_t &)*child_node->prior_instance;
assert(prior.is_valid());
assert(prior.is_parent_valid());
if (node->is_mutation_pending()) {
auto &n = node->get_stable_for_key(i->get_key());
assert(prior.get_parent_node().get() == &n);
auto pos = n.lower_bound_offset(i->get_key());
assert(pos < n.get_node_size());
assert(n.children[pos] == &prior);
} else {
assert(prior.get_parent_node().get() == node.get());
assert(node->children[i->get_offset()] == &prior);
}
} else {
auto cnode = child_node->template cast<child_node_t>();
auto pos = node->find(i->get_key()).get_offset();
auto child = node->children[pos];
assert(child);
assert(child == cnode.get());
assert(cnode->is_parent_valid());
}
} else {
ceph_assert(!child_node->is_valid());
ceph_abort("impossible");
}
} else if (ret == Transaction::get_extent_ret::ABSENT) {
ChildableCachedExtent* child = nullptr;
if (node->is_pending()) {
auto &n = node->get_stable_for_key(i->get_key());
auto pos = n.lower_bound_offset(i->get_key());
assert(pos < n.get_node_size());
child = n.children[pos];
if (is_valid_child_ptr(child)) {
auto c = (child_node_t*)child;
assert(c->has_parent_tracker());
assert(c->get_parent_node().get() == &n);
}
} else {
child = node->children[i->get_offset()];
if (is_valid_child_ptr(child)) {
auto c = (child_node_t*)child;
assert(c->has_parent_tracker());
assert(c->get_parent_node().get() == node.get());
}
}
if (!is_valid_child_ptr(child)) {
if constexpr (
std::is_base_of_v<typename internal_node_t::base_t, child_node_t>)
{
assert(!c.cache.query_cache(i->get_val(), nullptr));
} else {
if constexpr (leaf_has_children) {
assert(!c.cache.query_cache(i->get_val().paddr, nullptr));
}
}
}
} else {
ceph_abort("impossible");
}
}
}
using check_child_trackers_ret = base_iertr::future<>;
check_child_trackers_ret check_child_trackers(
op_context_t<node_key_t> c) {
mapped_space_visitor_t checker = [c, this](
paddr_t,
node_key_t,
extent_len_t,
depth_t depth,
extent_types_t,
iterator& iter) {
if constexpr (!leaf_has_children) {
if (depth == 1) {
return seastar::now();
}
}
if (depth > 1) {
auto &node = iter.get_internal(depth).node;
assert(node->is_valid());
check_node<typename internal_node_t::base_t>(c, node);
} else {
assert(depth == 1);
auto &node = iter.leaf.node;
assert(node->is_valid());
check_node<LogicalCachedExtent>(c, node);
}
return seastar::now();
};
return seastar::do_with(
std::move(checker),
[this, c](auto &checker) {
return iterate_repeat(
c,
lower_bound(
c,
min_max_t<node_key_t>::min,
&checker),
[](auto &pos) {
if (pos.is_end()) {
return base_iertr::make_ready_future<
seastar::stop_iteration>(
seastar::stop_iteration::yes);
}
return base_iertr::make_ready_future<
seastar::stop_iteration>(
seastar::stop_iteration::no);
},
&checker);
});
}
using iterate_repeat_ret_inner = base_iertr::future<
seastar::stop_iteration>;
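  /**
   * iterate_repeat
   *
   * Drives f over successive positions starting from iter_fut; f returns
   * stop_iteration::yes to finish, otherwise the iterator is advanced via
   * next() (optionally reporting visited nodes through visitor).
   *
   * Minimal usage sketch for a concrete instantiation Btree (hypothetical
   * caller, names illustrative only):
   *
   *   return Btree::iterate_repeat(
   *     c,
   *     btree.lower_bound(c, begin_key),
   *     [](auto &pos) {
   *       if (pos.is_end()) {
   *         return Btree::iterate_repeat_ret_inner(
   *           interruptible::ready_future_marker{},
   *           seastar::stop_iteration::yes);
   *       }
   *       // ... inspect pos.get_key() / pos.get_val() ...
   *       return Btree::iterate_repeat_ret_inner(
   *         interruptible::ready_future_marker{},
   *         seastar::stop_iteration::no);
   *     });
   */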
template <typename F>
static base_iertr::future<> iterate_repeat(
op_context_t<node_key_t> c,
iterator_fut &&iter_fut,
F &&f,
mapped_space_visitor_t *visitor=nullptr) {
return std::move(
iter_fut
).si_then([c, visitor, f=std::forward<F>(f)](auto iter) {
return seastar::do_with(
iter,
std::move(f),
[c, visitor](auto &pos, auto &f) {
return trans_intr::repeat(
[c, visitor, &f, &pos] {
return f(
pos
).si_then([c, visitor, &pos](auto done) {
if (done == seastar::stop_iteration::yes) {
return iterate_repeat_ret_inner(
interruptible::ready_future_marker{},
seastar::stop_iteration::yes);
} else {
ceph_assert(!pos.is_end());
return pos.next(
c, visitor
).si_then([&pos](auto next) {
pos = next;
return iterate_repeat_ret_inner(
interruptible::ready_future_marker{},
seastar::stop_iteration::no);
});
}
});
});
});
});
}
/**
* insert
*
* Inserts val at laddr with iter as a hint. If element at laddr already
   * exists, returns an iterator to that element unchanged and false.
*
* Invalidates all outstanding iterators for this tree on this transaction.
*
* @param c [in] op context
   * @param iter [in] hint; insertion is constant if laddr is immediately prior to iter
* @param laddr [in] addr at which to insert
* @param val [in] val to insert
* @return pair<iter, bool> where iter points to element at addr, bool true
* iff element at laddr did not exist.
*/
using insert_iertr = base_iertr;
using insert_ret = insert_iertr::future<std::pair<iterator, bool>>;
insert_ret insert(
op_context_t<node_key_t> c,
iterator iter,
node_key_t laddr,
node_val_t val,
LogicalCachedExtent* nextent
) {
LOG_PREFIX(FixedKVBtree::insert);
SUBTRACET(
seastore_fixedkv_tree,
"inserting laddr {} at iter {}",
c.trans,
laddr,
iter.is_end() ? min_max_t<node_key_t>::max : iter.get_key());
return seastar::do_with(
iter,
[this, c, laddr, val, nextent](auto &ret) {
return find_insertion(
c, laddr, ret
).si_then([this, c, laddr, val, &ret, nextent] {
if (!ret.at_boundary() && ret.get_key() == laddr) {
return insert_ret(
interruptible::ready_future_marker{},
std::make_pair(ret, false));
} else {
++(get_tree_stats<self_type>(c.trans).num_inserts);
return handle_split(
c, ret
).si_then([c, laddr, val, &ret, nextent] {
if (!ret.leaf.node->is_mutable()) {
CachedExtentRef mut = c.cache.duplicate_for_write(
c.trans, ret.leaf.node
);
ret.leaf.node = mut->cast<leaf_node_t>();
}
auto iter = typename leaf_node_t::const_iterator(
ret.leaf.node.get(), ret.leaf.pos);
assert(iter == ret.leaf.node->lower_bound(laddr));
assert(iter == ret.leaf.node->end() || iter->get_key() > laddr);
assert(laddr >= ret.leaf.node->get_meta().begin &&
laddr < ret.leaf.node->get_meta().end);
ret.leaf.node->insert(iter, laddr, val, nextent);
return insert_ret(
interruptible::ready_future_marker{},
std::make_pair(ret, true));
});
}
});
});
}
insert_ret insert(
op_context_t<node_key_t> c,
node_key_t laddr,
node_val_t val,
LogicalCachedExtent* nextent) {
return lower_bound(
c, laddr
).si_then([this, c, laddr, val, nextent](auto iter) {
return this->insert(c, iter, laddr, val, nextent);
});
}
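  // Minimal usage sketch for insert() (hypothetical caller, names are
  // illustrative only):
  //
  //   return btree.insert(c, laddr, val, extent.get()
  //   ).si_then([](auto p) {
  //     auto [iter, inserted] = p;
  //     // inserted == false means an entry already existed at laddr
  //   });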
/**
* update
*
* Invalidates all outstanding iterators for this tree on this transaction.
*
* @param c [in] op context
* @param iter [in] iterator to element to update, must not be end
* @param val [in] val with which to update
* @return iterator to newly updated element
*/
using update_iertr = base_iertr;
using update_ret = update_iertr::future<iterator>;
update_ret update(
op_context_t<node_key_t> c,
iterator iter,
node_val_t val,
LogicalCachedExtent* nextent)
{
LOG_PREFIX(FixedKVBtree::update);
SUBTRACET(
seastore_fixedkv_tree,
"update element at {}",
c.trans,
iter.is_end() ? min_max_t<node_key_t>::max : iter.get_key());
if (!iter.leaf.node->is_mutable()) {
CachedExtentRef mut = c.cache.duplicate_for_write(
c.trans, iter.leaf.node
);
iter.leaf.node = mut->cast<leaf_node_t>();
}
++(get_tree_stats<self_type>(c.trans).num_updates);
iter.leaf.node->update(
iter.leaf.node->iter_idx(iter.leaf.pos),
val,
nextent);
return update_ret(
interruptible::ready_future_marker{},
iter);
}
/**
* remove
*
* Invalidates all outstanding iterators for this tree on this transaction.
*
* @param c [in] op context
* @param iter [in] iterator to element to remove, must not be end
*/
using remove_iertr = base_iertr;
using remove_ret = remove_iertr::future<>;
remove_ret remove(
op_context_t<node_key_t> c,
iterator iter)
{
LOG_PREFIX(FixedKVBtree::remove);
SUBTRACET(
seastore_fixedkv_tree,
"remove element at {}",
c.trans,
iter.is_end() ? min_max_t<node_key_t>::max : iter.get_key());
assert(!iter.is_end());
++(get_tree_stats<self_type>(c.trans).num_erases);
return seastar::do_with(
iter,
[this, c](auto &ret) {
if (!ret.leaf.node->is_mutable()) {
CachedExtentRef mut = c.cache.duplicate_for_write(
c.trans, ret.leaf.node
);
ret.leaf.node = mut->cast<leaf_node_t>();
}
ret.leaf.node->remove(
ret.leaf.node->iter_idx(ret.leaf.pos));
return handle_merge(
c, ret
);
});
}
/**
* init_cached_extent
*
* Checks whether e is live (reachable from fixed kv tree) and drops or initializes
* accordingly.
*
   * Returns whether e is live.
*/
using init_cached_extent_iertr = base_iertr;
using init_cached_extent_ret = init_cached_extent_iertr::future<bool>;
init_cached_extent_ret init_cached_extent(
op_context_t<node_key_t> c,
CachedExtentRef e)
{
assert(!e->is_logical());
LOG_PREFIX(FixedKVTree::init_cached_extent);
SUBTRACET(seastore_fixedkv_tree, "extent {}", c.trans, *e);
if (e->get_type() == internal_node_t::TYPE) {
auto eint = e->cast<internal_node_t>();
return lower_bound(
c, eint->get_node_meta().begin
).si_then([e, c, eint](auto iter) {
// Note, this check is valid even if iter.is_end()
LOG_PREFIX(FixedKVTree::init_cached_extent);
depth_t cand_depth = eint->get_node_meta().depth;
if (cand_depth <= iter.get_depth() &&
&*iter.get_internal(cand_depth).node == &*eint) {
SUBTRACET(
seastore_fixedkv_tree,
"extent {} is live",
c.trans,
*eint);
return true;
} else {
SUBTRACET(
seastore_fixedkv_tree,
"extent {} is not live",
c.trans,
*eint);
return false;
}
});
} else if (e->get_type() == leaf_node_t::TYPE) {
auto eleaf = e->cast<leaf_node_t>();
return lower_bound(
c, eleaf->get_node_meta().begin
).si_then([c, e, eleaf](auto iter) {
// Note, this check is valid even if iter.is_end()
LOG_PREFIX(FixedKVTree::init_cached_extent);
if (iter.leaf.node == &*eleaf) {
SUBTRACET(
seastore_fixedkv_tree,
"extent {} is live",
c.trans,
*eleaf);
return true;
} else {
SUBTRACET(
seastore_fixedkv_tree,
"extent {} is not live",
c.trans,
*eleaf);
return false;
}
});
} else {
SUBTRACET(
seastore_fixedkv_tree,
"found other extent {} type {}",
c.trans,
*e,
e->get_type());
return init_cached_extent_ret(
interruptible::ready_future_marker{},
true);
}
}
/// get_leaf_if_live: get leaf node at laddr/addr if still live
using get_leaf_if_live_iertr = base_iertr;
using get_leaf_if_live_ret = get_leaf_if_live_iertr::future<CachedExtentRef>;
get_leaf_if_live_ret get_leaf_if_live(
op_context_t<node_key_t> c,
paddr_t addr,
node_key_t laddr,
extent_len_t len)
{
LOG_PREFIX(FixedKVBtree::get_leaf_if_live);
return lower_bound(
c, laddr
).si_then([FNAME, c, addr, laddr, len](auto iter) {
if (iter.leaf.node->get_paddr() == addr) {
SUBTRACET(
seastore_fixedkv_tree,
"extent laddr {} addr {}~{} found: {}",
c.trans,
laddr,
addr,
len,
*iter.leaf.node);
return CachedExtentRef(iter.leaf.node);
} else {
SUBTRACET(
seastore_fixedkv_tree,
"extent laddr {} addr {}~{} is not live, does not match node {}",
c.trans,
laddr,
addr,
len,
*iter.leaf.node);
return CachedExtentRef();
}
});
}
/// get_internal_if_live: get internal node at laddr/addr if still live
using get_internal_if_live_iertr = base_iertr;
using get_internal_if_live_ret = get_internal_if_live_iertr::future<CachedExtentRef>;
get_internal_if_live_ret get_internal_if_live(
op_context_t<node_key_t> c,
paddr_t addr,
node_key_t laddr,
extent_len_t len)
{
LOG_PREFIX(FixedKVBtree::get_internal_if_live);
return lower_bound(
c, laddr
).si_then([FNAME, c, addr, laddr, len](auto iter) {
for (depth_t d = 2; d <= iter.get_depth(); ++d) {
CachedExtent &node = *iter.get_internal(d).node;
auto internal_node = node.cast<internal_node_t>();
if (internal_node->get_paddr() == addr) {
SUBTRACET(
seastore_fixedkv_tree,
"extent laddr {} addr {}~{} found: {}",
c.trans,
laddr,
addr,
len,
*internal_node);
assert(internal_node->get_node_meta().begin == laddr);
return CachedExtentRef(internal_node);
}
}
SUBTRACET(
seastore_fixedkv_tree,
"extent laddr {} addr {}~{} is not live, no matching internal node",
c.trans,
laddr,
addr,
len);
return CachedExtentRef();
});
}
/**
* rewrite_extent
*
* Rewrites a fresh copy of extent into transaction and updates internal
* references.
*/
using rewrite_extent_iertr = base_iertr;
using rewrite_extent_ret = rewrite_extent_iertr::future<>;
rewrite_extent_ret rewrite_extent(
op_context_t<node_key_t> c,
CachedExtentRef e) {
LOG_PREFIX(FixedKVBtree::rewrite_extent);
assert(is_lba_backref_node(e->get_type()));
auto do_rewrite = [&](auto &fixed_kv_extent) {
auto n_fixed_kv_extent = c.cache.template alloc_new_extent<
std::remove_reference_t<decltype(fixed_kv_extent)>
>(
c.trans,
fixed_kv_extent.get_length(),
fixed_kv_extent.get_user_hint(),
// get target rewrite generation
fixed_kv_extent.get_rewrite_generation());
fixed_kv_extent.get_bptr().copy_out(
0,
fixed_kv_extent.get_length(),
n_fixed_kv_extent->get_bptr().c_str());
n_fixed_kv_extent->set_modify_time(fixed_kv_extent.get_modify_time());
n_fixed_kv_extent->range = n_fixed_kv_extent->get_node_meta();
if (fixed_kv_extent.get_type() == internal_node_t::TYPE ||
leaf_node_t::do_has_children) {
if (!fixed_kv_extent.is_pending()) {
n_fixed_kv_extent->copy_sources.emplace(&fixed_kv_extent);
n_fixed_kv_extent->prior_instance = &fixed_kv_extent;
} else {
ceph_assert(fixed_kv_extent.is_mutation_pending());
n_fixed_kv_extent->copy_sources.emplace(
(typename internal_node_t::base_t*
)fixed_kv_extent.get_prior_instance().get());
n_fixed_kv_extent->children = std::move(fixed_kv_extent.children);
n_fixed_kv_extent->prior_instance = fixed_kv_extent.get_prior_instance();
n_fixed_kv_extent->adjust_ptracker_for_children();
}
}
/* This is a bit underhanded. Any relative addrs here must necessarily
* be record relative as we are rewriting a dirty extent. Thus, we
* are using resolve_relative_addrs with a (likely negative) block
* relative offset to correct them to block-relative offsets adjusted
* for our new transaction location.
*
       * Upon commit, these now block-relative addresses will be interpreted
* against the real final address.
*/
if (!n_fixed_kv_extent->get_paddr().is_absolute()) {
// backend_type_t::SEGMENTED
assert(n_fixed_kv_extent->get_paddr().is_record_relative());
n_fixed_kv_extent->resolve_relative_addrs(
make_record_relative_paddr(0).block_relative_to(
n_fixed_kv_extent->get_paddr()));
} // else: backend_type_t::RANDOM_BLOCK
SUBTRACET(
seastore_fixedkv_tree,
"rewriting {} into {}",
c.trans,
fixed_kv_extent,
*n_fixed_kv_extent);
return update_internal_mapping(
c,
n_fixed_kv_extent->get_node_meta().depth,
n_fixed_kv_extent->get_node_meta().begin,
e->get_paddr(),
n_fixed_kv_extent->get_paddr(),
n_fixed_kv_extent
).si_then([c, e] {
c.cache.retire_extent(c.trans, e);
});
};
CachedExtentRef n_fixed_kv_extent;
if (e->get_type() == internal_node_t::TYPE) {
auto lint = e->cast<internal_node_t>();
return do_rewrite(*lint);
} else {
assert(e->get_type() == leaf_node_t::TYPE);
auto lleaf = e->cast<leaf_node_t>();
return do_rewrite(*lleaf);
}
}
using update_internal_mapping_iertr = base_iertr;
using update_internal_mapping_ret = update_internal_mapping_iertr::future<>;
update_internal_mapping_ret update_internal_mapping(
op_context_t<node_key_t> c,
depth_t depth,
node_key_t laddr,
paddr_t old_addr,
paddr_t new_addr,
typename internal_node_t::base_ref nextent)
{
LOG_PREFIX(FixedKVBtree::update_internal_mapping);
SUBTRACET(
seastore_fixedkv_tree,
"updating laddr {} at depth {} from {} to {}, nextent {}",
c.trans,
laddr,
depth,
old_addr,
new_addr,
*nextent);
return lower_bound(
c, laddr, nullptr, depth + 1
).si_then([=, this](auto iter) {
assert(iter.get_depth() >= depth);
if (depth == iter.get_depth()) {
SUBTRACET(seastore_fixedkv_tree, "update at root", c.trans);
if (laddr != min_max_t<node_key_t>::min) {
SUBERRORT(
seastore_fixedkv_tree,
"updating root laddr {} at depth {} from {} to {},"
"laddr is not 0",
c.trans,
laddr,
depth,
old_addr,
new_addr,
get_root().get_location());
ceph_assert(0 == "impossible");
}
if (get_root().get_location() != old_addr) {
SUBERRORT(
seastore_fixedkv_tree,
"updating root laddr {} at depth {} from {} to {},"
"root addr {} does not match",
c.trans,
laddr,
depth,
old_addr,
new_addr,
get_root().get_location());
ceph_assert(0 == "impossible");
}
root_block = c.cache.duplicate_for_write(
c.trans, root_block)->template cast<RootBlock>();
get_root().set_location(new_addr);
set_root_node(nextent);
} else {
auto &parent = iter.get_internal(depth + 1);
assert(parent.node);
assert(parent.pos < parent.node->get_size());
auto piter = parent.node->iter_idx(parent.pos);
if (piter->get_key() != laddr) {
SUBERRORT(
seastore_fixedkv_tree,
"updating laddr {} at depth {} from {} to {},"
"node {} pos {} val pivot addr {} does not match",
c.trans,
laddr,
depth,
old_addr,
new_addr,
*(parent.node),
parent.pos,
piter->get_key());
ceph_assert(0 == "impossible");
}
if (piter->get_val() != old_addr) {
SUBERRORT(
seastore_fixedkv_tree,
"updating laddr {} at depth {} from {} to {},"
"node {} pos {} val addr {} does not match",
c.trans,
laddr,
depth,
old_addr,
new_addr,
*(parent.node),
parent.pos,
piter->get_val());
ceph_assert(0 == "impossible");
}
CachedExtentRef mut = c.cache.duplicate_for_write(
c.trans,
parent.node
);
typename internal_node_t::Ref mparent = mut->cast<internal_node_t>();
mparent->update(piter, new_addr, nextent.get());
        /* Note, iter is now invalid as we didn't update either the parent
* node reference to the new mutable instance nor did we update the
* child pointer to the new node. Not a problem as we'll now just
* destruct it.
*/
}
return seastar::now();
});
}
private:
RootBlockRef root_block;
template <typename T>
using node_position_t = typename iterator::template node_position_t<T>;
using get_internal_node_iertr = base_iertr;
using get_internal_node_ret = get_internal_node_iertr::future<InternalNodeRef>;
static get_internal_node_ret get_internal_node(
op_context_t<node_key_t> c,
depth_t depth,
paddr_t offset,
node_key_t begin,
node_key_t end,
typename std::optional<node_position_t<internal_node_t>> parent_pos)
{
LOG_PREFIX(FixedKVBtree::get_internal_node);
SUBTRACET(
seastore_fixedkv_tree,
"reading internal at offset {}, depth {}, begin {}, end {}",
c.trans,
offset,
depth,
begin,
end);
assert(depth > 1);
auto init_internal = [c, depth, begin, end,
parent_pos=std::move(parent_pos)]
(internal_node_t &node) {
assert(!node.is_pending());
assert(!node.is_linked());
node.range = fixed_kv_node_meta_t<node_key_t>{begin, end, depth};
if (parent_pos) {
auto &parent = parent_pos->node;
parent->link_child(&node, parent_pos->pos);
} else {
assert(node.range.is_root());
auto root_block = c.cache.get_root_fast(c.trans);
if (root_block->is_mutation_pending()) {
auto &stable_root = (RootBlockRef&)*root_block->get_prior_instance();
link_phy_tree_root_node(stable_root, &node);
} else {
assert(!root_block->is_pending());
link_phy_tree_root_node(root_block, &node);
}
}
};
return c.cache.template get_absent_extent<internal_node_t>(
c.trans,
offset,
node_size,
init_internal
).si_then([FNAME, c, offset, init_internal, depth, begin, end](
typename internal_node_t::Ref ret) {
SUBTRACET(
seastore_fixedkv_tree,
"read internal at offset {} {}",
c.trans,
offset,
*ret);
// This can only happen during init_cached_extent
      // or when a backref extent is being rewritten by gc space reclamation
if (!ret->is_pending() && !ret->is_linked()) {
assert(ret->is_dirty()
|| (is_backref_node(ret->get_type())
&& ret->is_clean()));
init_internal(*ret);
}
auto meta = ret->get_meta();
if (ret->get_size()) {
ceph_assert(meta.begin <= ret->begin()->get_key());
ceph_assert(meta.end > (ret->end() - 1)->get_key());
}
ceph_assert(depth == meta.depth);
ceph_assert(begin == meta.begin);
ceph_assert(end == meta.end);
return get_internal_node_ret(
interruptible::ready_future_marker{},
ret);
});
}
using get_leaf_node_iertr = base_iertr;
using get_leaf_node_ret = get_leaf_node_iertr::future<LeafNodeRef>;
static get_leaf_node_ret get_leaf_node(
op_context_t<node_key_t> c,
paddr_t offset,
node_key_t begin,
node_key_t end,
typename std::optional<node_position_t<leaf_node_t>> parent_pos)
{
LOG_PREFIX(FixedKVBtree::get_leaf_node);
SUBTRACET(
seastore_fixedkv_tree,
"reading leaf at offset {}, begin {}, end {}",
c.trans,
offset,
begin,
end);
auto init_leaf = [c, begin, end,
parent_pos=std::move(parent_pos)]
(leaf_node_t &node) {
assert(!node.is_pending());
assert(!node.is_linked());
node.range = fixed_kv_node_meta_t<node_key_t>{begin, end, 1};
if (parent_pos) {
auto &parent = parent_pos->node;
parent->link_child(&node, parent_pos->pos);
} else {
assert(node.range.is_root());
auto root_block = c.cache.get_root_fast(c.trans);
if (root_block->is_mutation_pending()) {
auto &stable_root = (RootBlockRef&)*root_block->get_prior_instance();
link_phy_tree_root_node(stable_root, &node);
} else {
assert(!root_block->is_pending());
link_phy_tree_root_node(root_block, &node);
}
}
};
return c.cache.template get_absent_extent<leaf_node_t>(
c.trans,
offset,
node_size,
init_leaf
).si_then([FNAME, c, offset, init_leaf, begin, end]
(typename leaf_node_t::Ref ret) {
SUBTRACET(
seastore_fixedkv_tree,
"read leaf at offset {} {}",
c.trans,
offset,
*ret);
// This can only happen during init_cached_extent
      // or when a backref extent is being rewritten by gc space reclamation
if (!ret->is_pending() && !ret->is_linked()) {
assert(ret->is_dirty()
|| (is_backref_node(ret->get_type())
&& ret->is_clean()));
init_leaf(*ret);
}
auto meta = ret->get_meta();
if (ret->get_size()) {
ceph_assert(meta.begin <= ret->begin()->get_key());
ceph_assert(meta.end > (ret->end() - 1)->get_key());
}
ceph_assert(1 == meta.depth);
ceph_assert(begin == meta.begin);
ceph_assert(end == meta.end);
return get_leaf_node_ret(
interruptible::ready_future_marker{},
ret);
});
}
using lookup_root_iertr = base_iertr;
using lookup_root_ret = lookup_root_iertr::future<>;
lookup_root_ret lookup_root(
op_context_t<node_key_t> c,
iterator &iter,
mapped_space_visitor_t *visitor) const {
LOG_PREFIX(FixedKVBtree::lookup_root);
SUBTRACET(seastore_fixedkv_tree,
"looking up root on {}",
c.trans,
*root_block);
auto [found, fut] = get_root_node(c);
auto on_found_internal =
[this, visitor, &iter](InternalNodeRef &root_node) {
iter.get_internal(get_root().get_depth()).node = root_node;
if (visitor) (*visitor)(
root_node->get_paddr(),
root_node->get_node_meta().begin,
root_node->get_length(),
get_root().get_depth(),
internal_node_t::TYPE,
iter);
return lookup_root_iertr::now();
};
auto on_found_leaf =
[visitor, &iter, this](LeafNodeRef root_node) {
iter.leaf.node = root_node;
if (visitor) (*visitor)(
root_node->get_paddr(),
root_node->get_node_meta().begin,
root_node->get_length(),
get_root().get_depth(),
leaf_node_t::TYPE,
iter);
return lookup_root_iertr::now();
};
if (found) {
return fut.si_then(
[this, c, on_found_internal=std::move(on_found_internal),
on_found_leaf=std::move(on_found_leaf)](auto root) {
LOG_PREFIX(FixedKVBtree::lookup_root);
ceph_assert(root);
SUBTRACET(seastore_fixedkv_tree,
"got root node on {}, res: {}",
c.trans,
*root_block,
*root);
if (get_root().get_depth() > 1) {
auto root_node = root->template cast<internal_node_t>();
return on_found_internal(root_node);
} else {
auto root_node = root->template cast<leaf_node_t>();
return on_found_leaf(root_node);
}
});
} else {
if (get_root().get_depth() > 1) {
return get_internal_node(
c,
get_root().get_depth(),
get_root().get_location(),
min_max_t<node_key_t>::min,
min_max_t<node_key_t>::max,
std::nullopt
).si_then([on_found=std::move(on_found_internal)](InternalNodeRef root_node) {
return on_found(root_node);
});
} else {
return get_leaf_node(
c,
get_root().get_location(),
min_max_t<node_key_t>::min,
min_max_t<node_key_t>::max,
std::nullopt
).si_then([on_found=std::move(on_found_leaf)](LeafNodeRef root_node) {
return on_found(root_node);
});
}
}
}
using lookup_internal_level_iertr = base_iertr;
using lookup_internal_level_ret = lookup_internal_level_iertr::future<>;
template <typename F>
static lookup_internal_level_ret lookup_internal_level(
op_context_t<node_key_t> c,
depth_t depth,
iterator &iter,
F &f,
mapped_space_visitor_t *visitor
) {
assert(depth > 1);
auto &parent_entry = iter.get_internal(depth + 1);
auto parent = parent_entry.node;
auto node_iter = parent->iter_idx(parent_entry.pos);
auto on_found = [depth, visitor, &iter, &f](InternalNodeRef node) {
auto &entry = iter.get_internal(depth);
entry.node = node;
auto node_iter = f(*node);
assert(node_iter != node->end());
entry.pos = node_iter->get_offset();
if (visitor)
(*visitor)(
node->get_paddr(),
node->get_node_meta().begin,
node->get_length(),
depth,
node->get_type(),
iter);
return seastar::now();
};
auto v = parent->template get_child<internal_node_t>(c, node_iter);
if (v.has_child()) {
return v.get_child_fut().safe_then(
[on_found=std::move(on_found), node_iter, c,
parent_entry](auto child) mutable {
LOG_PREFIX(FixedKVBtree::lookup_internal_level);
SUBTRACET(seastore_fixedkv_tree,
"got child on {}, pos: {}, res: {}",
c.trans,
*parent_entry.node,
parent_entry.pos,
*child);
auto &cnode = (typename internal_node_t::base_t &)*child;
assert(cnode.get_node_meta().begin == node_iter.get_key());
assert(cnode.get_node_meta().end > node_iter.get_key());
return on_found(child->template cast<internal_node_t>());
});
}
auto child_pos = v.get_child_pos();
auto next_iter = node_iter + 1;
auto begin = node_iter->get_key();
auto end = next_iter == parent->end()
? parent->get_node_meta().end
: next_iter->get_key();
return get_internal_node(
c,
depth,
node_iter->get_val().maybe_relative_to(parent->get_paddr()),
begin,
end,
std::make_optional<node_position_t<internal_node_t>>(
child_pos.template get_parent<internal_node_t>(),
child_pos.get_pos())
).si_then([on_found=std::move(on_found)](InternalNodeRef node) {
return on_found(node);
});
}
using lookup_leaf_iertr = base_iertr;
using lookup_leaf_ret = lookup_leaf_iertr::future<>;
template <typename F>
static lookup_internal_level_ret lookup_leaf(
op_context_t<node_key_t> c,
iterator &iter,
F &f,
mapped_space_visitor_t *visitor
) {
auto &parent_entry = iter.get_internal(2);
auto parent = parent_entry.node;
assert(parent);
auto node_iter = parent->iter_idx(parent_entry.pos);
auto on_found = [visitor, &iter, &f](LeafNodeRef node) {
iter.leaf.node = node;
auto node_iter = f(*node);
iter.leaf.pos = node_iter->get_offset();
if (visitor)
(*visitor)(
node->get_paddr(),
node->get_node_meta().begin,
node->get_length(),
1,
node->get_type(),
iter);
return seastar::now();
};
auto v = parent->template get_child<leaf_node_t>(c, node_iter);
if (v.has_child()) {
return v.get_child_fut().safe_then(
[on_found=std::move(on_found), node_iter, c,
parent_entry](auto child) mutable {
LOG_PREFIX(FixedKVBtree::lookup_leaf);
SUBTRACET(seastore_fixedkv_tree,
"got child on {}, pos: {}, res: {}",
c.trans,
*parent_entry.node,
parent_entry.pos,
*child);
auto &cnode = (typename internal_node_t::base_t &)*child;
assert(cnode.get_node_meta().begin == node_iter.get_key());
assert(cnode.get_node_meta().end > node_iter.get_key());
return on_found(child->template cast<leaf_node_t>());
});
}
auto child_pos = v.get_child_pos();
auto next_iter = node_iter + 1;
auto begin = node_iter->get_key();
auto end = next_iter == parent->end()
? parent->get_node_meta().end
: next_iter->get_key();
return get_leaf_node(
c,
node_iter->get_val().maybe_relative_to(parent->get_paddr()),
begin,
end,
std::make_optional<node_position_t<leaf_node_t>>(
child_pos.template get_parent<leaf_node_t>(),
child_pos.get_pos())
).si_then([on_found=std::move(on_found)](LeafNodeRef node) {
return on_found(node);
});
}
/**
* lookup_depth_range
*
* Performs node lookups on depths [from, to) using li and ll to
   * specify the target at each level. Note, may leave the iterator
   * at_boundary(); call handle_boundary() prior to returning out
   * of FixedKVBtree.
*/
using lookup_depth_range_iertr = base_iertr;
using lookup_depth_range_ret = lookup_depth_range_iertr::future<>;
template <typename LI, typename LL>
static lookup_depth_range_ret lookup_depth_range(
op_context_t<node_key_t> c, ///< [in] context
iterator &iter, ///< [in,out] iterator to populate
depth_t from, ///< [in] from inclusive
depth_t to, ///< [in] to exclusive, (to <= from, to == from is a noop)
LI &li, ///< [in] internal->iterator
LL &ll, ///< [in] leaf->iterator
mapped_space_visitor_t *visitor ///< [in] mapped space visitor
) {
LOG_PREFIX(FixedKVBtree::lookup_depth_range);
SUBTRACET(seastore_fixedkv_tree, "{} -> {}", c.trans, from, to);
return seastar::do_with(
from,
[c, to, visitor, &iter, &li, &ll](auto &d) {
return trans_intr::repeat(
[c, to, visitor, &iter, &li, &ll, &d] {
if (d > to) {
return [&] {
if (d > 1) {
return lookup_internal_level(
c,
d,
iter,
li,
visitor);
} else {
assert(d == 1);
return lookup_leaf(
c,
iter,
ll,
visitor);
}
}().si_then([&d] {
--d;
return lookup_depth_range_iertr::make_ready_future<
seastar::stop_iteration
>(seastar::stop_iteration::no);
});
} else {
return lookup_depth_range_iertr::make_ready_future<
seastar::stop_iteration
>(seastar::stop_iteration::yes);
}
});
});
}
using lookup_iertr = base_iertr;
using lookup_ret = lookup_iertr::future<iterator>;
template <typename LI, typename LL>
lookup_ret lookup(
op_context_t<node_key_t> c,
LI &&lookup_internal,
LL &&lookup_leaf,
depth_t min_depth,
mapped_space_visitor_t *visitor
) const {
LOG_PREFIX(FixedKVBtree::lookup);
assert(min_depth > 0);
return seastar::do_with(
iterator{get_root().get_depth()},
std::forward<LI>(lookup_internal),
std::forward<LL>(lookup_leaf),
[FNAME, this, visitor, c, min_depth](auto &iter, auto &li, auto &ll) {
return lookup_root(
c, iter, visitor
).si_then([FNAME, this, visitor, c, &iter, &li, &ll, min_depth] {
if (iter.get_depth() > 1) {
auto &root_entry = *(iter.internal.rbegin());
root_entry.pos = li(*(root_entry.node)).get_offset();
} else {
auto &root_entry = iter.leaf;
auto riter = ll(*(root_entry.node));
root_entry.pos = riter->get_offset();
}
SUBTRACET(seastore_fixedkv_tree, "got root, depth {}",
c.trans, get_root().get_depth());
return lookup_depth_range(
c,
iter,
get_root().get_depth() - 1,
min_depth - 1,
li,
ll,
visitor
).si_then([c, visitor, &iter, min_depth] {
// It's only when the lookup is triggered by
// update_internal_mapping() that min_depth is
// NOT 1
if (min_depth == 1 && iter.at_boundary()) {
return iter.handle_boundary(c, visitor);
} else {
return lookup_iertr::now();
}
});
}).si_then([&iter] {
return std::move(iter);
});
});
}
/**
* find_insertion
*
* Prepare iter for insertion. iter should begin pointing at
* the valid insertion point (lower_bound(laddr)).
*
* Upon completion, iter will point at the
* position at which laddr should be inserted. iter may, upon completion,
* point at the end of a leaf other than the end leaf if that's the correct
* insertion point.
*/
using find_insertion_iertr = base_iertr;
using find_insertion_ret = find_insertion_iertr::future<>;
static find_insertion_ret find_insertion(
op_context_t<node_key_t> c,
node_key_t laddr,
iterator &iter)
{
assert(iter.is_end() || iter.get_key() >= laddr);
if (!iter.is_end() && iter.get_key() == laddr) {
return seastar::now();
} else if (iter.leaf.node->get_node_meta().begin <= laddr) {
#ifndef NDEBUG
auto p = iter;
if (p.leaf.pos > 0) {
--p.leaf.pos;
assert(p.get_key() < laddr);
}
#endif
return seastar::now();
} else {
assert(iter.leaf.pos == 0);
return iter.prev(
c
).si_then([laddr, &iter](auto p) {
boost::ignore_unused(laddr); // avoid clang warning;
assert(p.leaf.node->get_node_meta().begin <= laddr);
assert(p.get_key() < laddr);
// Note, this is specifically allowed to violate the iterator
// invariant that pos is a valid index for the node in the event
// that the insertion point is at the end of a node.
p.leaf.pos++;
assert(p.at_boundary());
iter = p;
return seastar::now();
});
}
}
/**
* handle_split
*
* Split nodes in iter as needed for insertion. First, scan iter from leaf
* to find first non-full level. Then, split from there towards leaf.
*
* Upon completion, iter will point at the newly split insertion point. As
* with find_insertion, iter's leaf pointer may be end without iter being
* end.
*/
using handle_split_iertr = base_iertr;
using handle_split_ret = handle_split_iertr::future<>;
handle_split_ret handle_split(
op_context_t<node_key_t> c,
iterator &iter)
{
LOG_PREFIX(FixedKVBtree::handle_split);
depth_t split_from = iter.check_split();
SUBTRACET(seastore_fixedkv_tree, "split_from {}, depth {}", c.trans, split_from, iter.get_depth());
if (split_from == iter.get_depth()) {
auto nroot = c.cache.template alloc_new_extent<internal_node_t>(
c.trans, node_size, placement_hint_t::HOT, INIT_GENERATION);
fixed_kv_node_meta_t<node_key_t> meta{
min_max_t<node_key_t>::min, min_max_t<node_key_t>::max, iter.get_depth() + 1};
nroot->set_meta(meta);
nroot->range = meta;
nroot->journal_insert(
nroot->begin(),
min_max_t<node_key_t>::min,
get_root().get_location(),
nullptr);
iter.internal.push_back({nroot, 0});
get_tree_stats<self_type>(c.trans).depth = iter.get_depth();
get_tree_stats<self_type>(c.trans).extents_num_delta++;
root_block = c.cache.duplicate_for_write(
c.trans, root_block)->template cast<RootBlock>();
get_root().set_location(nroot->get_paddr());
get_root().set_depth(iter.get_depth());
ceph_assert(get_root().get_depth() <= MAX_FIXEDKVBTREE_DEPTH);
set_root_node(nroot);
}
/* pos may be either node_position_t<leaf_node_t> or
* node_position_t<internal_node_t> */
auto split_level = [&](auto &parent_pos, auto &pos) {
LOG_PREFIX(FixedKVBtree::handle_split);
auto [left, right, pivot] = pos.node->make_split_children(c);
auto parent_node = parent_pos.node;
auto parent_iter = parent_pos.get_iter();
parent_node->update(
parent_iter,
left->get_paddr(),
left.get());
parent_node->insert(
parent_iter + 1,
pivot,
right->get_paddr(),
right.get());
SUBTRACET(
seastore_fixedkv_tree,
"splitted {} into left: {}, right: {}",
c.trans,
*pos.node,
*left,
*right);
c.cache.retire_extent(c.trans, pos.node);
get_tree_stats<self_type>(c.trans).extents_num_delta++;
return std::make_pair(left, right);
};
for (; split_from > 0; --split_from) {
auto &parent_pos = iter.get_internal(split_from + 1);
if (!parent_pos.node->is_mutable()) {
parent_pos.node = c.cache.duplicate_for_write(
c.trans, parent_pos.node
)->template cast<internal_node_t>();
}
if (split_from > 1) {
auto &pos = iter.get_internal(split_from);
SUBTRACET(
seastore_fixedkv_tree,
"splitting internal {} at depth {}, parent: {} at pos: {}",
c.trans,
*pos.node,
split_from,
*parent_pos.node,
parent_pos.pos);
auto [left, right] = split_level(parent_pos, pos);
if (pos.pos < left->get_size()) {
pos.node = left;
} else {
pos.node = right;
pos.pos -= left->get_size();
parent_pos.pos += 1;
}
} else {
auto &pos = iter.leaf;
SUBTRACET(
seastore_fixedkv_tree,
"splitting leaf {}, parent: {} at pos: {}",
c.trans,
*pos.node,
*parent_pos.node,
parent_pos.pos);
auto [left, right] = split_level(parent_pos, pos);
/* right->get_node_meta().begin == pivot == right->begin()->get_key()
* Thus, if pos.pos == left->get_size(), we want iter to point to
* left with pos.pos at the end rather than right with pos.pos = 0
* since the insertion would be to the left of the first element
* of right and thus necessarily less than right->get_node_meta().begin.
*/
if (pos.pos <= left->get_size()) {
pos.node = left;
} else {
pos.node = right;
pos.pos -= left->get_size();
parent_pos.pos += 1;
}
}
}
return seastar::now();
}
using handle_merge_iertr = base_iertr;
using handle_merge_ret = handle_merge_iertr::future<>;
handle_merge_ret handle_merge(
op_context_t<node_key_t> c,
iterator &iter)
{
LOG_PREFIX(FixedKVBtree::handle_merge);
if (iter.get_depth() == 1 ||
!iter.leaf.node->below_min_capacity()) {
SUBTRACET(
seastore_fixedkv_tree,
"no need to merge leaf, leaf size {}, depth {}",
c.trans,
iter.leaf.node->get_size(),
iter.get_depth());
return seastar::now();
}
return seastar::do_with(
depth_t{1},
[FNAME, this, c, &iter](auto &to_merge) {
return trans_intr::repeat(
[FNAME, this, c, &iter, &to_merge] {
SUBTRACET(
seastore_fixedkv_tree,
"merging depth {}",
c.trans,
to_merge);
auto &parent_pos = iter.get_internal(to_merge + 1);
auto merge_fut = handle_merge_iertr::now();
if (to_merge > 1) {
auto &pos = iter.get_internal(to_merge);
merge_fut = merge_level(c, to_merge, parent_pos, pos);
} else {
auto &pos = iter.leaf;
merge_fut = merge_level(c, to_merge, parent_pos, pos);
}
return merge_fut.si_then([FNAME, this, c, &iter, &to_merge] {
++to_merge;
auto &pos = iter.get_internal(to_merge);
if (to_merge == iter.get_depth()) {
if (pos.node->get_size() == 1) {
SUBTRACET(seastore_fixedkv_tree, "collapsing root", c.trans);
c.cache.retire_extent(c.trans, pos.node);
assert(pos.pos == 0);
auto node_iter = pos.get_iter();
iter.internal.pop_back();
get_tree_stats<self_type>(c.trans).depth = iter.get_depth();
get_tree_stats<self_type>(c.trans).extents_num_delta--;
root_block = c.cache.duplicate_for_write(
c.trans, root_block
)->template cast<RootBlock>();
get_root().set_location(
node_iter->get_val().maybe_relative_to(pos.node->get_paddr()));
get_root().set_depth(iter.get_depth());
if (iter.get_depth() > 1) {
auto root_node = iter.get_internal(iter.get_depth()).node;
set_root_node(root_node);
} else {
set_root_node(iter.leaf.node);
}
} else {
SUBTRACET(seastore_fixedkv_tree, "no need to collapse root", c.trans);
}
return seastar::stop_iteration::yes;
} else if (pos.node->below_min_capacity()) {
SUBTRACET(
seastore_fixedkv_tree,
"continuing, next node {} depth {} at min",
c.trans,
*pos.node,
to_merge);
return seastar::stop_iteration::no;
} else {
SUBTRACET(
seastore_fixedkv_tree,
"complete, next node {} depth {} not min",
c.trans,
*pos.node,
to_merge);
return seastar::stop_iteration::yes;
}
});
});
});
}
template <typename NodeType,
std::enable_if_t<std::is_same_v<NodeType, leaf_node_t>, int> = 0>
base_iertr::future<typename NodeType::Ref> get_node(
op_context_t<node_key_t> c,
depth_t depth,
paddr_t addr,
node_key_t begin,
node_key_t end,
typename std::optional<node_position_t<leaf_node_t>> parent_pos) {
assert(depth == 1);
return get_leaf_node(c, addr, begin, end, std::move(parent_pos));
}
template <typename NodeType,
std::enable_if_t<std::is_same_v<NodeType, internal_node_t>, int> = 0>
base_iertr::future<typename NodeType::Ref> get_node(
op_context_t<node_key_t> c,
depth_t depth,
paddr_t addr,
node_key_t begin,
node_key_t end,
typename std::optional<node_position_t<internal_node_t>> parent_pos) {
return get_internal_node(c, depth, addr, begin, end, std::move(parent_pos));
}
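  /**
   * merge_level
   *
   * Merges or rebalances pos.node (at the given depth) with an adjacent
   * sibling under parent_pos: if the donor sibling is at minimum capacity
   * the two nodes are merged into a single replacement, otherwise their
   * entries are redistributed between two replacements.  Parent entries,
   * cached child pointers and iterator positions are updated to match.
   */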
template <typename NodeType>
handle_merge_ret merge_level(
op_context_t<node_key_t> c,
depth_t depth,
node_position_t<internal_node_t> &parent_pos,
node_position_t<NodeType> &pos)
{
LOG_PREFIX(FixedKVBtree::merge_level);
if (!parent_pos.node->is_mutable()) {
parent_pos.node = c.cache.duplicate_for_write(
c.trans, parent_pos.node
)->template cast<internal_node_t>();
}
auto iter = parent_pos.get_iter();
assert(iter.get_offset() < parent_pos.node->get_size());
bool donor_is_left = ((iter.get_offset() + 1) == parent_pos.node->get_size());
auto donor_iter = donor_is_left ? (iter - 1) : (iter + 1);
auto next_iter = donor_iter + 1;
auto begin = donor_iter->get_key();
auto end = next_iter == parent_pos.node->end()
? parent_pos.node->get_node_meta().end
: next_iter->get_key();
SUBTRACET(seastore_fixedkv_tree, "parent: {}, node: {}", c.trans, *parent_pos.node, *pos.node);
auto do_merge = [c, iter, donor_iter, donor_is_left, &parent_pos, &pos](
typename NodeType::Ref donor) {
LOG_PREFIX(FixedKVBtree::merge_level);
auto [l, r] = donor_is_left ?
std::make_pair(donor, pos.node) : std::make_pair(pos.node, donor);
auto [liter, riter] = donor_is_left ?
std::make_pair(donor_iter, iter) : std::make_pair(iter, donor_iter);
if (donor->at_min_capacity()) {
auto replacement = l->make_full_merge(c, r);
parent_pos.node->update(
liter,
replacement->get_paddr(),
replacement.get());
parent_pos.node->remove(riter);
pos.node = replacement;
if (donor_is_left) {
pos.pos += l->get_size();
parent_pos.pos--;
}
SUBTRACET(seastore_fixedkv_tree, "l: {}, r: {}, replacement: {}", c.trans, *l, *r, *replacement);
c.cache.retire_extent(c.trans, l);
c.cache.retire_extent(c.trans, r);
get_tree_stats<self_type>(c.trans).extents_num_delta--;
} else {
LOG_PREFIX(FixedKVBtree::merge_level);
auto [replacement_l, replacement_r, pivot] =
l->make_balanced(
c,
r,
!donor_is_left);
parent_pos.node->update(
liter,
replacement_l->get_paddr(),
replacement_l.get());
parent_pos.node->replace(
riter,
pivot,
replacement_r->get_paddr(),
replacement_r.get());
if (donor_is_left) {
assert(parent_pos.pos > 0);
parent_pos.pos--;
}
auto orig_position = donor_is_left ?
l->get_size() + pos.pos :
pos.pos;
if (orig_position < replacement_l->get_size()) {
pos.node = replacement_l;
pos.pos = orig_position;
} else {
parent_pos.pos++;
pos.node = replacement_r;
pos.pos = orig_position - replacement_l->get_size();
}
SUBTRACET(
seastore_fixedkv_tree,
"l: {}, r: {}, replacement_l: {}, replacement_r: {}",
c.trans, *l, *r, *replacement_l, *replacement_r);
c.cache.retire_extent(c.trans, l);
c.cache.retire_extent(c.trans, r);
}
return seastar::now();
};
auto v = parent_pos.node->template get_child<NodeType>(c, donor_iter);
if (v.has_child()) {
return v.get_child_fut().safe_then(
[do_merge=std::move(do_merge), &pos,
donor_iter, donor_is_left, c, parent_pos](auto child) mutable {
LOG_PREFIX(FixedKVBtree::merge_level);
SUBTRACET(seastore_fixedkv_tree,
"got child on {}, pos: {}, res: {}",
c.trans,
*parent_pos.node,
donor_iter.get_offset(),
*child);
auto &node = (typename internal_node_t::base_t&)*child;
assert(donor_is_left ?
node.get_node_meta().end == pos.node->get_node_meta().begin :
node.get_node_meta().begin == pos.node->get_node_meta().end);
assert(node.get_node_meta().begin == donor_iter.get_key());
assert(node.get_node_meta().end > donor_iter.get_key());
return do_merge(child->template cast<NodeType>());
});
}
auto child_pos = v.get_child_pos();
return get_node<NodeType>(
c,
depth,
donor_iter.get_val().maybe_relative_to(parent_pos.node->get_paddr()),
begin,
end,
std::make_optional<node_position_t<NodeType>>(
child_pos.template get_parent<NodeType>(),
child_pos.get_pos())
).si_then([do_merge=std::move(do_merge)](typename NodeType::Ref donor) {
return do_merge(donor);
});
}
};
template <typename T>
struct is_fixed_kv_tree : std::false_type {};
template <
typename node_key_t,
typename node_val_t,
typename internal_node_t,
typename leaf_node_t,
typename pin_t,
size_t node_size,
bool leaf_has_children>
struct is_fixed_kv_tree<
FixedKVBtree<
node_key_t,
node_val_t,
internal_node_t,
leaf_node_t,
pin_t,
node_size,
leaf_has_children>> : std::true_type {};
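/**
 * with_btree
 *
 * Fetches the current root block for the transaction and instantiates a
 * transient tree handle over it for the duration of the passed callable.
 */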
template <
typename tree_type_t,
typename node_key_t,
typename F,
std::enable_if_t<is_fixed_kv_tree<tree_type_t>::value, int> = 0>
auto with_btree(
Cache &cache,
op_context_t<node_key_t> c,
F &&f) {
return cache.get_root(
c.trans
).si_then([f=std::forward<F>(f)](RootBlockRef croot) mutable {
return seastar::do_with(
tree_type_t(croot),
[f=std::move(f)](auto &btree) mutable {
return f(btree);
});
});
}
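/**
 * with_btree_state
 *
 * Variant of with_btree that additionally threads caller-supplied state
 * through the callable and returns that state once the callable completes.
 */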
template <
typename tree_type_t,
typename State,
typename node_key_t,
typename F,
std::enable_if_t<is_fixed_kv_tree<tree_type_t>::value, int> = 0>
auto with_btree_state(
Cache &cache,
op_context_t<node_key_t> c,
State &&init,
F &&f) {
return seastar::do_with(
std::forward<State>(init),
[&cache, c, f=std::forward<F>(f)](auto &state) mutable {
return with_btree<tree_type_t>(
cache,
c,
[&state, f=std::move(f)](auto &btree) mutable {
return f(btree, state);
}).si_then([&state] {
return seastar::make_ready_future<State>(std::move(state));
});
});
}
template <
typename tree_type_t,
typename State,
typename node_key_t,
typename F,
std::enable_if_t<is_fixed_kv_tree<tree_type_t>::value, int> = 0>
auto with_btree_state(
Cache &cache,
op_context_t<node_key_t> c,
F &&f) {
return crimson::os::seastore::with_btree_state<tree_type_t, State>(
cache, c, State{}, std::forward<F>(f));
}
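/**
 * with_btree_ret
 *
 * Convenience wrapper over with_btree_state for operations that produce a
 * single return value.  Illustrative sketch only -- the tree alias, key and
 * value types below are hypothetical, not taken from this file:
 *
 *   return with_btree_ret<MyBtree, MyValue>(
 *     cache, c,
 *     [c, key](auto &btree) {
 *       return btree.lower_bound(c, key
 *       ).si_then([](auto iter) { return iter.get_val(); });
 *     });
 */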
template <
typename tree_type_t,
typename Ret,
typename node_key_t,
typename F>
auto with_btree_ret(
Cache &cache,
op_context_t<node_key_t> c,
F &&f) {
return with_btree_state<tree_type_t, Ret>(
cache,
c,
[f=std::forward<F>(f)](auto &btree, auto &ret) mutable {
return f(
btree
).si_then([&ret](auto &&_ret) {
ret = std::move(_ret);
});
});
}
}
| 69,399 | 29.926916 | 105 | h |
null | ceph-main/src/crimson/os/seastore/btree/fixed_kv_node.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "crimson/os/seastore/btree/fixed_kv_node.h"
namespace crimson::os::seastore {
bool is_valid_child_ptr(ChildableCachedExtent* child) {
return child != nullptr && child != RESERVATION_PTR;
}
} // namespace crimson::os::seastore
| 339 | 25.153846 | 70 | cc |
null | ceph-main/src/crimson/os/seastore/btree/fixed_kv_node.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <sys/mman.h>
#include <memory>
#include <string.h>
#include "include/buffer.h"
#include "crimson/common/fixed_kv_node_layout.h"
#include "crimson/common/errorator.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/cached_extent.h"
#include "crimson/os/seastore/btree/btree_range_pin.h"
#include "crimson/os/seastore/btree/fixed_kv_btree.h"
#include "crimson/os/seastore/root_block.h"
namespace crimson::os::seastore {
/**
* FixedKVNode
*
* Base class enabling recursive lookup between internal and leaf nodes.
*/
template <typename node_key_t>
struct FixedKVNode : ChildableCachedExtent {
using FixedKVNodeRef = TCachedExtentRef<FixedKVNode>;
fixed_kv_node_meta_t<node_key_t> range;
struct copy_source_cmp_t {
using is_transparent = node_key_t;
bool operator()(const FixedKVNodeRef &l, const FixedKVNodeRef &r) const {
assert(l->range.end <= r->range.begin
|| r->range.end <= l->range.begin
|| (l->range.begin == r->range.begin
&& l->range.end == r->range.end));
return l->range.begin < r->range.begin;
}
bool operator()(const node_key_t &l, const FixedKVNodeRef &r) const {
return l < r->range.begin;
}
bool operator()(const FixedKVNodeRef &l, const node_key_t &r) const {
return l->range.begin < r;
}
};
/*
*
* Nodes of fixed-kv-btree connect to their child nodes by pointers following
* invariants below:
*
* 1. if nodes are stable:
* a. parent points at the node's stable parent
* b. prior_instance is empty
* c. child pointers point at stable children. Child resolution is done
* directly via this array.
   *      d. copy_sources is empty
* 2. if nodes are mutation_pending:
* a. parent is empty and needs to be fixed upon commit
* b. prior_instance points to its stable version
* c. child pointers are null except for initial_pending() children of
* this transaction. Child resolution is done by first checking this
* array, and then recursively resolving via the parent. We copy child
* pointers from parent on commit.
   *      d. copy_sources is empty
* 3. if nodes are initial_pending
* a. parent points at its pending parent on this transaction (must exist)
* b. prior_instance is empty or, if it's the result of rewrite, points to
* its stable predecessor
   *      c. child pointers are null except for initial_pending() children of
   *         this transaction (live due to 3a above). Child resolution is done
* by first checking this array, and then recursively resolving via
* the correct copy_sources entry. We copy child pointers from copy_sources
* on commit.
   *      d. copy_sources contains the set of stable nodes at the same tree-level (only
* its "prior_instance" if the node is the result of a rewrite), with which
* the lba range of this node overlaps.
*/
std::vector<ChildableCachedExtent*> children;
std::set<FixedKVNodeRef, copy_source_cmp_t> copy_sources;
uint16_t capacity = 0;
parent_tracker_t* my_tracker = nullptr;
RootBlockRef root_block;
bool is_linked() {
assert(!has_parent_tracker() || !(bool)root_block);
return (bool)has_parent_tracker() || (bool)root_block;
}
FixedKVNode(uint16_t capacity, ceph::bufferptr &&ptr)
: ChildableCachedExtent(std::move(ptr)),
children(capacity, nullptr),
capacity(capacity) {}
FixedKVNode(const FixedKVNode &rhs)
: ChildableCachedExtent(rhs),
range(rhs.range),
children(rhs.capacity, nullptr),
capacity(rhs.capacity) {}
virtual fixed_kv_node_meta_t<node_key_t> get_node_meta() const = 0;
virtual uint16_t get_node_size() const = 0;
virtual ~FixedKVNode() = default;
virtual node_key_t get_key_from_idx(uint16_t idx) const = 0;
template<typename iter_t>
void update_child_ptr(iter_t iter, ChildableCachedExtent* child) {
children[iter.get_offset()] = child;
set_child_ptracker(child);
}
virtual bool is_leaf_and_has_children() const = 0;
template<typename iter_t>
void insert_child_ptr(iter_t iter, ChildableCachedExtent* child) {
auto raw_children = children.data();
auto offset = iter.get_offset();
std::memmove(
&raw_children[offset + 1],
&raw_children[offset],
(get_node_size() - offset) * sizeof(ChildableCachedExtent*));
if (child) {
children[offset] = child;
set_child_ptracker(child);
} else {
// this can only happen when reserving lba spaces
ceph_assert(is_leaf_and_has_children());
// this is to avoid mistakenly copying pointers from
// copy sources when committing this lba node, because
// we rely on pointers' "nullness" to avoid copying
// pointers for updated values
children[offset] = RESERVATION_PTR;
}
}
template<typename iter_t>
void remove_child_ptr(iter_t iter) {
LOG_PREFIX(FixedKVNode::remove_child_ptr);
auto raw_children = children.data();
auto offset = iter.get_offset();
SUBTRACE(seastore_fixedkv_tree, "trans.{}, pos {}, total size {}, extent {}",
this->pending_for_transaction,
offset,
get_node_size(),
(void*)raw_children[offset]);
// parent tracker of the child being removed will be
// reset when the child is invalidated, so no need to
// reset it here
std::memmove(
&raw_children[offset],
&raw_children[offset + 1],
(get_node_size() - offset - 1) * sizeof(ChildableCachedExtent*));
}
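  /**
   * get_stable_for_key
   *
   * Returns the stable node covering key: the prior instance for a
   * mutation_pending node, or the overlapping copy source for an
   * initial_pending node.
   */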
FixedKVNode& get_stable_for_key(node_key_t key) {
ceph_assert(is_pending());
if (is_mutation_pending()) {
return (FixedKVNode&)*get_prior_instance();
} else {
ceph_assert(!copy_sources.empty());
auto it = copy_sources.upper_bound(key);
it--;
auto ©_source = *it;
ceph_assert(copy_source->get_node_meta().is_in_range(key));
return *copy_source;
}
}
static void push_copy_sources(
FixedKVNode &dest,
FixedKVNode &src)
{
ceph_assert(dest.is_initial_pending());
if (!src.is_pending()) {
dest.copy_sources.emplace(&src);
} else if (src.is_mutation_pending()) {
dest.copy_sources.emplace(
src.get_prior_instance()->template cast<FixedKVNode>());
} else {
ceph_assert(src.is_initial_pending());
dest.copy_sources.insert(
src.copy_sources.begin(),
src.copy_sources.end());
}
}
virtual uint16_t get_node_split_pivot() = 0;
static void move_child_ptrs(
FixedKVNode &dest,
FixedKVNode &src,
size_t dest_start,
size_t src_start,
size_t src_end)
{
std::memmove(
dest.children.data() + dest_start,
src.children.data() + src_start,
(src_end - src_start) * sizeof(ChildableCachedExtent*));
ceph_assert(src_start < src_end);
ceph_assert(src.children.size() >= src_end);
for (auto it = src.children.begin() + src_start;
it != src.children.begin() + src_end;
it++)
{
auto child = *it;
if (is_valid_child_ptr(child)) {
dest.set_child_ptracker(child);
}
}
}
void link_child(ChildableCachedExtent* child, uint16_t pos) {
assert(pos < get_node_size());
assert(child);
ceph_assert(!is_pending());
ceph_assert(child->is_valid() && !child->is_pending());
assert(!children[pos]);
children[pos] = child;
set_child_ptracker(child);
}
virtual get_child_ret_t<LogicalCachedExtent>
get_logical_child(op_context_t<node_key_t> c, uint16_t pos) = 0;
template <typename T, typename iter_t>
get_child_ret_t<T> get_child(op_context_t<node_key_t> c, iter_t iter) {
auto pos = iter.get_offset();
assert(children.capacity());
auto child = children[pos];
if (is_valid_child_ptr(child)) {
ceph_assert(child->get_type() == T::TYPE);
return c.cache.template get_extent_viewable_by_trans<T>(c.trans, (T*)child);
} else if (is_pending()) {
auto key = iter.get_key();
auto &sparent = get_stable_for_key(key);
auto spos = sparent.child_pos_for_key(key);
auto child = sparent.children[spos];
if (is_valid_child_ptr(child)) {
ceph_assert(child->get_type() == T::TYPE);
return c.cache.template get_extent_viewable_by_trans<T>(c.trans, (T*)child);
} else {
return child_pos_t(&sparent, spos);
}
} else {
return child_pos_t(this, pos);
}
}
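  // Distribute this node's copy sources and, for pending nodes, cached
  // child pointers between the left and right halves produced by a split.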
void split_child_ptrs(
FixedKVNode &left,
FixedKVNode &right)
{
assert(!left.my_tracker);
assert(!right.my_tracker);
push_copy_sources(left, *this);
push_copy_sources(right, *this);
if (is_pending()) {
uint16_t pivot = get_node_split_pivot();
move_child_ptrs(left, *this, 0, 0, pivot);
move_child_ptrs(right, *this, 0, pivot, get_node_size());
my_tracker = nullptr;
}
}
void merge_child_ptrs(
FixedKVNode &left,
FixedKVNode &right)
{
ceph_assert(!my_tracker);
push_copy_sources(*this, left);
push_copy_sources(*this, right);
if (left.is_pending()) {
move_child_ptrs(*this, left, 0, 0, left.get_node_size());
left.my_tracker = nullptr;
}
if (right.is_pending()) {
move_child_ptrs(*this, right, left.get_node_size(), 0, right.get_node_size());
right.my_tracker = nullptr;
}
}
static void balance_child_ptrs(
FixedKVNode &left,
FixedKVNode &right,
bool prefer_left,
FixedKVNode &replacement_left,
FixedKVNode &replacement_right)
{
size_t l_size = left.get_node_size();
size_t r_size = right.get_node_size();
size_t total = l_size + r_size;
size_t pivot_idx = (l_size + r_size) / 2;
if (total % 2 && prefer_left) {
pivot_idx++;
}
assert(!replacement_left.my_tracker);
assert(!replacement_right.my_tracker);
if (pivot_idx < l_size) {
// deal with left
push_copy_sources(replacement_left, left);
push_copy_sources(replacement_right, left);
if (left.is_pending()) {
move_child_ptrs(replacement_left, left, 0, 0, pivot_idx);
move_child_ptrs(replacement_right, left, 0, pivot_idx, l_size);
left.my_tracker = nullptr;
}
// deal with right
push_copy_sources(replacement_right, right);
if (right.is_pending()) {
move_child_ptrs(replacement_right, right, l_size - pivot_idx, 0, r_size);
        right.my_tracker = nullptr;
}
} else {
// deal with left
push_copy_sources(replacement_left, left);
if (left.is_pending()) {
move_child_ptrs(replacement_left, left, 0, 0, l_size);
left.my_tracker = nullptr;
}
// deal with right
push_copy_sources(replacement_left, right);
push_copy_sources(replacement_right, right);
if (right.is_pending()) {
move_child_ptrs(replacement_left, right, l_size, 0, pivot_idx - l_size);
move_child_ptrs(replacement_right, right, 0, pivot_idx - l_size, r_size);
        right.my_tracker = nullptr;
}
}
}
void set_parent_tracker_from_prior_instance() {
assert(is_mutation_pending());
auto &prior = (FixedKVNode&)(*get_prior_instance());
if (range.is_root()) {
ceph_assert(prior.root_block);
ceph_assert(pending_for_transaction);
root_block = prior.root_block;
link_phy_tree_root_node(root_block, this);
return;
}
ceph_assert(!root_block);
take_prior_parent_tracker();
assert(is_parent_valid());
auto parent = get_parent_node<FixedKVNode>();
//TODO: can this search be avoided?
auto off = parent->lower_bound_offset(get_node_meta().begin);
assert(parent->get_key_from_idx(off) == get_node_meta().begin);
parent->children[off] = this;
}
bool is_children_empty() const {
for (auto it = children.begin();
it != children.begin() + get_node_size();
it++) {
if (is_valid_child_ptr(*it)
&& (*it)->is_valid()) {
return false;
}
}
return true;
}
void set_children_from_prior_instance() {
assert(get_prior_instance());
auto &prior = (FixedKVNode&)(*get_prior_instance());
assert(prior.my_tracker || prior.is_children_empty());
if (prior.my_tracker) {
prior.my_tracker->reset_parent(this);
my_tracker = prior.my_tracker;
      // All of my initial pending children are pointing to the original
      // tracker, which was dropped by the line above, so we need to
      // adjust them to point to the new tracker.
adjust_ptracker_for_children();
}
assert(my_tracker || is_children_empty());
}
void adjust_ptracker_for_children() {
auto begin = children.begin();
auto end = begin + get_node_size();
ceph_assert(end <= children.end());
for (auto it = begin; it != end; it++) {
auto child = *it;
if (is_valid_child_ptr(child)) {
set_child_ptracker(child);
}
}
}
void on_delta_write(paddr_t record_block_offset) final {
// All in-memory relative addrs are necessarily record-relative
assert(get_prior_instance());
assert(pending_for_transaction);
resolve_relative_addrs(record_block_offset);
}
virtual uint16_t lower_bound_offset(node_key_t) const = 0;
virtual uint16_t upper_bound_offset(node_key_t) const = 0;
virtual uint16_t child_pos_for_key(node_key_t) const = 0;
virtual bool validate_stable_children() = 0;
template<typename iter_t>
uint16_t copy_children_from_stable_source(
FixedKVNode &source,
iter_t foreign_start_it,
iter_t foreign_end_it,
iter_t local_start_it) {
auto foreign_it = foreign_start_it, local_it = local_start_it;
while (foreign_it != foreign_end_it
&& local_it.get_offset() < get_node_size())
{
auto &child = children[local_it.get_offset()];
if (foreign_it.get_key() == local_it.get_key()) {
// the foreign key is preserved
if (!child) {
child = source.children[foreign_it.get_offset()];
}
foreign_it++;
local_it++;
} else if (foreign_it.get_key() < local_it.get_key()) {
        // the foreign key has been removed; if it hadn't been, there
        // would have been a local key, before the one pointed to by the
        // current "local_it", equal to this foreign key, and it would
        // have pushed foreign_it forward already.
foreign_it++;
} else {
// the local key must be a newly inserted one.
local_it++;
}
}
return local_it.get_offset();
}
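  /**
   * copy_children_from_stable_sources
   *
   * For an initial_pending node, walk the overlapping copy_sources in key
   * order and fill in any still-null child pointers from them; get_iter
   * yields an iterator into a given node at a given offset.
   */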
template<typename Func>
void copy_children_from_stable_sources(Func &&get_iter) {
if (!copy_sources.empty()) {
auto it = --copy_sources.upper_bound(get_node_meta().begin);
auto &cs = *it;
uint16_t start_pos = cs->lower_bound_offset(
get_node_meta().begin);
if (start_pos == cs->get_node_size()) {
it++;
start_pos = 0;
}
uint16_t local_next_pos = 0;
for (; it != copy_sources.end(); it++) {
auto& copy_source = *it;
auto end_pos = copy_source->get_node_size();
if (copy_source->get_node_meta().is_in_range(get_node_meta().end)) {
end_pos = copy_source->upper_bound_offset(get_node_meta().end);
}
auto local_start_iter = get_iter(*this, local_next_pos);
auto foreign_start_iter = get_iter(*copy_source, start_pos);
auto foreign_end_iter = get_iter(*copy_source, end_pos);
local_next_pos = copy_children_from_stable_source(
*copy_source, foreign_start_iter, foreign_end_iter, local_start_iter);
if (end_pos != copy_source->get_node_size()) {
break;
}
start_pos = 0;
}
}
}
void on_invalidated(Transaction &t) final {
reset_parent_tracker();
}
bool is_rewrite() {
return is_initial_pending() && get_prior_instance();
}
void on_initial_write() final {
// All in-memory relative addrs are necessarily block-relative
resolve_relative_addrs(get_paddr());
if (range.is_root()) {
reset_parent_tracker();
}
assert(has_parent_tracker() ? (is_parent_valid()) : true);
}
void set_child_ptracker(ChildableCachedExtent *child) {
if (!this->my_tracker) {
this->my_tracker = new parent_tracker_t(this);
}
child->reset_parent_tracker(this->my_tracker);
}
void on_clean_read() final {
// From initial write of block, relative addrs are necessarily block-relative
resolve_relative_addrs(get_paddr());
}
virtual void resolve_relative_addrs(paddr_t base) = 0;
};
/**
* FixedKVInternalNode
*
* Abstracts operations on and layout of internal nodes for the
* FixedKVBTree.
*/
template <
size_t CAPACITY,
typename NODE_KEY,
typename NODE_KEY_LE,
size_t node_size,
typename node_type_t>
struct FixedKVInternalNode
: FixedKVNode<NODE_KEY>,
common::FixedKVNodeLayout<
CAPACITY,
fixed_kv_node_meta_t<NODE_KEY>,
fixed_kv_node_meta_le_t<NODE_KEY_LE>,
NODE_KEY, NODE_KEY_LE,
paddr_t, paddr_le_t> {
using Ref = TCachedExtentRef<node_type_t>;
using base_t = FixedKVNode<NODE_KEY>;
using base_ref = typename FixedKVNode<NODE_KEY>::FixedKVNodeRef;
using node_layout_t =
common::FixedKVNodeLayout<
CAPACITY,
fixed_kv_node_meta_t<NODE_KEY>,
fixed_kv_node_meta_le_t<NODE_KEY_LE>,
NODE_KEY,
NODE_KEY_LE,
paddr_t,
paddr_le_t>;
using internal_const_iterator_t = typename node_layout_t::const_iterator;
using internal_iterator_t = typename node_layout_t::iterator;
using this_type_t = FixedKVInternalNode<
CAPACITY,
NODE_KEY,
NODE_KEY_LE,
node_size,
node_type_t>;
FixedKVInternalNode(ceph::bufferptr &&ptr)
: FixedKVNode<NODE_KEY>(CAPACITY, std::move(ptr)),
node_layout_t(this->get_bptr().c_str()) {}
FixedKVInternalNode(const FixedKVInternalNode &rhs)
: FixedKVNode<NODE_KEY>(rhs),
node_layout_t(this->get_bptr().c_str()) {}
bool is_leaf_and_has_children() const final {
return false;
}
uint16_t get_node_split_pivot() final {
return this->get_split_pivot().get_offset();
}
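  // On commit, populate child pointers of an initial_pending node from the
  // prior instance (if this is a rewrite) and/or its stable copy sources,
  // then re-point the children's parent trackers at this node.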
void prepare_commit() final {
if (this->is_initial_pending()) {
if (this->is_rewrite()) {
this->set_children_from_prior_instance();
}
this->copy_children_from_stable_sources(
[this](base_t &node, uint16_t pos) {
ceph_assert(node.get_type() == this->get_type());
auto &n = static_cast<this_type_t&>(node);
return n.iter_idx(pos);
}
);
if (this->is_rewrite()) {
this->reset_prior_instance();
} else {
this->adjust_ptracker_for_children();
}
assert(this->validate_stable_children());
this->copy_sources.clear();
}
}
get_child_ret_t<LogicalCachedExtent>
get_logical_child(op_context_t<NODE_KEY>, uint16_t pos) final {
ceph_abort("impossible");
return get_child_ret_t<LogicalCachedExtent>(child_pos_t(nullptr, 0));
}
bool validate_stable_children() final {
LOG_PREFIX(FixedKVInternalNode::validate_stable_children);
if (this->children.empty()) {
return false;
}
for (auto i : *this) {
auto child = (FixedKVNode<NODE_KEY>*)this->children[i.get_offset()];
if (child && child->range.begin != i.get_key()) {
SUBERROR(seastore_fixedkv_tree,
"stable child not valid: child {}, child meta{}, key {}",
*child,
child->get_node_meta(),
i.get_key());
ceph_abort();
return false;
}
}
return true;
}
virtual ~FixedKVInternalNode() {
if (this->is_valid() && !this->is_pending()) {
if (this->range.is_root()) {
ceph_assert(this->root_block);
unlink_phy_tree_root_node<NODE_KEY>(this->root_block);
} else {
ceph_assert(this->is_parent_valid());
auto parent = this->template get_parent_node<FixedKVNode<NODE_KEY>>();
auto off = parent->lower_bound_offset(this->get_meta().begin);
assert(parent->get_key_from_idx(off) == this->get_meta().begin);
assert(parent->children[off] == this);
parent->children[off] = nullptr;
}
}
}
uint16_t lower_bound_offset(NODE_KEY key) const final {
return this->lower_bound(key).get_offset();
}
uint16_t upper_bound_offset(NODE_KEY key) const final {
return this->upper_bound(key).get_offset();
}
uint16_t child_pos_for_key(NODE_KEY key) const final {
auto it = this->upper_bound(key);
assert(it != this->begin());
--it;
return it.get_offset();
}
NODE_KEY get_key_from_idx(uint16_t idx) const final {
return this->iter_idx(idx).get_key();
}
fixed_kv_node_meta_t<NODE_KEY> get_node_meta() const {
return this->get_meta();
}
uint16_t get_node_size() const final {
return this->get_size();
}
typename node_layout_t::delta_buffer_t delta_buffer;
typename node_layout_t::delta_buffer_t *maybe_get_delta_buffer() {
return this->is_mutation_pending()
? &delta_buffer : nullptr;
}
CachedExtentRef duplicate_for_write(Transaction&) override {
assert(delta_buffer.empty());
return CachedExtentRef(new node_type_t(*this));
};
void on_replace_prior(Transaction&) final {
ceph_assert(!this->is_rewrite());
this->set_children_from_prior_instance();
auto &prior = (this_type_t&)(*this->get_prior_instance());
auto copied = this->copy_children_from_stable_source(
prior,
prior.begin(),
prior.end(),
this->begin());
ceph_assert(copied <= get_node_size());
assert(this->validate_stable_children());
this->set_parent_tracker_from_prior_instance();
}
void update(
internal_const_iterator_t iter,
paddr_t addr,
FixedKVNode<NODE_KEY>* nextent) {
LOG_PREFIX(FixedKVInternalNode::update);
SUBTRACE(seastore_fixedkv_tree, "trans.{}, pos {}, {}",
this->pending_for_transaction,
iter.get_offset(),
*nextent);
this->update_child_ptr(iter, nextent);
return this->journal_update(
iter,
this->maybe_generate_relative(addr),
maybe_get_delta_buffer());
}
void insert(
internal_const_iterator_t iter,
NODE_KEY pivot,
paddr_t addr,
FixedKVNode<NODE_KEY>* nextent) {
LOG_PREFIX(FixedKVInternalNode::insert);
SUBTRACE(seastore_fixedkv_tree, "trans.{}, pos {}, key {}, {}",
this->pending_for_transaction,
iter.get_offset(),
pivot,
*nextent);
this->insert_child_ptr(iter, nextent);
return this->journal_insert(
iter,
pivot,
this->maybe_generate_relative(addr),
maybe_get_delta_buffer());
}
void remove(internal_const_iterator_t iter) {
LOG_PREFIX(FixedKVInternalNode::remove);
SUBTRACE(seastore_fixedkv_tree, "trans.{}, pos {}, key {}",
this->pending_for_transaction,
iter.get_offset(),
iter.get_key());
this->remove_child_ptr(iter);
return this->journal_remove(
iter,
maybe_get_delta_buffer());
}
void replace(
internal_const_iterator_t iter,
NODE_KEY pivot,
paddr_t addr,
FixedKVNode<NODE_KEY>* nextent) {
LOG_PREFIX(FixedKVInternalNode::replace);
SUBTRACE(seastore_fixedkv_tree, "trans.{}, pos {}, old key {}, key {}, {}",
this->pending_for_transaction,
iter.get_offset(),
iter.get_key(),
pivot,
*nextent);
this->update_child_ptr(iter, nextent);
return this->journal_replace(
iter,
pivot,
this->maybe_generate_relative(addr),
maybe_get_delta_buffer());
}
std::tuple<Ref, Ref, NODE_KEY>
make_split_children(op_context_t<NODE_KEY> c) {
auto left = c.cache.template alloc_new_extent<node_type_t>(
c.trans, node_size, placement_hint_t::HOT, INIT_GENERATION);
auto right = c.cache.template alloc_new_extent<node_type_t>(
c.trans, node_size, placement_hint_t::HOT, INIT_GENERATION);
this->split_child_ptrs(*left, *right);
auto pivot = this->split_into(*left, *right);
left->range = left->get_meta();
right->range = right->get_meta();
return std::make_tuple(
left,
right,
pivot);
}
Ref make_full_merge(
op_context_t<NODE_KEY> c,
Ref &right) {
auto replacement = c.cache.template alloc_new_extent<node_type_t>(
c.trans, node_size, placement_hint_t::HOT, INIT_GENERATION);
replacement->merge_child_ptrs(*this, *right);
replacement->merge_from(*this, *right->template cast<node_type_t>());
replacement->range = replacement->get_meta();
return replacement;
}
std::tuple<Ref, Ref, NODE_KEY>
make_balanced(
op_context_t<NODE_KEY> c,
Ref &_right,
bool prefer_left) {
ceph_assert(_right->get_type() == this->get_type());
auto &right = *_right->template cast<node_type_t>();
auto replacement_left = c.cache.template alloc_new_extent<node_type_t>(
c.trans, node_size, placement_hint_t::HOT, INIT_GENERATION);
auto replacement_right = c.cache.template alloc_new_extent<node_type_t>(
c.trans, node_size, placement_hint_t::HOT, INIT_GENERATION);
auto pivot = this->balance_into_new_nodes(
*this,
right,
prefer_left,
*replacement_left,
*replacement_right);
this->balance_child_ptrs(
*this,
right,
prefer_left,
*replacement_left,
*replacement_right);
replacement_left->range = replacement_left->get_meta();
replacement_right->range = replacement_right->get_meta();
return std::make_tuple(
replacement_left,
replacement_right,
pivot);
}
/**
* Internal relative addresses on read or in memory prior to commit
* are either record or block relative depending on whether this
* physical node is is_initial_pending() or just is_mutable().
*
* User passes appropriate base depending on lifecycle and
* resolve_relative_addrs fixes up relative internal references
* based on base.
*/
void resolve_relative_addrs(paddr_t base)
{
LOG_PREFIX(FixedKVInternalNode::resolve_relative_addrs);
for (auto i: *this) {
if (i->get_val().is_relative()) {
auto updated = base.add_relative(i->get_val());
SUBTRACE(seastore_fixedkv_tree, "{} -> {}", i->get_val(), updated);
i->set_val(updated);
}
}
}
void node_resolve_vals(
internal_iterator_t from,
internal_iterator_t to) const {
if (this->is_initial_pending()) {
for (auto i = from; i != to; ++i) {
if (i->get_val().is_relative()) {
assert(i->get_val().is_block_relative());
i->set_val(this->get_paddr().add_relative(i->get_val()));
}
}
}
}
void node_unresolve_vals(
internal_iterator_t from,
internal_iterator_t to) const {
if (this->is_initial_pending()) {
for (auto i = from; i != to; ++i) {
if (i->get_val().is_relative()) {
assert(i->get_val().is_record_relative());
i->set_val(i->get_val().block_relative_to(this->get_paddr()));
}
}
}
}
std::ostream &_print_detail(std::ostream &out) const
{
out << ", size=" << this->get_size()
<< ", meta=" << this->get_meta()
<< ", my_tracker=" << (void*)this->my_tracker;
if (this->my_tracker) {
out << ", my_tracker->parent=" << (void*)this->my_tracker->get_parent().get();
}
return out << ", root_block=" << (void*)this->root_block.get();
}
ceph::bufferlist get_delta() {
ceph::buffer::ptr bptr(delta_buffer.get_bytes());
delta_buffer.copy_out(bptr.c_str(), bptr.length());
ceph::bufferlist bl;
bl.push_back(bptr);
return bl;
}
void apply_delta_and_adjust_crc(
paddr_t base, const ceph::bufferlist &_bl) {
assert(_bl.length());
ceph::bufferlist bl = _bl;
bl.rebuild();
typename node_layout_t::delta_buffer_t buffer;
buffer.copy_in(bl.front().c_str(), bl.front().length());
buffer.replay(*this);
this->set_last_committed_crc(this->get_crc32c());
resolve_relative_addrs(base);
}
constexpr static size_t get_min_capacity() {
return (node_layout_t::get_capacity() - 1) / 2;
}
bool at_max_capacity() const {
assert(this->get_size() <= node_layout_t::get_capacity());
return this->get_size() == node_layout_t::get_capacity();
}
bool at_min_capacity() const {
assert(this->get_size() >= (get_min_capacity() - 1));
return this->get_size() <= get_min_capacity();
}
bool below_min_capacity() const {
assert(this->get_size() >= (get_min_capacity() - 1));
return this->get_size() < get_min_capacity();
}
};
template <
size_t CAPACITY,
typename NODE_KEY,
typename NODE_KEY_LE,
typename VAL,
typename VAL_LE,
size_t node_size,
typename node_type_t,
bool has_children>
struct FixedKVLeafNode
: FixedKVNode<NODE_KEY>,
common::FixedKVNodeLayout<
CAPACITY,
fixed_kv_node_meta_t<NODE_KEY>,
fixed_kv_node_meta_le_t<NODE_KEY_LE>,
NODE_KEY, NODE_KEY_LE,
VAL, VAL_LE> {
using Ref = TCachedExtentRef<node_type_t>;
using node_layout_t =
common::FixedKVNodeLayout<
CAPACITY,
fixed_kv_node_meta_t<NODE_KEY>,
fixed_kv_node_meta_le_t<NODE_KEY_LE>,
NODE_KEY,
NODE_KEY_LE,
VAL,
VAL_LE>;
using internal_const_iterator_t = typename node_layout_t::const_iterator;
using this_type_t = FixedKVLeafNode<
CAPACITY,
NODE_KEY,
NODE_KEY_LE,
VAL,
VAL_LE,
node_size,
node_type_t,
has_children>;
using base_t = FixedKVNode<NODE_KEY>;
FixedKVLeafNode(ceph::bufferptr &&ptr)
: FixedKVNode<NODE_KEY>(has_children ? CAPACITY : 0, std::move(ptr)),
node_layout_t(this->get_bptr().c_str()) {}
FixedKVLeafNode(const FixedKVLeafNode &rhs)
: FixedKVNode<NODE_KEY>(rhs),
node_layout_t(this->get_bptr().c_str()) {}
static constexpr bool do_has_children = has_children;
bool is_leaf_and_has_children() const final {
return has_children;
}
uint16_t get_node_split_pivot() final {
return this->get_split_pivot().get_offset();
}
get_child_ret_t<LogicalCachedExtent>
get_logical_child(op_context_t<NODE_KEY> c, uint16_t pos) final {
auto child = this->children[pos];
if (is_valid_child_ptr(child)) {
ceph_assert(child->is_logical());
return c.cache.template get_extent_viewable_by_trans<
LogicalCachedExtent>(c.trans, (LogicalCachedExtent*)child);
} else if (this->is_pending()) {
auto key = this->iter_idx(pos).get_key();
auto &sparent = this->get_stable_for_key(key);
auto spos = sparent.child_pos_for_key(key);
auto child = sparent.children[spos];
if (is_valid_child_ptr(child)) {
ceph_assert(child->is_logical());
return c.cache.template get_extent_viewable_by_trans<
LogicalCachedExtent>(c.trans, (LogicalCachedExtent*)child);
} else {
return child_pos_t(&sparent, spos);
}
} else {
return child_pos_t(this, pos);
}
}
bool validate_stable_children() override {
return true;
}
virtual ~FixedKVLeafNode() {
if (this->is_valid() && !this->is_pending()) {
if (this->range.is_root()) {
ceph_assert(this->root_block);
unlink_phy_tree_root_node<NODE_KEY>(this->root_block);
} else {
ceph_assert(this->is_parent_valid());
auto parent = this->template get_parent_node<FixedKVNode<NODE_KEY>>();
auto off = parent->lower_bound_offset(this->get_meta().begin);
assert(parent->get_key_from_idx(off) == this->get_meta().begin);
assert(parent->children[off] == this);
parent->children[off] = nullptr;
}
}
}
void prepare_commit() final {
if constexpr (has_children) {
if (this->is_initial_pending()) {
if (this->is_rewrite()) {
this->set_children_from_prior_instance();
}
this->copy_children_from_stable_sources(
[this](base_t &node, uint16_t pos) {
ceph_assert(node.get_type() == this->get_type());
auto &n = static_cast<this_type_t&>(node);
return n.iter_idx(pos);
}
);
if (this->is_rewrite()) {
this->reset_prior_instance();
} else {
this->adjust_ptracker_for_children();
}
assert(this->validate_stable_children());
this->copy_sources.clear();
}
}
assert(this->is_initial_pending()
? this->copy_sources.empty():
true);
}
void on_replace_prior(Transaction&) final {
ceph_assert(!this->is_rewrite());
if constexpr (has_children) {
this->set_children_from_prior_instance();
auto &prior = (this_type_t&)(*this->get_prior_instance());
auto copied = this->copy_children_from_stable_source(
prior,
prior.begin(),
prior.end(),
this->begin());
ceph_assert(copied <= get_node_size());
assert(this->validate_stable_children());
this->set_parent_tracker_from_prior_instance();
} else {
this->set_parent_tracker_from_prior_instance();
}
}
uint16_t lower_bound_offset(NODE_KEY key) const final {
return this->lower_bound(key).get_offset();
}
uint16_t upper_bound_offset(NODE_KEY key) const final {
return this->upper_bound(key).get_offset();
}
uint16_t child_pos_for_key(NODE_KEY key) const final {
return lower_bound_offset(key);
}
NODE_KEY get_key_from_idx(uint16_t idx) const final {
return this->iter_idx(idx).get_key();
}
fixed_kv_node_meta_t<NODE_KEY> get_node_meta() const {
return this->get_meta();
}
uint16_t get_node_size() const final {
return this->get_size();
}
typename node_layout_t::delta_buffer_t delta_buffer;
virtual typename node_layout_t::delta_buffer_t *maybe_get_delta_buffer() {
return this->is_mutation_pending() ? &delta_buffer : nullptr;
}
CachedExtentRef duplicate_for_write(Transaction&) override {
assert(delta_buffer.empty());
return CachedExtentRef(new node_type_t(*this));
};
virtual void update(
internal_const_iterator_t iter,
VAL val,
LogicalCachedExtent* nextent) = 0;
virtual internal_const_iterator_t insert(
internal_const_iterator_t iter,
NODE_KEY addr,
VAL val,
LogicalCachedExtent* nextent) = 0;
virtual void remove(internal_const_iterator_t iter) = 0;
std::tuple<Ref, Ref, NODE_KEY>
make_split_children(op_context_t<NODE_KEY> c) {
auto left = c.cache.template alloc_new_extent<node_type_t>(
c.trans, node_size, placement_hint_t::HOT, INIT_GENERATION);
auto right = c.cache.template alloc_new_extent<node_type_t>(
c.trans, node_size, placement_hint_t::HOT, INIT_GENERATION);
if constexpr (has_children) {
this->split_child_ptrs(*left, *right);
}
auto pivot = this->split_into(*left, *right);
left->range = left->get_meta();
right->range = right->get_meta();
return std::make_tuple(
left,
right,
pivot);
}
Ref make_full_merge(
op_context_t<NODE_KEY> c,
Ref &right) {
auto replacement = c.cache.template alloc_new_extent<node_type_t>(
c.trans, node_size, placement_hint_t::HOT, INIT_GENERATION);
if constexpr (has_children) {
replacement->merge_child_ptrs(*this, *right);
}
replacement->merge_from(*this, *right->template cast<node_type_t>());
replacement->range = replacement->get_meta();
return replacement;
}
std::tuple<Ref, Ref, NODE_KEY>
make_balanced(
op_context_t<NODE_KEY> c,
Ref &_right,
bool prefer_left) {
ceph_assert(_right->get_type() == this->get_type());
auto &right = *_right->template cast<node_type_t>();
auto replacement_left = c.cache.template alloc_new_extent<node_type_t>(
c.trans, node_size, placement_hint_t::HOT, INIT_GENERATION);
auto replacement_right = c.cache.template alloc_new_extent<node_type_t>(
c.trans, node_size, placement_hint_t::HOT, INIT_GENERATION);
auto pivot = this->balance_into_new_nodes(
*this,
right,
prefer_left,
*replacement_left,
*replacement_right);
if constexpr (has_children) {
this->balance_child_ptrs(
*this,
right,
prefer_left,
*replacement_left,
*replacement_right);
}
replacement_left->range = replacement_left->get_meta();
replacement_right->range = replacement_right->get_meta();
return std::make_tuple(
replacement_left,
replacement_right,
pivot);
}
ceph::bufferlist get_delta() {
ceph::buffer::ptr bptr(delta_buffer.get_bytes());
delta_buffer.copy_out(bptr.c_str(), bptr.length());
ceph::bufferlist bl;
bl.push_back(bptr);
return bl;
}
void apply_delta_and_adjust_crc(
paddr_t base, const ceph::bufferlist &_bl) {
assert(_bl.length());
ceph::bufferlist bl = _bl;
bl.rebuild();
typename node_layout_t::delta_buffer_t buffer;
buffer.copy_in(bl.front().c_str(), bl.front().length());
buffer.replay(*this);
this->set_last_committed_crc(this->get_crc32c());
this->resolve_relative_addrs(base);
}
std::ostream &_print_detail(std::ostream &out) const
{
return out << ", size=" << this->get_size()
<< ", meta=" << this->get_meta();
}
constexpr static size_t get_min_capacity() {
return (node_layout_t::get_capacity() - 1) / 2;
}
bool at_max_capacity() const {
assert(this->get_size() <= node_layout_t::get_capacity());
return this->get_size() == node_layout_t::get_capacity();
}
bool at_min_capacity() const {
assert(this->get_size() >= (get_min_capacity() - 1));
return this->get_size() <= get_min_capacity();
}
bool below_min_capacity() const {
assert(this->get_size() >= (get_min_capacity() - 1));
return this->get_size() < get_min_capacity();
}
};
} // namespace crimson::os::seastore
#if FMT_VERSION >= 90000
template <>
struct fmt::formatter<
crimson::os::seastore::FixedKVNode<
crimson::os::seastore::laddr_t>> : fmt::ostream_formatter {};
template <>
struct fmt::formatter<
crimson::os::seastore::FixedKVNode<
crimson::os::seastore::paddr_t>> : fmt::ostream_formatter {};
#endif
| 37,347 | 29.588043 | 84 | h |
null | ceph-main/src/crimson/os/seastore/collection_manager/collection_flat_node.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/buffer.h"
#include "osd/osd_types.h"
#include "crimson/os/seastore/transaction_manager.h"
#include "crimson/os/seastore/collection_manager/collection_flat_node.h"
namespace {
seastar::logger& logger() {
return crimson::get_logger(ceph_subsys_seastore);
}
}
namespace crimson::os::seastore::collection_manager {
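// Apply a single logged mutation (insert/update/remove) to the in-memory
// collection map during replay.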
void delta_t::replay(coll_map_t &l) const
{
switch (op) {
case op_t::INSERT: {
l.insert(coll, bits);
break;
}
case op_t::UPDATE: {
l.update(coll, bits);
break;
}
case op_t::REMOVE: {
l.erase(coll);
break;
}
case op_t::INVALID: {
assert(0 == "impossible");
break;
}
__builtin_unreachable();
}
}
std::ostream &CollectionNode::print_detail_l(std::ostream &out) const
{
return out;
}
CollectionNode::list_ret
CollectionNode::list()
{
read_to_local();
logger().debug("CollectionNode:{}, {}", __func__, *this);
CollectionManager::list_ret_bare list_result;
for (auto &[coll, bits] : decoded) {
list_result.emplace_back(coll, bits);
}
return list_ret(
interruptible::ready_future_marker{},
std::move(list_result));
}
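// Insert the collection into this node's map.  If the encoded map would no
// longer fit in the backing block, the insertion is rolled back and OVERFLOW
// is returned to the caller.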
CollectionNode::create_ret
CollectionNode::create(coll_context_t cc, coll_t coll, unsigned bits)
{
read_to_local();
logger().debug("CollectionNode:{}", __func__);
if (!is_mutable()) {
auto mut = cc.tm.get_mutable_extent(cc.t, this)->cast<CollectionNode>();
return mut->create(cc, coll, bits);
}
logger().debug("CollectionNode::create {} {} {}", coll, bits, *this);
auto [iter, inserted] = decoded.insert(coll, bits);
assert(inserted);
if (encoded_sizeof((base_coll_map_t&)decoded) > get_bptr().length()) {
decoded.erase(iter);
return create_ret(
interruptible::ready_future_marker{},
create_result_t::OVERFLOW);
} else {
if (auto buffer = maybe_get_delta_buffer(); buffer) {
buffer->insert(coll, bits);
}
copy_to_node();
return create_ret(
interruptible::ready_future_marker{},
create_result_t::SUCCESS);
}
}
CollectionNode::update_ret
CollectionNode::update(coll_context_t cc, coll_t coll, unsigned bits)
{
read_to_local();
logger().debug("CollectionNode:{}", __func__);
if (!is_mutable()) {
auto mut = cc.tm.get_mutable_extent(cc.t, this)->cast<CollectionNode>();
return mut->update(cc, coll, bits);
}
if (auto buffer = maybe_get_delta_buffer(); buffer) {
buffer->update(coll, bits);
}
decoded.update(coll, bits);
copy_to_node();
return seastar::now();
}
CollectionNode::remove_ret
CollectionNode::remove(coll_context_t cc, coll_t coll)
{
read_to_local();
logger().debug("CollectionNode:{}", __func__);
if (!is_mutable()) {
auto mut = cc.tm.get_mutable_extent(cc.t, this)->cast<CollectionNode>();
return mut->remove(cc, coll);
}
if (auto buffer = maybe_get_delta_buffer(); buffer) {
buffer->remove(coll);
}
decoded.remove(coll);
copy_to_node();
return seastar::now();
}
}
| 3,052 | 23.821138 | 76 | cc |