Dataset columns: repo (string, length 1-152) | file (string, length 15-205) | code (string, length 0-41.6M) | file_length (int64, 0-41.6M) | avg_line_length (float64, 0-1.81M) | max_line_length (int64, 0-12.7M) | extension_type (string, 90 classes)
ceph-main/src/crimson/osd/osd_meta.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#pragma once

#include <map>
#include <string>

#include <seastar/core/future.hh>

#include "osd/osd_types.h"
#include "crimson/os/futurized_collection.h"
#include "crimson/os/futurized_store.h"

namespace ceph::os {
  class Transaction;
}

namespace crimson::os {
  class FuturizedCollection;
  class FuturizedStore;
}

/// metadata shared across PGs, or put in another way,
/// metadata not specific to certain PGs.
class OSDMeta {
  template<typename T> using Ref = boost::intrusive_ptr<T>;

  crimson::os::FuturizedStore::Shard& store;
  Ref<crimson::os::FuturizedCollection> coll;

public:
  OSDMeta(Ref<crimson::os::FuturizedCollection> coll,
          crimson::os::FuturizedStore::Shard& store)
    : store{store}, coll{coll} {}

  auto collection() {
    return coll;
  }
  void create(ceph::os::Transaction& t);

  void store_map(ceph::os::Transaction& t,
                 epoch_t e, const bufferlist& m);
  seastar::future<bufferlist> load_map(epoch_t e);

  void store_superblock(ceph::os::Transaction& t,
                        const OSDSuperblock& sb);

  using load_superblock_ertr = crimson::os::FuturizedStore::Shard::read_errorator;
  using load_superblock_ret = load_superblock_ertr::future<OSDSuperblock>;
  load_superblock_ret load_superblock();

  using ec_profile_t = std::map<std::string, std::string>;
  seastar::future<std::tuple<pg_pool_t, std::string, ec_profile_t>>
  load_final_pool_info(int64_t pool);

private:
  static ghobject_t osdmap_oid(epoch_t epoch);
  static ghobject_t final_pool_info_oid(int64_t pool);
  static ghobject_t superblock_oid();
};
file_length: 1,698 | avg_line_length: 26.852459 | max_line_length: 82 | extension_type: h
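A minimal usage sketch for the interface above, assuming a caller that already owns the store shard and the meta collection handed to OSDMeta's constructor. The helper name store_and_reload_map and the surrounding flow are invented for illustration; store_map(), load_map(), collection() and FuturizedStore::Shard::do_transaction() come from the declarations visible in this listing.

#include <seastar/core/future.hh>
#include "crimson/os/futurized_store.h"
#include "crimson/osd/osd_meta.h"
#include "os/Transaction.h"

// Hypothetical helper (not part of the source): queue an OSDMap write through
// OSDMeta, commit it, then read the same epoch back.
seastar::future<bufferlist> store_and_reload_map(
  OSDMeta& meta,
  crimson::os::FuturizedStore::Shard& store,
  epoch_t e,
  bufferlist map_bl)
{
  ceph::os::Transaction t;
  meta.store_map(t, e, map_bl);                  // write is keyed by epoch
  return store.do_transaction(meta.collection(), std::move(t))
    .then([&meta, e] {
      return meta.load_map(e);                   // resolves to the stored bufferlist
    });
}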
ceph-main/src/crimson/osd/osd_operation.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "osd_operation.h" #include "common/Formatter.h" #include "crimson/common/log.h" #include "crimson/osd/osd_operations/client_request.h" namespace { seastar::logger& logger() { return crimson::get_logger(ceph_subsys_osd); } } namespace crimson::osd { void OSDOperationRegistry::do_stop() { logger().info("OSDOperationRegistry::{}", __func__); // we need to decouple visiting the registry from destructing // ops because of the auto-unlink feature of boost::intrusive. // the list shouldn't change while iterating due to constrains // on iterator's validity. constexpr auto historic_reg_index = static_cast<size_t>(OperationTypeCode::historic_client_request); auto& historic_registry = get_registry<historic_reg_index>(); std::vector<ClientRequest::ICRef> to_ref_down; std::transform(std::begin(historic_registry), std::end(historic_registry), std::back_inserter(to_ref_down), [] (const Operation& op) { return ClientRequest::ICRef{ static_cast<const ClientRequest*>(&op), /* add_ref= */ false }; }); last_of_recents = std::end(historic_registry); // to_ref_down is going off } OSDOperationRegistry::OSDOperationRegistry() : OperationRegistryT(seastar::this_shard_id()) { constexpr auto historic_reg_index = static_cast<size_t>(OperationTypeCode::historic_client_request); auto& historic_registry = get_registry<historic_reg_index>(); last_of_recents = std::begin(historic_registry); } static auto get_duration(const ClientRequest& client_request) { // TODO: consider enhancing `CompletionEvent` with computing duration // once -- when it's enetered. return client_request.get_completed() - client_request.get_started(); } void OSDOperationRegistry::put_historic(const ClientRequest& op) { // unlink the op from the client request registry. this is a part of // the re-link procedure. finally it will be in historic registry. constexpr auto client_reg_index = static_cast<size_t>(OperationTypeCode::client_request); constexpr auto historic_reg_index = static_cast<size_t>(OperationTypeCode::historic_client_request); auto& client_registry = get_registry<client_reg_index>(); auto& historic_registry = get_registry<historic_reg_index>(); historic_registry.splice(std::end(historic_registry), client_registry, client_registry.iterator_to(op)); ClientRequest::ICRef( &op, /* add_ref= */true ).detach(); // yes, "leak" it for now! // check whether the history size limit is not exceeded; if so, then // purge the oldest op. // NOTE: Operation uses the auto-unlink feature of boost::intrusive. // NOTE: the cleaning happens in OSDOperationRegistry::do_stop() using crimson::common::local_conf; if (num_recent_ops >= local_conf()->osd_op_history_size) { ++last_of_recents; ++num_slow_ops; } else { ++num_recent_ops; } if (num_slow_ops > local_conf()->osd_op_history_slow_op_size) { // we're interested in keeping slowest ops. if the slow op history // is disabled, the list will have only one element, so the full-blown // search will boil down into `.front()`. 
const auto fastest_historic_iter = std::min_element( std::cbegin(historic_registry), last_of_recents, [] (const auto& lop, const auto& rop) { const auto& lclient_request = static_cast<const ClientRequest&>(lop); const auto& rclient_request = static_cast<const ClientRequest&>(rop); return get_duration(lclient_request) < get_duration(rclient_request); }); assert(fastest_historic_iter != std::end(historic_registry)); const auto& fastest_historic_op = static_cast<const ClientRequest&>(*fastest_historic_iter); historic_registry.erase(fastest_historic_iter); // clear a previously "leaked" op ClientRequest::ICRef(&fastest_historic_op, /* add_ref= */false); --num_slow_ops; } } size_t OSDOperationRegistry::dump_historic_client_requests(ceph::Formatter* f) const { const auto& historic_client_registry = get_registry<static_cast<size_t>(OperationTypeCode::historic_client_request)>(); //ClientRequest::type)>(); f->open_object_section("op_history"); f->dump_int("size", historic_client_registry.size()); // TODO: f->dump_int("duration", history_duration.load()); // the intrusive list is configured to not store the size size_t ops_count = 0; { f->open_array_section("ops"); for (const auto& op : historic_client_registry) { op.dump(f); ++ops_count; } f->close_section(); } f->close_section(); return ops_count; } size_t OSDOperationRegistry::dump_slowest_historic_client_requests(ceph::Formatter* f) const { const auto& historic_client_registry = get_registry<static_cast<size_t>(OperationTypeCode::historic_client_request)>(); //ClientRequest::type)>(); f->open_object_section("op_history"); f->dump_int("size", historic_client_registry.size()); // TODO: f->dump_int("duration", history_duration.load()); // the intrusive list is configured to not store the size std::multimap<utime_t, const ClientRequest*, std::greater<utime_t>> sorted_slowest_ops; // iterating over the entire registry as a slow op could be also // in the "recently added" part. 
std::transform(std::begin(historic_client_registry), std::end(historic_client_registry), std::inserter(sorted_slowest_ops, std::end(sorted_slowest_ops)), [] (const Operation& op) { const auto& cop = static_cast<const ClientRequest&>(op); return std::make_pair(get_duration(cop), &cop); }); f->open_array_section("ops"); using crimson::common::local_conf; size_t ops_count = 0; for (auto it = std::begin(sorted_slowest_ops); ops_count < local_conf()->osd_op_history_slow_op_size && it != std::end(sorted_slowest_ops); ++it, ++ops_count) { it->second->dump(f); } f->close_section(); return ops_count; } OperationThrottler::OperationThrottler(ConfigProxy &conf) : scheduler(crimson::osd::scheduler::make_scheduler(conf)) { conf.add_observer(this); update_from_config(conf); } void OperationThrottler::wake() { while ((!max_in_progress || in_progress < max_in_progress) && !scheduler->empty()) { auto item = scheduler->dequeue(); item.wake.set_value(); ++in_progress; --pending; } } void OperationThrottler::release_throttle() { ceph_assert(in_progress > 0); --in_progress; wake(); } seastar::future<> OperationThrottler::acquire_throttle( crimson::osd::scheduler::params_t params) { crimson::osd::scheduler::item_t item{params, seastar::promise<>()}; auto fut = item.wake.get_future(); scheduler->enqueue(std::move(item)); return fut; } void OperationThrottler::dump_detail(Formatter *f) const { f->dump_unsigned("max_in_progress", max_in_progress); f->dump_unsigned("in_progress", in_progress); f->open_object_section("scheduler"); { scheduler->dump(*f); } f->close_section(); } void OperationThrottler::update_from_config(const ConfigProxy &conf) { max_in_progress = conf.get_val<uint64_t>("crimson_osd_scheduler_concurrency"); wake(); } const char** OperationThrottler::get_tracked_conf_keys() const { static const char* KEYS[] = { "crimson_osd_scheduler_concurrency", NULL }; return KEYS; } void OperationThrottler::handle_conf_change( const ConfigProxy& conf, const std::set<std::string> &changed) { update_from_config(conf); } }
file_length: 7,522 | avg_line_length: 31.995614 | max_line_length: 111 | extension_type: cc
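The retention policy implemented by OSDOperationRegistry::put_historic() above can be restated compactly: the newest osd_op_history_size ops are always kept; older ops spill into a "slow" section, and once that section exceeds osd_op_history_slow_op_size entries the fastest of them is evicted, so it converges on the slowest operations seen. The sketch below is a simplified, self-contained illustration of that policy only (an assumption, not the real class): the actual code keeps a single intrusive list split by the last_of_recents iterator and manages reference counts explicitly.

#include <algorithm>
#include <deque>
#include <utility>

struct HistoricOpsSketch {
  using entry_t = std::pair<int, double>;   // (op id, duration in seconds)
  std::deque<entry_t> slow, recent;         // slow == older spill-over section
  size_t R, S;                              // like osd_op_history_size / osd_op_history_slow_op_size

  HistoricOpsSketch(size_t r, size_t s) : R(r), S(s) {}

  void put_historic(entry_t op) {
    recent.push_back(op);
    if (recent.size() > R) {                // the oldest "recent" op becomes "slow"
      slow.push_back(recent.front());
      recent.pop_front();
    }
    if (slow.size() > S) {                  // evict the fastest slow op
      auto fastest = std::min_element(
        slow.begin(), slow.end(),
        [](const entry_t& l, const entry_t& r) { return l.second < r.second; });
      slow.erase(fastest);
    }
  }
};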
ceph-main/src/crimson/osd/osd_operation.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include "crimson/common/operation.h" #include "crimson/osd/pg_interval_interrupt_condition.h" #include "crimson/osd/scheduler/scheduler.h" #include "osd/osd_types.h" namespace crimson::os::seastore { template<class OpT> class OperationProxyT; } namespace crimson::osd { /// Ordering stages for a class of operations ordered by PG. struct ConnectionPipeline { struct AwaitActive : OrderedExclusivePhaseT<AwaitActive> { static constexpr auto type_name = "ConnectionPipeline::await_active"; } await_active; struct AwaitMap : OrderedExclusivePhaseT<AwaitMap> { static constexpr auto type_name = "ConnectionPipeline::await_map"; } await_map; struct GetPG : OrderedExclusivePhaseT<GetPG> { static constexpr auto type_name = "ConnectionPipeline::get_pg"; } get_pg; }; enum class OperationTypeCode { client_request = 0, peering_event, pg_advance_map, pg_creation, replicated_request, background_recovery, background_recovery_sub, internal_client_request, historic_client_request, logmissing_request, logmissing_request_reply, snaptrim_event, snaptrimobj_subevent, last_op }; static constexpr const char* const OP_NAMES[] = { "client_request", "peering_event", "pg_advance_map", "pg_creation", "replicated_request", "background_recovery", "background_recovery_sub", "internal_client_request", "historic_client_request", "logmissing_request", "logmissing_request_reply", "snaptrim_event", "snaptrimobj_subevent", }; // prevent the addition of OperationTypeCode-s with no matching OP_NAMES entry: static_assert( (sizeof(OP_NAMES)/sizeof(OP_NAMES[0])) == static_cast<int>(OperationTypeCode::last_op)); struct InterruptibleOperation : Operation { template <typename ValuesT = void> using interruptible_future = ::crimson::interruptible::interruptible_future< ::crimson::osd::IOInterruptCondition, ValuesT>; using interruptor = ::crimson::interruptible::interruptor< ::crimson::osd::IOInterruptCondition>; }; template <typename T> struct OperationT : InterruptibleOperation { static constexpr const char *type_name = OP_NAMES[static_cast<int>(T::type)]; using IRef = boost::intrusive_ptr<T>; using ICRef = boost::intrusive_ptr<const T>; unsigned get_type() const final { return static_cast<unsigned>(T::type); } const char *get_type_name() const final { return T::type_name; } virtual ~OperationT() = default; private: virtual void dump_detail(ceph::Formatter *f) const = 0; }; template <class T> class TrackableOperationT : public OperationT<T> { T* that() { return static_cast<T*>(this); } const T* that() const { return static_cast<const T*>(this); } protected: template<class EventT> decltype(auto) get_event() { // all out derivates are supposed to define the list of tracking // events accessible via `std::get`. This will usually boil down // into an instance of `std::tuple`. return std::get<EventT>(that()->tracking_events); } template<class EventT> decltype(auto) get_event() const { return std::get<EventT>(that()->tracking_events); } using OperationT<T>::OperationT; struct StartEvent : TimeEvent<StartEvent> {}; struct CompletionEvent : TimeEvent<CompletionEvent> {}; template <class EventT, class... Args> void track_event(Args&&... 
args) { // the idea is to have a visitor-like interface that allows to double // dispatch (backend, blocker type) get_event<EventT>().trigger(*that(), std::forward<Args>(args)...); } template <class BlockingEventT, class InterruptorT=void, class F> auto with_blocking_event(F&& f) { auto ret = std::forward<F>(f)(typename BlockingEventT::template Trigger<T>{ get_event<BlockingEventT>(), *that() }); if constexpr (std::is_same_v<InterruptorT, void>) { return ret; } else { using ret_t = decltype(ret); return typename InterruptorT::template futurize_t<ret_t>{std::move(ret)}; } } public: static constexpr bool is_trackable = true; }; template <class T> class PhasedOperationT : public TrackableOperationT<T> { using base_t = TrackableOperationT<T>; T* that() { return static_cast<T*>(this); } const T* that() const { return static_cast<const T*>(this); } protected: using TrackableOperationT<T>::TrackableOperationT; template <class InterruptorT=void, class StageT> auto enter_stage(StageT& stage) { return this->template with_blocking_event<typename StageT::BlockingEvent, InterruptorT>( [&stage, this] (auto&& trigger) { // delegated storing the pipeline handle to let childs to match // the lifetime of pipeline with e.g. ConnectedSocket (important // for ConnectionPipeline). return that()->get_handle().template enter<T>(stage, std::move(trigger)); }); } template <class OpT> friend class crimson::os::seastore::OperationProxyT; // PGShardManager::start_pg_operation needs access to enter_stage, we can make this // more sophisticated later on friend class PGShardManager; }; /** * Maintains a set of lists of all active ops. */ struct OSDOperationRegistry : OperationRegistryT< static_cast<size_t>(OperationTypeCode::last_op) > { OSDOperationRegistry(); void do_stop() override; void put_historic(const class ClientRequest& op); size_t dump_historic_client_requests(ceph::Formatter* f) const; size_t dump_slowest_historic_client_requests(ceph::Formatter* f) const; private: op_list::const_iterator last_of_recents; size_t num_recent_ops = 0; size_t num_slow_ops = 0; }; /** * Throttles set of currently running operations * * Very primitive currently, assumes all ops are equally * expensive and simply limits the number that can be * concurrently active. */ class OperationThrottler : public BlockerT<OperationThrottler>, private md_config_obs_t { friend BlockerT<OperationThrottler>; static constexpr const char* type_name = "OperationThrottler"; template <typename OperationT, typename F> auto with_throttle( OperationT* op, crimson::osd::scheduler::params_t params, F &&f) { if (!max_in_progress) return f(); return acquire_throttle(params) .then(std::forward<F>(f)) .then([this](auto x) { release_throttle(); return x; }); } template <typename OperationT, typename F> seastar::future<> with_throttle_while( OperationT* op, crimson::osd::scheduler::params_t params, F &&f) { return with_throttle(op, params, f).then([this, params, op, f](bool cont) { return cont ? with_throttle_while(op, params, f) : seastar::now(); }); } public: OperationThrottler(ConfigProxy &conf); const char** get_tracked_conf_keys() const final; void handle_conf_change(const ConfigProxy& conf, const std::set<std::string> &changed) final; void update_from_config(const ConfigProxy &conf); template <class OpT, class... Args> seastar::future<> with_throttle_while( BlockingEvent::Trigger<OpT>&& trigger, Args&&... 
args) { return trigger.maybe_record_blocking( with_throttle_while(std::forward<Args>(args)...), *this); } private: void dump_detail(Formatter *f) const final; crimson::osd::scheduler::SchedulerRef scheduler; uint64_t max_in_progress = 0; uint64_t in_progress = 0; uint64_t pending = 0; void wake(); seastar::future<> acquire_throttle( crimson::osd::scheduler::params_t params); void release_throttle(); }; }
file_length: 7,670 | avg_line_length: 26.202128 | max_line_length: 85 | extension_type: h
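A seastar-free sketch of the accounting performed by OperationThrottler above (an illustration under assumptions, not the real class): acquire() parks a callback, wake() admits parked work while fewer than max_in_progress items are running (0 meaning unlimited), and release() frees a slot and re-runs wake(). The real code resolves seastar promises obtained from a scheduler queue and calls wake() from release_throttle() and from config updates; this sketch also wakes on acquire() purely to stay self-contained.

#include <cstdint>
#include <functional>
#include <queue>
#include <utility>

struct ThrottleSketch {
  uint64_t max_in_progress = 0;                 // 0 means "unlimited", as in the real code
  uint64_t in_progress = 0;
  std::queue<std::function<void()>> pending;    // stands in for the scheduler queue

  void wake() {
    while ((!max_in_progress || in_progress < max_in_progress) &&
           !pending.empty()) {
      auto run = std::move(pending.front());
      pending.pop();
      ++in_progress;
      run();                                    // in crimson this resolves a seastar promise
    }
  }
  void acquire(std::function<void()> on_admitted) {
    pending.push(std::move(on_admitted));
    wake();
  }
  void release() {
    --in_progress;
    wake();
  }
};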
ceph-main/src/crimson/osd/osd_operation_external_tracking.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include "crimson/osd/osd.h" #include "crimson/osd/osdmap_gate.h" #include "crimson/osd/osd_operations/background_recovery.h" #include "crimson/osd/osd_operations/client_request.h" #include "crimson/osd/osd_operations/peering_event.h" #include "crimson/osd/osd_operations/pg_advance_map.h" #include "crimson/osd/osd_operations/recovery_subrequest.h" #include "crimson/osd/osd_operations/replicated_request.h" #include "crimson/osd/osd_operations/snaptrim_event.h" #include "crimson/osd/pg_activation_blocker.h" #include "crimson/osd/pg_map.h" namespace crimson::osd { // Just the boilerplate currently. Implementing struct LttngBackend : ClientRequest::StartEvent::Backend, ConnectionPipeline::AwaitActive::BlockingEvent::Backend, ConnectionPipeline::AwaitMap::BlockingEvent::Backend, ConnectionPipeline::GetPG::BlockingEvent::Backend, OSD_OSDMapGate::OSDMapBlocker::BlockingEvent::Backend, PGMap::PGCreationBlockingEvent::Backend, ClientRequest::PGPipeline::AwaitMap::BlockingEvent::Backend, PG_OSDMapGate::OSDMapBlocker::BlockingEvent::Backend, ClientRequest::PGPipeline::WaitForActive::BlockingEvent::Backend, PGActivationBlocker::BlockingEvent::Backend, ClientRequest::PGPipeline::RecoverMissing::BlockingEvent::Backend, ClientRequest::PGPipeline::GetOBC::BlockingEvent::Backend, ClientRequest::PGPipeline::Process::BlockingEvent::Backend, ClientRequest::PGPipeline::WaitRepop::BlockingEvent::Backend, ClientRequest::PGPipeline::WaitRepop::BlockingEvent::ExitBarrierEvent::Backend, ClientRequest::PGPipeline::SendReply::BlockingEvent::Backend, ClientRequest::CompletionEvent::Backend { void handle(ClientRequest::StartEvent&, const Operation&) override {} void handle(ConnectionPipeline::AwaitActive::BlockingEvent& ev, const Operation& op, const ConnectionPipeline::AwaitActive& blocker) override { } void handle(ConnectionPipeline::AwaitMap::BlockingEvent& ev, const Operation& op, const ConnectionPipeline::AwaitMap& blocker) override { } void handle(OSD_OSDMapGate::OSDMapBlocker::BlockingEvent&, const Operation&, const OSD_OSDMapGate::OSDMapBlocker&) override { } void handle(ConnectionPipeline::GetPG::BlockingEvent& ev, const Operation& op, const ConnectionPipeline::GetPG& blocker) override { } void handle(PGMap::PGCreationBlockingEvent&, const Operation&, const PGMap::PGCreationBlocker&) override { } void handle(ClientRequest::PGPipeline::AwaitMap::BlockingEvent& ev, const Operation& op, const ClientRequest::PGPipeline::AwaitMap& blocker) override { } void handle(PG_OSDMapGate::OSDMapBlocker::BlockingEvent&, const Operation&, const PG_OSDMapGate::OSDMapBlocker&) override { } void handle(ClientRequest::PGPipeline::WaitForActive::BlockingEvent& ev, const Operation& op, const ClientRequest::PGPipeline::WaitForActive& blocker) override { } void handle(PGActivationBlocker::BlockingEvent& ev, const Operation& op, const PGActivationBlocker& blocker) override { } void handle(ClientRequest::PGPipeline::RecoverMissing::BlockingEvent& ev, const Operation& op, const ClientRequest::PGPipeline::RecoverMissing& blocker) override { } void handle(ClientRequest::PGPipeline::GetOBC::BlockingEvent& ev, const Operation& op, const ClientRequest::PGPipeline::GetOBC& blocker) override { } void handle(ClientRequest::PGPipeline::Process::BlockingEvent& ev, const Operation& op, const ClientRequest::PGPipeline::Process& blocker) override { } void handle(ClientRequest::PGPipeline::WaitRepop::BlockingEvent& ev, const Operation& op, 
const ClientRequest::PGPipeline::WaitRepop& blocker) override { } void handle(ClientRequest::PGPipeline::WaitRepop::BlockingEvent::ExitBarrierEvent& ev, const Operation& op) override { } void handle(ClientRequest::PGPipeline::SendReply::BlockingEvent& ev, const Operation& op, const ClientRequest::PGPipeline::SendReply& blocker) override { } void handle(ClientRequest::CompletionEvent&, const Operation&) override {} }; struct HistoricBackend : ClientRequest::StartEvent::Backend, ConnectionPipeline::AwaitActive::BlockingEvent::Backend, ConnectionPipeline::AwaitMap::BlockingEvent::Backend, ConnectionPipeline::GetPG::BlockingEvent::Backend, OSD_OSDMapGate::OSDMapBlocker::BlockingEvent::Backend, PGMap::PGCreationBlockingEvent::Backend, ClientRequest::PGPipeline::AwaitMap::BlockingEvent::Backend, PG_OSDMapGate::OSDMapBlocker::BlockingEvent::Backend, ClientRequest::PGPipeline::WaitForActive::BlockingEvent::Backend, PGActivationBlocker::BlockingEvent::Backend, ClientRequest::PGPipeline::RecoverMissing::BlockingEvent::Backend, ClientRequest::PGPipeline::GetOBC::BlockingEvent::Backend, ClientRequest::PGPipeline::Process::BlockingEvent::Backend, ClientRequest::PGPipeline::WaitRepop::BlockingEvent::Backend, ClientRequest::PGPipeline::WaitRepop::BlockingEvent::ExitBarrierEvent::Backend, ClientRequest::PGPipeline::SendReply::BlockingEvent::Backend, ClientRequest::CompletionEvent::Backend { void handle(ClientRequest::StartEvent&, const Operation&) override {} void handle(ConnectionPipeline::AwaitActive::BlockingEvent& ev, const Operation& op, const ConnectionPipeline::AwaitActive& blocker) override { } void handle(ConnectionPipeline::AwaitMap::BlockingEvent& ev, const Operation& op, const ConnectionPipeline::AwaitMap& blocker) override { } void handle(OSD_OSDMapGate::OSDMapBlocker::BlockingEvent&, const Operation&, const OSD_OSDMapGate::OSDMapBlocker&) override { } void handle(ConnectionPipeline::GetPG::BlockingEvent& ev, const Operation& op, const ConnectionPipeline::GetPG& blocker) override { } void handle(PGMap::PGCreationBlockingEvent&, const Operation&, const PGMap::PGCreationBlocker&) override { } void handle(ClientRequest::PGPipeline::AwaitMap::BlockingEvent& ev, const Operation& op, const ClientRequest::PGPipeline::AwaitMap& blocker) override { } void handle(PG_OSDMapGate::OSDMapBlocker::BlockingEvent&, const Operation&, const PG_OSDMapGate::OSDMapBlocker&) override { } void handle(ClientRequest::PGPipeline::WaitForActive::BlockingEvent& ev, const Operation& op, const ClientRequest::PGPipeline::WaitForActive& blocker) override { } void handle(PGActivationBlocker::BlockingEvent& ev, const Operation& op, const PGActivationBlocker& blocker) override { } void handle(ClientRequest::PGPipeline::RecoverMissing::BlockingEvent& ev, const Operation& op, const ClientRequest::PGPipeline::RecoverMissing& blocker) override { } void handle(ClientRequest::PGPipeline::GetOBC::BlockingEvent& ev, const Operation& op, const ClientRequest::PGPipeline::GetOBC& blocker) override { } void handle(ClientRequest::PGPipeline::Process::BlockingEvent& ev, const Operation& op, const ClientRequest::PGPipeline::Process& blocker) override { } void handle(ClientRequest::PGPipeline::WaitRepop::BlockingEvent& ev, const Operation& op, const ClientRequest::PGPipeline::WaitRepop& blocker) override { } void handle(ClientRequest::PGPipeline::WaitRepop::BlockingEvent::ExitBarrierEvent& ev, const Operation& op) override { } void handle(ClientRequest::PGPipeline::SendReply::BlockingEvent& ev, const Operation& op, const 
ClientRequest::PGPipeline::SendReply& blocker) override { } static const ClientRequest& to_client_request(const Operation& op) { #ifdef NDEBUG return static_cast<const ClientRequest&>(op); #else return dynamic_cast<const ClientRequest&>(op); #endif } void handle(ClientRequest::CompletionEvent&, const Operation& op) override { if (crimson::common::local_conf()->osd_op_history_size) { to_client_request(op).put_historic(); } } }; } // namespace crimson::osd namespace crimson { template <> struct EventBackendRegistry<osd::ClientRequest> { static std::tuple<osd::LttngBackend, osd::HistoricBackend> get_backends() { return { {}, {} }; } }; template <> struct EventBackendRegistry<osd::RemotePeeringEvent> { static std::tuple<> get_backends() { return {/* no extenral backends */}; } }; template <> struct EventBackendRegistry<osd::LocalPeeringEvent> { static std::tuple<> get_backends() { return {/* no extenral backends */}; } }; template <> struct EventBackendRegistry<osd::RepRequest> { static std::tuple<> get_backends() { return {/* no extenral backends */}; } }; template <> struct EventBackendRegistry<osd::LogMissingRequest> { static std::tuple<> get_backends() { return {/* no extenral backends */}; } }; template <> struct EventBackendRegistry<osd::LogMissingRequestReply> { static std::tuple<> get_backends() { return {/* no extenral backends */}; } }; template <> struct EventBackendRegistry<osd::RecoverySubRequest> { static std::tuple<> get_backends() { return {/* no extenral backends */}; } }; template <> struct EventBackendRegistry<osd::BackfillRecovery> { static std::tuple<> get_backends() { return {}; } }; template <> struct EventBackendRegistry<osd::PGAdvanceMap> { static std::tuple<> get_backends() { return {}; } }; template <> struct EventBackendRegistry<osd::SnapTrimObjSubEvent> { static std::tuple<> get_backends() { return {}; } }; } // namespace crimson
file_length: 10,245 | avg_line_length: 32.266234 | max_line_length: 88 | extension_type: h
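The to_client_request() helper in HistoricBackend above uses a debug-checked downcast: dynamic_cast verifies the concrete type when asserts are enabled, while release builds (NDEBUG defined) pay only for a static_cast. A generic restatement of that pattern, as a hypothetical stand-alone template rather than anything in the source, looks like this:

// Hypothetical helper illustrating the pattern; From must be polymorphic.
#ifdef NDEBUG
template <class To, class From>
const To& checked_downcast(const From& from) {
  return static_cast<const To&>(from);     // release: trust the caller
}
#else
template <class To, class From>
const To& checked_downcast(const From& from) {
  return dynamic_cast<const To&>(from);    // debug: throws std::bad_cast on a wrong type
}
#endif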
ceph-main/src/crimson/osd/osdmap_gate.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "crimson/common/exception.h"
#include "crimson/osd/osdmap_gate.h"
#include "crimson/osd/shard_services.h"
#include "common/Formatter.h"

namespace {
  seastar::logger& logger() {
    return crimson::get_logger(ceph_subsys_osd);
  }
}

namespace crimson::osd {

template <OSDMapGateType OSDMapGateTypeV>
void OSDMapGate<OSDMapGateTypeV>::OSDMapBlocker::dump_detail(Formatter *f) const
{
  f->open_object_section("OSDMapGate");
  f->dump_int("epoch", epoch);
  f->close_section();
}

template <OSDMapGateType OSDMapGateTypeV>
seastar::future<epoch_t> OSDMapGate<OSDMapGateTypeV>::wait_for_map(
  typename OSDMapBlocker::BlockingEvent::TriggerI&& trigger,
  epoch_t epoch,
  ShardServices *shard_services)
{
  if (__builtin_expect(stopping, false)) {
    return seastar::make_exception_future<epoch_t>(
      crimson::common::system_shutdown_exception());
  }
  if (current >= epoch) {
    return seastar::make_ready_future<epoch_t>(current);
  } else {
    logger().info("evt epoch is {}, i have {}, will wait", epoch, current);
    auto &blocker = waiting_peering.emplace(
      epoch, std::make_pair(blocker_type, epoch)).first->second;
    auto fut = blocker.promise.get_shared_future();
    if (shard_services) {
      return trigger.maybe_record_blocking(
        shard_services->osdmap_subscribe(current, true).then(
          [fut=std::move(fut)]() mutable {
            return std::move(fut);
          }),
        blocker);
    } else {
      return trigger.maybe_record_blocking(std::move(fut), blocker);
    }
  }
}

template <OSDMapGateType OSDMapGateTypeV>
void OSDMapGate<OSDMapGateTypeV>::got_map(epoch_t epoch) {
  if (epoch == 0) {
    return;
  }
  ceph_assert(epoch > current);
  current = epoch;
  auto first = waiting_peering.begin();
  auto last = waiting_peering.upper_bound(epoch);
  std::for_each(first, last, [epoch](auto& blocked_requests) {
    blocked_requests.second.promise.set_value(epoch);
  });
  waiting_peering.erase(first, last);
}

template <OSDMapGateType OSDMapGateTypeV>
seastar::future<> OSDMapGate<OSDMapGateTypeV>::stop() {
  logger().info("osdmap::stop");
  stopping = true;
  auto first = waiting_peering.begin();
  auto last = waiting_peering.end();
  std::for_each(first, last, [](auto& blocked_requests) {
    blocked_requests.second.promise.set_exception(
      crimson::common::system_shutdown_exception());
  });
  return seastar::now();
}

template class OSDMapGate<OSDMapGateType::PG>;
template class OSDMapGate<OSDMapGateType::OSD>;

} // namespace crimson::osd
file_length: 2,578 | avg_line_length: 28.643678 | max_line_length: 80 | extension_type: cc
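A condensed sketch of the waiting logic above (an assumption for illustration, not the actual template): waiters are keyed by the epoch they need, and got_map(e) wakes every waiter whose requested epoch is less than or equal to e, mirroring the upper_bound()/erase() pass in OSDMapGate::got_map(). The tracking trigger, ShardServices subscription and shutdown handling of the real class are left out.

#include <map>
#include <seastar/core/future.hh>
#include <seastar/core/shared_future.hh>
#include "include/types.h"   // epoch_t

struct GateSketch {
  std::map<epoch_t, seastar::shared_promise<epoch_t>> waiters;  // keyed by waited epoch
  epoch_t current = 0;

  seastar::future<epoch_t> wait_for_map(epoch_t e) {
    if (current >= e) {
      return seastar::make_ready_future<epoch_t>(current);
    }
    return waiters[e].get_shared_future();        // park until a new enough map arrives
  }
  void got_map(epoch_t e) {
    current = e;
    auto last = waiters.upper_bound(e);
    for (auto it = waiters.begin(); it != last; ++it) {
      it->second.set_value(e);                    // wake everyone satisfied by epoch e
    }
    waiters.erase(waiters.begin(), last);
  }
};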
ceph-main/src/crimson/osd/osdmap_gate.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#pragma once

#include <functional>
#include <map>
#include <optional>

#include <seastar/core/future.hh>
#include <seastar/core/shared_future.hh>

#include "include/types.h"
#include "crimson/osd/osd_operation.h"

namespace ceph {
  class Formatter;
}

namespace crimson::osd {

class ShardServices;

enum class OSDMapGateType {
  OSD,
  PG,
};

template <OSDMapGateType OSDMapGateTypeV>
class OSDMapGate {
public:
  struct OSDMapBlocker : BlockerT<OSDMapBlocker> {
    const char * type_name;
    epoch_t epoch;

    OSDMapBlocker(std::pair<const char *, epoch_t> args)
      : type_name(args.first), epoch(args.second) {}

    OSDMapBlocker(const OSDMapBlocker &) = delete;
    OSDMapBlocker(OSDMapBlocker &&) = delete;
    OSDMapBlocker &operator=(const OSDMapBlocker &) = delete;
    OSDMapBlocker &operator=(OSDMapBlocker &&) = delete;

    seastar::shared_promise<epoch_t> promise;

    void dump_detail(Formatter *f) const final;
  };
  using Blocker = OSDMapBlocker;

private:
  // order the promises in ascending order of the waited osdmap epoch,
  // so we can access all the waiters expecting a map whose epoch is less
  // than or equal to a given epoch
  using waiting_peering_t = std::map<epoch_t, OSDMapBlocker>;
  const char *blocker_type;
  waiting_peering_t waiting_peering;
  epoch_t current = 0;
  bool stopping = false;

public:
  OSDMapGate(const char *blocker_type)
    : blocker_type(blocker_type) {}

  /**
   * wait_for_map
   *
   * Wait for an osdmap whose epoch is greater or equal to given epoch.
   * If shard_services is non-null, request map if not present.
   */
  seastar::future<epoch_t> wait_for_map(
    typename OSDMapBlocker::BlockingEvent::TriggerI&& trigger,
    epoch_t epoch,
    ShardServices *shard_services=nullptr);
  void got_map(epoch_t epoch);
  seastar::future<> stop();
};

using OSD_OSDMapGate = OSDMapGate<OSDMapGateType::OSD>;
using PG_OSDMapGate = OSDMapGate<OSDMapGateType::PG>;

}
file_length: 2,053 | avg_line_length: 23.452381 | max_line_length: 73 | extension_type: h
ceph-main/src/crimson/osd/osdmap_service.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#pragma once

#include "include/types.h"
#include "osd/OSDMap.h"

class OSDMap;

class OSDMapService {
public:
  using cached_map_t = OSDMapRef;
  using local_cached_map_t = LocalOSDMapRef;

  virtual ~OSDMapService() = default;
  virtual seastar::future<cached_map_t> get_map(epoch_t e) = 0;
  /// get the latest map
  virtual cached_map_t get_map() const = 0;
  virtual epoch_t get_up_epoch() const = 0;
};
file_length: 508 | avg_line_length: 22.136364 | max_line_length: 70 | extension_type: h
ceph-main/src/crimson/osd/pg.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*- // vim: ts=8 sw=2 smarttab expandtab #include "pg.h" #include <functional> #include <boost/range/adaptor/filtered.hpp> #include <boost/range/adaptor/map.hpp> #include <boost/range/adaptor/transformed.hpp> #include <boost/range/algorithm/copy.hpp> #include <boost/range/algorithm/max_element.hpp> #include <boost/range/numeric.hpp> #include <fmt/format.h> #include <fmt/ostream.h> #include "common/hobject_fmt.h" #include "messages/MOSDOp.h" #include "messages/MOSDOpReply.h" #include "messages/MOSDRepOp.h" #include "messages/MOSDRepOpReply.h" #include "osd/OSDMap.h" #include "osd/osd_types_fmt.h" #include "os/Transaction.h" #include "crimson/common/exception.h" #include "crimson/net/Connection.h" #include "crimson/net/Messenger.h" #include "crimson/os/cyanstore/cyan_store.h" #include "crimson/os/futurized_collection.h" #include "crimson/osd/exceptions.h" #include "crimson/osd/pg_meta.h" #include "crimson/osd/pg_backend.h" #include "crimson/osd/ops_executer.h" #include "crimson/osd/osd_operations/osdop_params.h" #include "crimson/osd/osd_operations/peering_event.h" #include "crimson/osd/osd_operations/background_recovery.h" #include "crimson/osd/osd_operations/snaptrim_event.h" #include "crimson/osd/pg_recovery.h" #include "crimson/osd/replicated_recovery_backend.h" #include "crimson/osd/watch.h" using std::ostream; using std::set; using std::string; using std::vector; namespace { seastar::logger& logger() { return crimson::get_logger(ceph_subsys_osd); } } namespace std::chrono { std::ostream& operator<<(std::ostream& out, const signedspan& d) { auto s = std::chrono::duration_cast<std::chrono::seconds>(d).count(); auto ns = std::abs((d % 1s).count()); fmt::print(out, "{}{}s", s, ns ? 
fmt::format(".{:0>9}", ns) : ""); return out; } } template <typename T> struct fmt::formatter<std::optional<T>> : fmt::formatter<T> { template <typename FormatContext> auto format(const std::optional<T>& v, FormatContext& ctx) const { if (v.has_value()) { return fmt::formatter<T>::format(*v, ctx); } return fmt::format_to(ctx.out(), "<null>"); } }; namespace crimson::osd { using crimson::common::local_conf; class RecoverablePredicate : public IsPGRecoverablePredicate { public: bool operator()(const set<pg_shard_t> &have) const override { return !have.empty(); } }; class ReadablePredicate: public IsPGReadablePredicate { pg_shard_t whoami; public: explicit ReadablePredicate(pg_shard_t whoami) : whoami(whoami) {} bool operator()(const set<pg_shard_t> &have) const override { return have.count(whoami); } }; PG::PG( spg_t pgid, pg_shard_t pg_shard, crimson::os::CollectionRef coll_ref, pg_pool_t&& pool, std::string&& name, cached_map_t osdmap, ShardServices &shard_services, ec_profile_t profile) : pgid{pgid}, pg_whoami{pg_shard}, coll_ref{coll_ref}, pgmeta_oid{pgid.make_pgmeta_oid()}, osdmap_gate("PG::osdmap_gate"), shard_services{shard_services}, backend( PGBackend::create( pgid.pgid, pg_shard, pool, coll_ref, shard_services, profile, *this)), recovery_backend( std::make_unique<ReplicatedRecoveryBackend>( *this, shard_services, coll_ref, backend.get())), recovery_handler( std::make_unique<PGRecovery>(this)), peering_state( shard_services.get_cct(), pg_shard, pgid, PGPool( osdmap, pgid.pool(), pool, name), osdmap, this, this), obc_registry{ local_conf()}, obc_loader{ obc_registry, *backend.get(), *this}, osdriver( &shard_services.get_store(), coll_ref, pgid.make_pgmeta_oid()), snap_mapper( this->shard_services.get_cct(), &osdriver, pgid.ps(), pgid.get_split_bits(pool.get_pg_num()), pgid.pool(), pgid.shard), wait_for_active_blocker(this) { peering_state.set_backend_predicates( new ReadablePredicate(pg_whoami), new RecoverablePredicate()); osdmap_gate.got_map(osdmap->get_epoch()); } PG::~PG() {} void PG::check_blocklisted_watchers() { logger().debug("{}", __func__); obc_registry.for_each([this](ObjectContextRef obc) { assert(obc); for (const auto& [key, watch] : obc->watchers) { assert(watch->get_pg() == this); const auto& ea = watch->get_peer_addr(); logger().debug("watch: Found {} cookie {}. 
Checking entity_add_t {}", watch->get_entity(), watch->get_cookie(), ea); if (get_osdmap()->is_blocklisted(ea)) { logger().info("watch: Found blocklisted watcher for {}", ea); watch->do_watch_timeout(); } } }); } bool PG::try_flush_or_schedule_async() { logger().debug("PG::try_flush_or_schedule_async: flush ..."); (void)shard_services.get_store().flush( coll_ref ).then( [this, epoch=get_osdmap_epoch()]() { return shard_services.start_operation<LocalPeeringEvent>( this, pg_whoami, pgid, epoch, epoch, PeeringState::IntervalFlush()); }); return false; } void PG::publish_stats_to_osd() { if (!is_primary()) return; if (auto new_pg_stats = peering_state.prepare_stats_for_publish( pg_stats, object_stat_collection_t()); new_pg_stats.has_value()) { pg_stats = std::move(new_pg_stats); } } void PG::clear_publish_stats() { pg_stats.reset(); } pg_stat_t PG::get_stats() const { return pg_stats.value_or(pg_stat_t{}); } void PG::queue_check_readable(epoch_t last_peering_reset, ceph::timespan delay) { // handle the peering event in the background logger().debug( "{}: PG::queue_check_readable lpr: {}, delay: {}", *this, last_peering_reset, delay); check_readable_timer.cancel(); check_readable_timer.set_callback([last_peering_reset, this] { logger().debug( "{}: PG::queue_check_readable callback lpr: {}", *this, last_peering_reset); (void) shard_services.start_operation<LocalPeeringEvent>( this, pg_whoami, pgid, last_peering_reset, last_peering_reset, PeeringState::CheckReadable{}); }); check_readable_timer.arm( std::chrono::duration_cast<seastar::lowres_clock::duration>(delay)); } void PG::recheck_readable() { bool changed = false; const auto mnow = shard_services.get_mnow(); if (peering_state.state_test(PG_STATE_WAIT)) { auto prior_readable_until_ub = peering_state.get_prior_readable_until_ub(); if (mnow < prior_readable_until_ub) { logger().info( "{}: {} will wait (mnow {} < prior_readable_until_ub {})", *this, __func__, mnow, prior_readable_until_ub); queue_check_readable( peering_state.get_last_peering_reset(), prior_readable_until_ub - mnow); } else { logger().info( "{}:{} no longer wait (mnow {} >= prior_readable_until_ub {})", *this, __func__, mnow, prior_readable_until_ub); peering_state.state_clear(PG_STATE_WAIT); peering_state.clear_prior_readable_until_ub(); changed = true; } } if (peering_state.state_test(PG_STATE_LAGGY)) { auto readable_until = peering_state.get_readable_until(); if (readable_until == readable_until.zero()) { logger().info( "{}:{} still laggy (mnow {}, readable_until zero)", *this, __func__, mnow); } else if (mnow >= readable_until) { logger().info( "{}:{} still laggy (mnow {} >= readable_until {})", *this, __func__, mnow, readable_until); } else { logger().info( "{}:{} no longer laggy (mnow {} < readable_until {})", *this, __func__, mnow, readable_until); peering_state.state_clear(PG_STATE_LAGGY); changed = true; } } if (changed) { publish_stats_to_osd(); if (!peering_state.state_test(PG_STATE_WAIT) && !peering_state.state_test(PG_STATE_LAGGY)) { // TODO: requeue ops waiting for readable } } } unsigned PG::get_target_pg_log_entries() const { const unsigned local_num_pgs = shard_services.get_num_local_pgs(); const unsigned local_target = local_conf().get_val<uint64_t>("osd_target_pg_log_entries_per_osd") / seastar::smp::count; const unsigned min_pg_log_entries = local_conf().get_val<uint64_t>("osd_min_pg_log_entries"); if (local_num_pgs > 0 && local_target > 0) { // target an even spread of our budgeted log entries across all // PGs. 
note that while we only get to control the entry count // for primary PGs, we'll normally be responsible for a mix of // primary and replica PGs (for the same pool(s) even), so this // will work out. const unsigned max_pg_log_entries = local_conf().get_val<uint64_t>("osd_max_pg_log_entries"); return std::clamp(local_target / local_num_pgs, min_pg_log_entries, max_pg_log_entries); } else { // fall back to a per-pg value. return min_pg_log_entries; } } void PG::on_removal(ceph::os::Transaction &t) { t.register_on_commit( new LambdaContext( [this](int r) { ceph_assert(r == 0); (void)shard_services.start_operation<LocalPeeringEvent>( this, pg_whoami, pgid, float(0.001), get_osdmap_epoch(), get_osdmap_epoch(), PeeringState::DeleteSome()); })); } void PG::on_activate(interval_set<snapid_t> snaps) { logger().debug("{}: {} snaps={}", *this, __func__, snaps); snap_trimq = std::move(snaps); projected_last_update = peering_state.get_info().last_update; } void PG::on_activate_complete() { wait_for_active_blocker.unblock(); if (peering_state.needs_recovery()) { logger().info("{}: requesting recovery", __func__); (void) shard_services.start_operation<LocalPeeringEvent>( this, pg_whoami, pgid, float(0.001), get_osdmap_epoch(), get_osdmap_epoch(), PeeringState::DoRecovery{}); } else if (peering_state.needs_backfill()) { logger().info("{}: requesting backfill", __func__); (void) shard_services.start_operation<LocalPeeringEvent>( this, pg_whoami, pgid, float(0.001), get_osdmap_epoch(), get_osdmap_epoch(), PeeringState::RequestBackfill{}); } else { logger().debug("{}: no need to recover or backfill, AllReplicasRecovered", " for pg: {}", __func__, pgid); (void) shard_services.start_operation<LocalPeeringEvent>( this, pg_whoami, pgid, float(0.001), get_osdmap_epoch(), get_osdmap_epoch(), PeeringState::AllReplicasRecovered{}); } publish_stats_to_osd(); } void PG::prepare_write(pg_info_t &info, pg_info_t &last_written_info, PastIntervals &past_intervals, PGLog &pglog, bool dirty_info, bool dirty_big_info, bool need_write_epoch, ceph::os::Transaction &t) { std::map<string,bufferlist> km; std::string key_to_remove; if (dirty_big_info || dirty_info) { int ret = prepare_info_keymap( shard_services.get_cct(), &km, &key_to_remove, get_osdmap_epoch(), info, last_written_info, past_intervals, dirty_big_info, need_write_epoch, true, nullptr, this); ceph_assert(ret == 0); } pglog.write_log_and_missing( t, &km, coll_ref->get_cid(), pgmeta_oid, peering_state.get_pgpool().info.require_rollback()); if (!km.empty()) { t.omap_setkeys(coll_ref->get_cid(), pgmeta_oid, km); } if (!key_to_remove.empty()) { t.omap_rmkey(coll_ref->get_cid(), pgmeta_oid, key_to_remove); } } std::pair<ghobject_t, bool> PG::do_delete_work(ceph::os::Transaction &t, ghobject_t _next) { logger().info("removing pg {}", pgid); auto fut = interruptor::make_interruptible( shard_services.get_store().list_objects( coll_ref, _next, ghobject_t::get_max(), local_conf()->osd_target_transaction_size)); auto [objs_to_rm, next] = fut.get(); if (objs_to_rm.empty()) { logger().info("all objs removed, removing coll for {}", pgid); t.remove(coll_ref->get_cid(), pgmeta_oid); t.remove_collection(coll_ref->get_cid()); (void) shard_services.get_store().do_transaction( coll_ref, std::move(t)).then([this] { return shard_services.remove_pg(pgid); }); return {next, false}; } else { for (auto &obj : objs_to_rm) { if (obj == pgmeta_oid) { continue; } logger().trace("pg {}, removing obj {}", pgid, obj); t.remove(coll_ref->get_cid(), obj); } t.register_on_commit( new 
LambdaContext([this](int r) { ceph_assert(r == 0); logger().trace("triggering more pg delete {}", pgid); (void) shard_services.start_operation<LocalPeeringEvent>( this, pg_whoami, pgid, float(0.001), get_osdmap_epoch(), get_osdmap_epoch(), PeeringState::DeleteSome{}); })); return {next, true}; } } Context *PG::on_clean() { // Not needed yet (will be needed for IO unblocking) return nullptr; } void PG::on_active_actmap() { logger().debug("{}: {} snap_trimq={}", *this, __func__, snap_trimq); peering_state.state_clear(PG_STATE_SNAPTRIM_ERROR); // loops until snap_trimq is empty or SNAPTRIM_ERROR. std::ignore = seastar::do_until( [this] { return snap_trimq.empty() || peering_state.state_test(PG_STATE_SNAPTRIM_ERROR); }, [this] { peering_state.state_set(PG_STATE_SNAPTRIM); publish_stats_to_osd(); const auto to_trim = snap_trimq.range_start(); snap_trimq.erase(to_trim); const auto needs_pause = !snap_trimq.empty(); return seastar::repeat([to_trim, needs_pause, this] { logger().debug("{}: going to start SnapTrimEvent, to_trim={}", *this, to_trim); return shard_services.start_operation<SnapTrimEvent>( this, snap_mapper, to_trim, needs_pause ).second.handle_error( crimson::ct_error::enoent::handle([this] { logger().error("{}: ENOENT saw, trimming stopped", *this); peering_state.state_set(PG_STATE_SNAPTRIM_ERROR); publish_stats_to_osd(); return seastar::make_ready_future<seastar::stop_iteration>( seastar::stop_iteration::yes); }), crimson::ct_error::eagain::handle([this] { logger().info("{}: EAGAIN saw, trimming restarted", *this); return seastar::make_ready_future<seastar::stop_iteration>( seastar::stop_iteration::no); }) ); }).then([this, trimmed=to_trim] { logger().debug("{}: trimmed snap={}", *this, trimmed); }); }).finally([this] { logger().debug("{}: PG::on_active_actmap() finished trimming", *this); peering_state.state_clear(PG_STATE_SNAPTRIM); peering_state.state_clear(PG_STATE_SNAPTRIM_ERROR); publish_stats_to_osd(); }); } void PG::on_active_advmap(const OSDMapRef &osdmap) { const auto new_removed_snaps = osdmap->get_new_removed_snaps(); if (auto it = new_removed_snaps.find(get_pgid().pool()); it != new_removed_snaps.end()) { bool bad = false; for (auto j : it->second) { if (snap_trimq.intersects(j.first, j.second)) { decltype(snap_trimq) added, overlap; added.insert(j.first, j.second); overlap.intersection_of(snap_trimq, added); logger().error("{}: {} removed_snaps already contains {}", *this, __func__, overlap); bad = true; snap_trimq.union_of(added); } else { snap_trimq.insert(j.first, j.second); } } logger().info("{}: {} new removed snaps {}, snap_trimq now{}", *this, __func__, it->second, snap_trimq); assert(!bad || !local_conf().get_val<bool>("osd_debug_verify_cached_snaps")); } } void PG::scrub_requested(scrub_level_t scrub_level, scrub_type_t scrub_type) { // TODO: should update the stats upon finishing the scrub peering_state.update_stats([scrub_level, this](auto& history, auto& stats) { const utime_t now = ceph_clock_now(); history.last_scrub = peering_state.get_info().last_update; history.last_scrub_stamp = now; history.last_clean_scrub_stamp = now; if (scrub_level == scrub_level_t::deep) { history.last_deep_scrub = history.last_scrub; history.last_deep_scrub_stamp = now; } // yes, please publish the stats return true; }); } void PG::log_state_enter(const char *state) { logger().info("Entering state: {}", state); } void PG::log_state_exit( const char *state_name, utime_t enter_time, uint64_t events, utime_t event_dur) { logger().info( "Exiting state: {}, entered at {}, {} spent 
on {} events", state_name, enter_time, event_dur, events); } ceph::signedspan PG::get_mnow() const { return shard_services.get_mnow(); } HeartbeatStampsRef PG::get_hb_stamps(int peer) { return shard_services.get_hb_stamps(peer); } void PG::schedule_renew_lease(epoch_t last_peering_reset, ceph::timespan delay) { // handle the peering event in the background renew_lease_timer.cancel(); renew_lease_timer.set_callback([last_peering_reset, this] { (void) shard_services.start_operation<LocalPeeringEvent>( this, pg_whoami, pgid, last_peering_reset, last_peering_reset, RenewLease{}); }); renew_lease_timer.arm( std::chrono::duration_cast<seastar::lowres_clock::duration>(delay)); } void PG::init( int role, const vector<int>& newup, int new_up_primary, const vector<int>& newacting, int new_acting_primary, const pg_history_t& history, const PastIntervals& pi, ObjectStore::Transaction &t) { peering_state.init( role, newup, new_up_primary, newacting, new_acting_primary, history, pi, t); } seastar::future<> PG::read_state(crimson::os::FuturizedStore::Shard* store) { if (__builtin_expect(stopping, false)) { return seastar::make_exception_future<>( crimson::common::system_shutdown_exception()); } return seastar::do_with(PGMeta(*store, pgid), [] (auto& pg_meta) { return pg_meta.load(); }).then([this, store](auto&& ret) { auto [pg_info, past_intervals] = std::move(ret); return peering_state.init_from_disk_state( std::move(pg_info), std::move(past_intervals), [this, store] (PGLog &pglog) { return pglog.read_log_and_missing_crimson( *store, coll_ref, peering_state.get_info(), pgmeta_oid); }); }).then([this]() { int primary, up_primary; vector<int> acting, up; peering_state.get_osdmap()->pg_to_up_acting_osds( pgid.pgid, &up, &up_primary, &acting, &primary); peering_state.init_primary_up_acting( up, acting, up_primary, primary); int rr = OSDMap::calc_pg_role(pg_whoami, acting); peering_state.set_role(rr); epoch_t epoch = get_osdmap_epoch(); (void) shard_services.start_operation<LocalPeeringEvent>( this, pg_whoami, pgid, epoch, epoch, PeeringState::Initialize()); return seastar::now(); }); } PG::interruptible_future<> PG::do_peering_event( PGPeeringEvent& evt, PeeringCtx &rctx) { if (peering_state.pg_has_reset_since(evt.get_epoch_requested()) || peering_state.pg_has_reset_since(evt.get_epoch_sent())) { logger().debug("{} ignoring {} -- pg has reset", __func__, evt.get_desc()); return interruptor::now(); } else { logger().debug("{} handling {} for pg: {}", __func__, evt.get_desc(), pgid); // all peering event handling needs to be run in a dedicated seastar::thread, // so that event processing can involve I/O reqs freely, for example: PG::on_removal, // PG::on_new_interval return interruptor::async([this, &evt, &rctx] { peering_state.handle_event( evt.get_event(), &rctx); peering_state.write_if_dirty(rctx.transaction); }); } } seastar::future<> PG::handle_advance_map( cached_map_t next_map, PeeringCtx &rctx) { return seastar::async([this, next_map=std::move(next_map), &rctx] { vector<int> newup, newacting; int up_primary, acting_primary; next_map->pg_to_up_acting_osds( pgid.pgid, &newup, &up_primary, &newacting, &acting_primary); peering_state.advance_map( next_map, peering_state.get_osdmap(), newup, up_primary, newacting, acting_primary, rctx); osdmap_gate.got_map(next_map->get_epoch()); }); } seastar::future<> PG::handle_activate_map(PeeringCtx &rctx) { return seastar::async([this, &rctx] { peering_state.activate_map(rctx); }); } seastar::future<> PG::handle_initialize(PeeringCtx &rctx) { return 
seastar::async([this, &rctx] { peering_state.handle_event(PeeringState::Initialize{}, &rctx); }); } void PG::print(ostream& out) const { out << peering_state << " "; } void PG::dump_primary(Formatter* f) { peering_state.dump_peering_state(f); f->open_array_section("recovery_state"); PeeringState::QueryState q(f); peering_state.handle_event(q, 0); f->close_section(); // TODO: snap_trimq // TODO: scrubber state // TODO: agent state } std::ostream& operator<<(std::ostream& os, const PG& pg) { os << " pg_epoch " << pg.get_osdmap_epoch() << " "; pg.print(os); return os; } std::tuple<PG::interruptible_future<>, PG::interruptible_future<>> PG::submit_transaction( ObjectContextRef&& obc, ceph::os::Transaction&& txn, osd_op_params_t&& osd_op_p, std::vector<pg_log_entry_t>&& log_entries) { if (__builtin_expect(stopping, false)) { return {seastar::make_exception_future<>( crimson::common::system_shutdown_exception()), seastar::now()}; } epoch_t map_epoch = get_osdmap_epoch(); ceph_assert(!has_reset_since(osd_op_p.at_version.epoch)); peering_state.pre_submit_op(obc->obs.oi.soid, log_entries, osd_op_p.at_version); peering_state.append_log_with_trim_to_updated(std::move(log_entries), osd_op_p.at_version, txn, true, false); auto [submitted, all_completed] = backend->mutate_object( peering_state.get_acting_recovery_backfill(), std::move(obc), std::move(txn), std::move(osd_op_p), peering_state.get_last_peering_reset(), map_epoch, std::move(log_entries)); return std::make_tuple(std::move(submitted), all_completed.then_interruptible( [this, last_complete=peering_state.get_info().last_complete, at_version=osd_op_p.at_version](auto acked) { for (const auto& peer : acked) { peering_state.update_peer_last_complete_ondisk( peer.shard, peer.last_complete_ondisk); } peering_state.complete_write(at_version, last_complete); return seastar::now(); })); } PG::interruptible_future<> PG::repair_object( const hobject_t& oid, eversion_t& v) { // see also PrimaryLogPG::rep_repair_primary_object() assert(is_primary()); logger().debug("{}: {} peers osd.{}", __func__, oid, get_acting_recovery_backfill()); // Add object to PG's missing set if it isn't there already assert(!get_local_missing().is_missing(oid)); peering_state.force_object_missing(pg_whoami, oid, v); auto [op, fut] = get_shard_services().start_operation<UrgentRecovery>( oid, v, this, get_shard_services(), get_osdmap_epoch()); return std::move(fut); } template <class Ret, class SuccessFunc, class FailureFunc> PG::do_osd_ops_iertr::future<PG::pg_rep_op_fut_t<Ret>> PG::do_osd_ops_execute( seastar::lw_shared_ptr<OpsExecuter> ox, std::vector<OSDOp>& ops, SuccessFunc&& success_func, FailureFunc&& failure_func) { assert(ox); auto rollbacker = ox->create_rollbacker([this] (auto& obc) { return obc_loader.reload_obc(obc).handle_error_interruptible( load_obc_ertr::assert_all{"can't live with object state messed up"}); }); auto failure_func_ptr = seastar::make_lw_shared(std::move(failure_func)); return interruptor::do_for_each(ops, [ox](OSDOp& osd_op) { logger().debug( "do_osd_ops_execute: object {} - handling op {}", ox->get_target(), ceph_osd_op_name(osd_op.op.op)); return ox->execute_op(osd_op); }).safe_then_interruptible([this, ox, &ops] { logger().debug( "do_osd_ops_execute: object {} all operations successful", ox->get_target()); // check for full if ((ox->delta_stats.num_bytes > 0 || ox->delta_stats.num_objects > 0) && get_pgpool().info.has_flag(pg_pool_t::FLAG_FULL)) { const auto& m = ox->get_message(); if (m.get_reqid().name.is_mds() || // FIXME: ignore MDS for now 
m.has_flag(CEPH_OSD_FLAG_FULL_FORCE)) { logger().info(" full, but proceeding due to FULL_FORCE or MDS"); } else if (m.has_flag(CEPH_OSD_FLAG_FULL_TRY)) { // they tried, they failed. logger().info(" full, replying to FULL_TRY op"); if (get_pgpool().info.has_flag(pg_pool_t::FLAG_FULL_QUOTA)) return interruptor::make_ready_future<OpsExecuter::rep_op_fut_tuple>( seastar::now(), OpsExecuter::osd_op_ierrorator::future<>( crimson::ct_error::edquot::make())); else return interruptor::make_ready_future<OpsExecuter::rep_op_fut_tuple>( seastar::now(), OpsExecuter::osd_op_ierrorator::future<>( crimson::ct_error::enospc::make())); } else { // drop request logger().info(" full, dropping request (bad client)"); return interruptor::make_ready_future<OpsExecuter::rep_op_fut_tuple>( seastar::now(), OpsExecuter::osd_op_ierrorator::future<>( crimson::ct_error::eagain::make())); } } return std::move(*ox).flush_changes_n_do_ops_effects( ops, snap_mapper, osdriver, [this] (auto&& txn, auto&& obc, auto&& osd_op_p, auto&& log_entries) { logger().debug( "do_osd_ops_execute: object {} submitting txn", obc->get_oid()); return submit_transaction( std::move(obc), std::move(txn), std::move(osd_op_p), std::move(log_entries)); }); }).safe_then_unpack_interruptible( [success_func=std::move(success_func), rollbacker, this, failure_func_ptr] (auto submitted_fut, auto all_completed_fut) mutable { return PG::do_osd_ops_iertr::make_ready_future<pg_rep_op_fut_t<Ret>>( std::move(submitted_fut), all_completed_fut.safe_then_interruptible_tuple( std::move(success_func), crimson::ct_error::object_corrupted::handle( [rollbacker, this] (const std::error_code& e) mutable { // this is a path for EIO. it's special because we want to fix the obejct // and try again. that is, the layer above `PG::do_osd_ops` is supposed to // restart the execution. return rollbacker.rollback_obc_if_modified(e).then_interruptible( [obc=rollbacker.get_obc(), this] { return repair_object(obc->obs.oi.soid, obc->obs.oi.version).then_interruptible([] { return do_osd_ops_iertr::future<Ret>{crimson::ct_error::eagain::make()}; }); }); }), OpsExecuter::osd_op_errorator::all_same_way( [rollbacker, failure_func_ptr] (const std::error_code& e) mutable { return rollbacker.rollback_obc_if_modified(e).then_interruptible( [e, failure_func_ptr] { return (*failure_func_ptr)(e); }); }) ) ); }, OpsExecuter::osd_op_errorator::all_same_way( [rollbacker, failure_func_ptr] (const std::error_code& e) mutable { return PG::do_osd_ops_iertr::make_ready_future<pg_rep_op_fut_t<Ret>>( seastar::now(), e.value() == ENOENT ? 
(*failure_func_ptr)(e) : rollbacker.rollback_obc_if_modified(e).then_interruptible( [e, failure_func_ptr] { return (*failure_func_ptr)(e); })); })); } seastar::future<> PG::submit_error_log( Ref<MOSDOp> m, const OpInfo &op_info, ObjectContextRef obc, const std::error_code e, ceph_tid_t rep_tid, eversion_t &version) { const osd_reqid_t &reqid = m->get_reqid(); mempool::osd_pglog::list<pg_log_entry_t> log_entries; log_entries.push_back(pg_log_entry_t(pg_log_entry_t::ERROR, obc->obs.oi.soid, next_version(), eversion_t(), 0, reqid, utime_t(), -e.value())); if (op_info.allows_returnvec()) { log_entries.back().set_op_returns(m->ops); } ceph_assert(is_primary()); if (!log_entries.empty()) { ceph_assert(log_entries.rbegin()->version >= projected_last_update); version = projected_last_update = log_entries.rbegin()->version; } ceph::os::Transaction t; peering_state.merge_new_log_entries( log_entries, t, peering_state.get_pg_trim_to(), peering_state.get_min_last_complete_ondisk()); set<pg_shard_t> waiting_on; for (auto &i : get_acting_recovery_backfill()) { pg_shard_t peer(i); if (peer == pg_whoami) continue; ceph_assert(peering_state.get_peer_missing().count(peer)); ceph_assert(peering_state.has_peer_info(peer)); auto log_m = crimson::make_message<MOSDPGUpdateLogMissing>( log_entries, spg_t(peering_state.get_info().pgid.pgid, i.shard), pg_whoami.shard, get_osdmap_epoch(), get_last_peering_reset(), rep_tid, peering_state.get_pg_trim_to(), peering_state.get_min_last_complete_ondisk()); send_cluster_message(peer.osd, std::move(log_m), get_osdmap_epoch()); waiting_on.insert(peer); } waiting_on.insert(pg_whoami); log_entry_update_waiting_on.insert( std::make_pair(rep_tid, log_update_t{std::move(waiting_on)})); return shard_services.get_store().do_transaction( get_collection_ref(), std::move(t)) .then([this] { peering_state.update_trim_to(); return seastar::now(); }); } PG::do_osd_ops_iertr::future<PG::pg_rep_op_fut_t<MURef<MOSDOpReply>>> PG::do_osd_ops( Ref<MOSDOp> m, crimson::net::ConnectionRef conn, ObjectContextRef obc, const OpInfo &op_info, const SnapContext& snapc) { if (__builtin_expect(stopping, false)) { throw crimson::common::system_shutdown_exception(); } return do_osd_ops_execute<MURef<MOSDOpReply>>( seastar::make_lw_shared<OpsExecuter>( Ref<PG>{this}, obc, op_info, *m, conn, snapc), m->ops, [this, m, obc, may_write = op_info.may_write(), may_read = op_info.may_read(), rvec = op_info.allows_returnvec()] { // TODO: should stop at the first op which returns a negative retval, // cmpext uses it for returning the index of first unmatched byte int result = m->ops.empty() ? 0 : m->ops.back().rval.code; if (may_read && result >= 0) { for (auto &osdop : m->ops) { if (osdop.rval < 0 && !(osdop.op.flags & CEPH_OSD_OP_FLAG_FAILOK)) { result = osdop.rval.code; break; } } } else if (result > 0 && may_write && !rvec) { result = 0; } else if (result < 0 && (m->ops.empty() ? 
0 : m->ops.back().op.flags & CEPH_OSD_OP_FLAG_FAILOK)) { result = 0; } auto reply = crimson::make_message<MOSDOpReply>(m.get(), result, get_osdmap_epoch(), 0, false); reply->add_flags(CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK); logger().debug( "do_osd_ops: {} - object {} sending reply", *m, m->get_hobj()); if (obc->obs.exists) { reply->set_reply_versions(peering_state.get_info().last_update, obc->obs.oi.user_version); } else { reply->set_reply_versions(peering_state.get_info().last_update, peering_state.get_info().last_user_version); } return do_osd_ops_iertr::make_ready_future<MURef<MOSDOpReply>>( std::move(reply)); }, [m, &op_info, obc, this] (const std::error_code& e) { return seastar::do_with(eversion_t(), [m, &op_info, obc, e, this](auto &version) { auto fut = seastar::now(); epoch_t epoch = get_osdmap_epoch(); ceph_tid_t rep_tid = shard_services.get_tid(); auto last_complete = peering_state.get_info().last_complete; if (op_info.may_write()) { fut = submit_error_log(m, op_info, obc, e, rep_tid, version); } return fut.then([m, e, epoch, &op_info, rep_tid, &version, last_complete, this] { auto log_reply = [m, e, this] { auto reply = crimson::make_message<MOSDOpReply>( m.get(), -e.value(), get_osdmap_epoch(), 0, false); if (m->ops.empty() ? 0 : m->ops.back().op.flags & CEPH_OSD_OP_FLAG_FAILOK) { reply->set_result(0); } // For all ops except for CMPEXT, the correct error value is encoded // in e.value(). For CMPEXT, osdop.rval has the actual error value. if (e.value() == ct_error::cmp_fail_error_value) { assert(!m->ops.empty()); for (auto &osdop : m->ops) { if (osdop.rval < 0) { reply->set_result(osdop.rval); break; } } } reply->set_enoent_reply_versions( peering_state.get_info().last_update, peering_state.get_info().last_user_version); reply->add_flags(CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK); return do_osd_ops_iertr::make_ready_future<MURef<MOSDOpReply>>( std::move(reply)); }; if (!peering_state.pg_has_reset_since(epoch) && op_info.may_write()) { auto it = log_entry_update_waiting_on.find(rep_tid); ceph_assert(it != log_entry_update_waiting_on.end()); auto it2 = it->second.waiting_on.find(pg_whoami); ceph_assert(it2 != it->second.waiting_on.end()); it->second.waiting_on.erase(it2); if (it->second.waiting_on.empty()) { log_entry_update_waiting_on.erase(it); if (version != eversion_t()) { peering_state.complete_write(version, last_complete); } return log_reply(); } else { return it->second.all_committed.get_shared_future() .then([this, &version, last_complete, log_reply = std::move(log_reply)] { if (version != eversion_t()) { peering_state.complete_write(version, last_complete); } return log_reply(); }); } } else { return log_reply(); } }); }); }); } PG::do_osd_ops_iertr::future<PG::pg_rep_op_fut_t<>> PG::do_osd_ops( ObjectContextRef obc, std::vector<OSDOp>& ops, const OpInfo &op_info, const do_osd_ops_params_t &&msg_params, do_osd_ops_success_func_t success_func, do_osd_ops_failure_func_t failure_func) { // This overload is generally used for internal client requests, // use an empty SnapContext. 
return seastar::do_with( std::move(msg_params), [=, this, &ops, &op_info](auto &msg_params) { return do_osd_ops_execute<void>( seastar::make_lw_shared<OpsExecuter>( Ref<PG>{this}, std::move(obc), op_info, msg_params, msg_params.get_connection(), SnapContext{} ), ops, std::move(success_func), std::move(failure_func)); }); } PG::interruptible_future<MURef<MOSDOpReply>> PG::do_pg_ops(Ref<MOSDOp> m) { if (__builtin_expect(stopping, false)) { throw crimson::common::system_shutdown_exception(); } auto ox = std::make_unique<PgOpsExecuter>(std::as_const(*this), std::as_const(*m)); return interruptor::do_for_each(m->ops, [ox = ox.get()](OSDOp& osd_op) { logger().debug("will be handling pg op {}", ceph_osd_op_name(osd_op.op.op)); return ox->execute_op(osd_op); }).then_interruptible([m, this, ox = std::move(ox)] { auto reply = crimson::make_message<MOSDOpReply>(m.get(), 0, get_osdmap_epoch(), CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK, false); reply->claim_op_out_data(m->ops); reply->set_reply_versions(peering_state.get_info().last_update, peering_state.get_info().last_user_version); return seastar::make_ready_future<MURef<MOSDOpReply>>(std::move(reply)); }).handle_exception_type_interruptible([=, this](const crimson::osd::error& e) { auto reply = crimson::make_message<MOSDOpReply>( m.get(), -e.code().value(), get_osdmap_epoch(), 0, false); reply->set_enoent_reply_versions(peering_state.get_info().last_update, peering_state.get_info().last_user_version); return seastar::make_ready_future<MURef<MOSDOpReply>>(std::move(reply)); }); } hobject_t PG::get_oid(const hobject_t& hobj) { return hobj.snap == CEPH_SNAPDIR ? hobj.get_head() : hobj; } RWState::State PG::get_lock_type(const OpInfo &op_info) { if (op_info.rwordered() && op_info.may_read()) { return RWState::RWEXCL; } else if (op_info.rwordered()) { return RWState::RWWRITE; } else { ceph_assert(op_info.may_read()); return RWState::RWREAD; } } void PG::check_blocklisted_obc_watchers( ObjectContextRef &obc) { if (obc->watchers.empty()) { for (auto &[src, winfo] : obc->obs.oi.watchers) { auto watch = crimson::osd::Watch::create( obc, winfo, src.second, this); watch->disconnect(); auto [it, emplaced] = obc->watchers.emplace(src, std::move(watch)); assert(emplaced); logger().debug("added watch for obj {}, client {}", obc->get_oid(), src.second); } } } PG::load_obc_iertr::future<> PG::with_locked_obc(const hobject_t &hobj, const OpInfo &op_info, with_obc_func_t &&f) { if (__builtin_expect(stopping, false)) { throw crimson::common::system_shutdown_exception(); } const hobject_t oid = get_oid(hobj); auto wrapper = [f=std::move(f), this](auto obc) { check_blocklisted_obc_watchers(obc); return f(obc); }; switch (get_lock_type(op_info)) { case RWState::RWREAD: return obc_loader.with_obc<RWState::RWREAD>(oid, std::move(wrapper)); case RWState::RWWRITE: return obc_loader.with_obc<RWState::RWWRITE>(oid, std::move(wrapper)); case RWState::RWEXCL: return obc_loader.with_obc<RWState::RWEXCL>(oid, std::move(wrapper)); default: ceph_abort(); }; } PG::interruptible_future<> PG::handle_rep_op(Ref<MOSDRepOp> req) { if (__builtin_expect(stopping, false)) { return seastar::make_exception_future<>( crimson::common::system_shutdown_exception()); } logger().debug("{}: {}", __func__, *req); if (can_discard_replica_op(*req)) { return seastar::now(); } ceph::os::Transaction txn; auto encoded_txn = req->get_data().cbegin(); decode(txn, encoded_txn); auto p = req->logbl.cbegin(); std::vector<pg_log_entry_t> log_entries; decode(log_entries, p); log_operation(std::move(log_entries), 
req->pg_trim_to, req->version, req->min_last_complete_ondisk, !txn.empty(), txn, false); logger().debug("PG::handle_rep_op: do_transaction..."); return interruptor::make_interruptible(shard_services.get_store().do_transaction( coll_ref, std::move(txn))).then_interruptible( [req, lcod=peering_state.get_info().last_complete, this] { peering_state.update_last_complete_ondisk(lcod); const auto map_epoch = get_osdmap_epoch(); auto reply = crimson::make_message<MOSDRepOpReply>( req.get(), pg_whoami, 0, map_epoch, req->get_min_epoch(), CEPH_OSD_FLAG_ONDISK); reply->set_last_complete_ondisk(lcod); return shard_services.send_to_osd(req->from.osd, std::move(reply), map_epoch); }); } void PG::log_operation( std::vector<pg_log_entry_t>&& logv, const eversion_t &trim_to, const eversion_t &roll_forward_to, const eversion_t &min_last_complete_ondisk, bool transaction_applied, ObjectStore::Transaction &txn, bool async) { logger().debug("{}", __func__); if (is_primary()) { ceph_assert(trim_to <= peering_state.get_last_update_ondisk()); } /* TODO: when we add snap mapper and projected log support, * we'll likely want to update them here. * * See src/osd/PrimaryLogPG.h:log_operation for how classic * handles these cases. */ #if 0 if (transaction_applied) { //TODO: //update_snap_map(logv, t); } auto last = logv.rbegin(); if (is_primary() && last != logv.rend()) { projected_log.skip_can_rollback_to_to_head(); projected_log.trim(cct, last->version, nullptr, nullptr, nullptr); } #endif if (!is_primary()) { // && !is_ec_pg() replica_clear_repop_obc(logv); } peering_state.append_log(std::move(logv), trim_to, roll_forward_to, min_last_complete_ondisk, txn, !txn.empty(), false); } void PG::replica_clear_repop_obc( const std::vector<pg_log_entry_t> &logv) { logger().debug("{} clearing {} entries", __func__, logv.size()); for (auto &&e: logv) { logger().debug(" {} get_object_boundary(from): {} " " head version(to): {}", e.soid, e.soid.get_object_boundary(), e.soid.get_head()); /* Have to blast all clones, they share a snapset */ obc_registry.clear_range( e.soid.get_object_boundary(), e.soid.get_head()); } } void PG::handle_rep_op_reply(const MOSDRepOpReply& m) { if (!can_discard_replica_op(m)) { backend->got_rep_op_reply(m); } } PG::interruptible_future<> PG::do_update_log_missing( Ref<MOSDPGUpdateLogMissing> m, crimson::net::ConnectionRef conn) { if (__builtin_expect(stopping, false)) { return seastar::make_exception_future<>( crimson::common::system_shutdown_exception()); } ceph_assert(m->get_type() == MSG_OSD_PG_UPDATE_LOG_MISSING); ObjectStore::Transaction t; std::optional<eversion_t> op_trim_to, op_roll_forward_to; if (m->pg_trim_to != eversion_t()) op_trim_to = m->pg_trim_to; if (m->pg_roll_forward_to != eversion_t()) op_roll_forward_to = m->pg_roll_forward_to; logger().debug("op_trim_to = {}, op_roll_forward_to = {}", op_trim_to, op_roll_forward_to); peering_state.append_log_entries_update_missing( m->entries, t, op_trim_to, op_roll_forward_to); return interruptor::make_interruptible(shard_services.get_store().do_transaction( coll_ref, std::move(t))).then_interruptible( [m, conn, lcod=peering_state.get_info().last_complete, this] { if (!peering_state.pg_has_reset_since(m->get_epoch())) { peering_state.update_last_complete_ondisk(lcod); auto reply = crimson::make_message<MOSDPGUpdateLogMissingReply>( spg_t(peering_state.get_info().pgid.pgid, get_primary().shard), pg_whoami.shard, m->get_epoch(), m->min_epoch, m->get_tid(), lcod); reply->set_priority(CEPH_MSG_PRIO_HIGH); return conn->send(std::move(reply)); } 
return seastar::now(); }); } PG::interruptible_future<> PG::do_update_log_missing_reply( Ref<MOSDPGUpdateLogMissingReply> m) { logger().debug("{}: got reply from {}", __func__, m->get_from()); auto it = log_entry_update_waiting_on.find(m->get_tid()); if (it != log_entry_update_waiting_on.end()) { if (it->second.waiting_on.count(m->get_from())) { it->second.waiting_on.erase(m->get_from()); if (m->last_complete_ondisk != eversion_t()) { peering_state.update_peer_last_complete_ondisk( m->get_from(), m->last_complete_ondisk); } } else { logger().error("{} : {} got reply {} from shard we are not waiting for ", __func__, peering_state.get_info().pgid, *m, m->get_from()); } if (it->second.waiting_on.empty()) { it->second.all_committed.set_value(); it->second.all_committed = {}; log_entry_update_waiting_on.erase(it); } } else { logger().error("{} : {} got reply {} on unknown tid {}", __func__, peering_state.get_info().pgid, *m, m->get_tid()); } return seastar::now(); } bool PG::old_peering_msg( const epoch_t reply_epoch, const epoch_t query_epoch) const { if (const epoch_t lpr = peering_state.get_last_peering_reset(); lpr > reply_epoch || lpr > query_epoch) { logger().debug("{}: pg changed {} lpr {}, reply_epoch {}, query_epoch {}", __func__, get_info().history, lpr, reply_epoch, query_epoch); return true; } return false; } bool PG::can_discard_replica_op(const Message& m, epoch_t m_map_epoch) const { // if a repop is replied after a replica goes down in a new osdmap, and // before the pg advances to this new osdmap, the repop replies before this // repop can be discarded by that replica OSD, because the primary resets the // connection to it when handling the new osdmap marking it down, and also // resets the messenger session when the replica reconnects. to avoid the // out-of-order replies, the messages from that replica should be discarded. const auto osdmap = peering_state.get_osdmap(); const int from_osd = m.get_source().num(); if (osdmap->is_down(from_osd)) { return true; } // Mostly, this overlaps with the old_peering_msg // condition. An important exception is pushes // sent by replicas not in the acting set, since // if such a replica goes down it does not cause // a new interval. if (osdmap->get_down_at(from_osd) >= m_map_epoch) { return true; } // same pg? // if pg changes *at all*, we reset and repeer!
return old_peering_msg(m_map_epoch, m_map_epoch); } seastar::future<> PG::stop() { logger().info("PG {} {}", pgid, __func__); stopping = true; cancel_local_background_io_reservation(); cancel_remote_recovery_reservation(); check_readable_timer.cancel(); renew_lease_timer.cancel(); return osdmap_gate.stop().then([this] { return wait_for_active_blocker.stop(); }).then([this] { return recovery_handler->stop(); }).then([this] { return recovery_backend->stop(); }).then([this] { return backend->stop(); }); } void PG::on_change(ceph::os::Transaction &t) { logger().debug("{} {}:", *this, __func__); context_registry_on_change(); obc_loader.notify_on_change(is_primary()); recovery_backend->on_peering_interval_change(t); backend->on_actingset_changed(is_primary()); wait_for_active_blocker.unblock(); if (is_primary()) { logger().debug("{} {}: requeueing", *this, __func__); client_request_orderer.requeue(shard_services, this); } else { logger().debug("{} {}: dropping requests", *this, __func__); client_request_orderer.clear_and_cancel(); } } void PG::context_registry_on_change() { obc_registry.for_each([](ObjectContextRef obc) { assert(obc); for (auto j = obc->watchers.begin(); j != obc->watchers.end(); j = obc->watchers.erase(j)) { j->second->discard_state(); } }); } bool PG::can_discard_op(const MOSDOp& m) const { if (m.get_map_epoch() < peering_state.get_info().history.same_primary_since) { logger().debug("{} changed after {} dropping {} ", __func__ , m.get_map_epoch(), m); return true; } if ((m.get_flags() & (CEPH_OSD_FLAG_BALANCE_READS | CEPH_OSD_FLAG_LOCALIZE_READS)) && !is_primary() && (m.get_map_epoch() < peering_state.get_info().history.same_interval_since)) { // Note: the Objecter will resend on interval change without the primary // changing if it actually sent to a replica. If the primary hasn't // changed since the send epoch, we got it, and we're primary, it won't // have resent even if the interval did change as it sent it to the primary // (us). return true; } return __builtin_expect(m.get_map_epoch() < peering_state.get_info().history.same_primary_since, false); } bool PG::is_degraded_or_backfilling_object(const hobject_t& soid) const { /* The conditions below may clear (on_local_recover, before we queue * the transaction) before we actually requeue the degraded waiters * in on_global_recover after the transaction completes. */ if (peering_state.get_pg_log().get_missing().get_items().count(soid)) return true; ceph_assert(!get_acting_recovery_backfill().empty()); for (auto& peer : get_acting_recovery_backfill()) { if (peer == get_primary()) continue; auto peer_missing_entry = peering_state.get_peer_missing().find(peer); // If an object is missing on an async_recovery_target, return false. // This will not block the op and the object is async recovered later. 
if (peer_missing_entry != peering_state.get_peer_missing().end() && peer_missing_entry->second.get_items().count(soid)) { return true; } // Object is degraded if after last_backfill AND // we are backfilling it if (is_backfill_target(peer) && peering_state.get_peer_info(peer).last_backfill <= soid && recovery_handler->backfill_state->get_last_backfill_started() >= soid && recovery_backend->is_recovering(soid)) { return true; } } return false; } PG::interruptible_future<std::optional<PG::complete_op_t>> PG::already_complete(const osd_reqid_t& reqid) { eversion_t version; version_t user_version; int ret; std::vector<pg_log_op_return_item_t> op_returns; if (peering_state.get_pg_log().get_log().get_request( reqid, &version, &user_version, &ret, &op_returns)) { complete_op_t dupinfo{ user_version, version, ret}; return backend->request_committed(reqid, version).then([dupinfo] { return seastar::make_ready_future<std::optional<complete_op_t>>(dupinfo); }); } else { return seastar::make_ready_future<std::optional<complete_op_t>>(std::nullopt); } } }
50,516
31.718264
92
cc
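The pg.cc row above ends with PG::can_discard_replica_op(), which drops a replica op when its sender is already marked down, was marked down at or after the message's map epoch, or when the message predates the last peering reset (via old_peering_msg()). A minimal stand-alone C++ sketch of that discard rule follows; the DiscardState struct and its fields are hypothetical stand-ins for the OSDMap and PeeringState queries, not the real crimson interfaces.

#include <cstdint>
#include <iostream>

using epoch_t = std::uint32_t;

// Hypothetical, simplified view of the state consulted by can_discard_replica_op().
struct DiscardState {
  bool sender_is_down;        // stands in for osdmap->is_down(from_osd)
  epoch_t sender_down_at;     // stands in for osdmap->get_down_at(from_osd)
  epoch_t last_peering_reset; // stands in for peering_state.get_last_peering_reset()
};

// Mirrors the three checks above: sender currently down, sender marked down at or
// after the message epoch, or message older than the last peering reset.
bool can_discard_replica_op(const DiscardState& s, epoch_t msg_map_epoch)
{
  if (s.sender_is_down) {
    return true;
  }
  if (s.sender_down_at >= msg_map_epoch) {
    return true;
  }
  return s.last_peering_reset > msg_map_epoch;
}

int main()
{
  DiscardState s{false, 90, 100};
  std::cout << can_discard_replica_op(s, 95) << '\n';  // 1: predates the last peering reset
  std::cout << can_discard_replica_op(s, 120) << '\n'; // 0: recent enough to keep
}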
null
ceph-main/src/crimson/osd/pg.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*- // vim: ts=8 sw=2 smarttab expandtab #pragma once #include <memory> #include <optional> #include <boost/smart_ptr/intrusive_ref_counter.hpp> #include <seastar/core/future.hh> #include <seastar/core/shared_future.hh> #include "common/dout.h" #include "include/interval_set.h" #include "crimson/net/Fwd.h" #include "messages/MOSDRepOpReply.h" #include "messages/MOSDOpReply.h" #include "os/Transaction.h" #include "osd/osd_types.h" #include "osd/osd_types_fmt.h" #include "crimson/osd/object_context.h" #include "osd/PeeringState.h" #include "osd/SnapMapper.h" #include "crimson/common/interruptible_future.h" #include "crimson/common/type_helpers.h" #include "crimson/os/futurized_collection.h" #include "crimson/osd/backfill_state.h" #include "crimson/osd/pg_interval_interrupt_condition.h" #include "crimson/osd/ops_executer.h" #include "crimson/osd/osd_operations/client_request.h" #include "crimson/osd/osd_operations/logmissing_request.h" #include "crimson/osd/osd_operations/logmissing_request_reply.h" #include "crimson/osd/osd_operations/peering_event.h" #include "crimson/osd/osd_operations/replicated_request.h" #include "crimson/osd/shard_services.h" #include "crimson/osd/osdmap_gate.h" #include "crimson/osd/pg_activation_blocker.h" #include "crimson/osd/pg_recovery.h" #include "crimson/osd/pg_recovery_listener.h" #include "crimson/osd/recovery_backend.h" #include "crimson/osd/object_context_loader.h" class MQuery; class OSDMap; class PGBackend; class PGPeeringEvent; class osd_op_params_t; namespace recovery { class Context; } namespace crimson::net { class Messenger; } namespace crimson::os { class FuturizedStore; } namespace crimson::osd { class OpsExecuter; class BackfillRecovery; class PG : public boost::intrusive_ref_counter< PG, boost::thread_unsafe_counter>, public PGRecoveryListener, PeeringState::PeeringListener, DoutPrefixProvider { using ec_profile_t = std::map<std::string,std::string>; using cached_map_t = OSDMapService::cached_map_t; ClientRequest::PGPipeline request_pg_pipeline; PGPeeringPipeline peering_request_pg_pipeline; ClientRequest::Orderer client_request_orderer; spg_t pgid; pg_shard_t pg_whoami; crimson::os::CollectionRef coll_ref; ghobject_t pgmeta_oid; seastar::timer<seastar::lowres_clock> check_readable_timer; seastar::timer<seastar::lowres_clock> renew_lease_timer; public: template <typename T = void> using interruptible_future = ::crimson::interruptible::interruptible_future< ::crimson::osd::IOInterruptCondition, T>; PG(spg_t pgid, pg_shard_t pg_shard, crimson::os::CollectionRef coll_ref, pg_pool_t&& pool, std::string&& name, cached_map_t osdmap, ShardServices &shard_services, ec_profile_t profile); ~PG(); const pg_shard_t& get_pg_whoami() const final { return pg_whoami; } const spg_t& get_pgid() const final { return pgid; } PGBackend& get_backend() { return *backend; } const PGBackend& get_backend() const { return *backend; } // EpochSource epoch_t get_osdmap_epoch() const final { return peering_state.get_osdmap_epoch(); } eversion_t get_pg_trim_to() const { return peering_state.get_pg_trim_to(); } eversion_t get_min_last_complete_ondisk() const { return peering_state.get_min_last_complete_ondisk(); } const pg_info_t& get_info() const final { return peering_state.get_info(); } // DoutPrefixProvider std::ostream& gen_prefix(std::ostream& out) const final { return out << *this; } crimson::common::CephContext *get_cct() const final { return shard_services.get_cct(); } unsigned get_subsys() const 
final { return ceph_subsys_osd; } crimson::os::CollectionRef get_collection_ref() { return coll_ref; } // PeeringListener void prepare_write( pg_info_t &info, pg_info_t &last_written_info, PastIntervals &past_intervals, PGLog &pglog, bool dirty_info, bool dirty_big_info, bool need_write_epoch, ceph::os::Transaction &t) final; void scrub_requested(scrub_level_t scrub_level, scrub_type_t scrub_type) final; uint64_t get_snap_trimq_size() const final { return std::size(snap_trimq); } void send_cluster_message( int osd, MessageURef m, epoch_t epoch, bool share_map_update=false) final { (void)shard_services.send_to_osd(osd, std::move(m), epoch); } void send_pg_created(pg_t pgid) final { (void)shard_services.send_pg_created(pgid); } bool try_flush_or_schedule_async() final; void start_flush_on_transaction( ceph::os::Transaction &t) final { t.register_on_commit( new LambdaContext([this](int r){ peering_state.complete_flush(); })); } void on_flushed() final { // will be needed for unblocking IO operations/peering } template <typename T> void start_peering_event_operation(T &&evt, float delay = 0) { (void) shard_services.start_operation<LocalPeeringEvent>( this, pg_whoami, pgid, delay, std::forward<T>(evt)); } void schedule_event_after( PGPeeringEventRef event, float delay) final { start_peering_event_operation(std::move(*event), delay); } std::vector<pg_shard_t> get_replica_recovery_order() const final { return peering_state.get_replica_recovery_order(); } void request_local_background_io_reservation( unsigned priority, PGPeeringEventURef on_grant, PGPeeringEventURef on_preempt) final { // TODO -- we probably want to add a mechanism for blocking on this // after handling the peering event std::ignore = shard_services.local_request_reservation( pgid, on_grant ? make_lambda_context([this, on_grant=std::move(on_grant)] (int) { start_peering_event_operation(std::move(*on_grant)); }) : nullptr, priority, on_preempt ? make_lambda_context( [this, on_preempt=std::move(on_preempt)] (int) { start_peering_event_operation(std::move(*on_preempt)); }) : nullptr); } void update_local_background_io_priority( unsigned priority) final { // TODO -- we probably want to add a mechanism for blocking on this // after handling the peering event std::ignore = shard_services.local_update_priority( pgid, priority); } void cancel_local_background_io_reservation() final { // TODO -- we probably want to add a mechanism for blocking on this // after handling the peering event std::ignore = shard_services.local_cancel_reservation( pgid); } void request_remote_recovery_reservation( unsigned priority, PGPeeringEventURef on_grant, PGPeeringEventURef on_preempt) final { // TODO -- we probably want to add a mechanism for blocking on this // after handling the peering event std::ignore = shard_services.remote_request_reservation( pgid, on_grant ? make_lambda_context([this, on_grant=std::move(on_grant)] (int) { start_peering_event_operation(std::move(*on_grant)); }) : nullptr, priority, on_preempt ? 
make_lambda_context( [this, on_preempt=std::move(on_preempt)] (int) { start_peering_event_operation(std::move(*on_preempt)); }) : nullptr); } void cancel_remote_recovery_reservation() final { // TODO -- we probably want to add a mechanism for blocking on this // after handling the peering event std::ignore = shard_services.remote_cancel_reservation( pgid); } void schedule_event_on_commit( ceph::os::Transaction &t, PGPeeringEventRef on_commit) final { t.register_on_commit( make_lambda_context( [this, on_commit=std::move(on_commit)](int) { start_peering_event_operation(std::move(*on_commit)); })); } void update_heartbeat_peers(std::set<int> peers) final { // Not needed yet } void set_probe_targets(const std::set<pg_shard_t> &probe_set) final { // Not needed yet } void clear_probe_targets() final { // Not needed yet } void queue_want_pg_temp(const std::vector<int> &wanted) final { // TODO -- we probably want to add a mechanism for blocking on this // after handling the peering event std::ignore = shard_services.queue_want_pg_temp(pgid.pgid, wanted); } void clear_want_pg_temp() final { // TODO -- we probably want to add a mechanism for blocking on this // after handling the peering event std::ignore = shard_services.remove_want_pg_temp(pgid.pgid); } void check_recovery_sources(const OSDMapRef& newmap) final { // Not needed yet } void check_blocklisted_watchers() final; void clear_primary_state() final { // Not needed yet } void queue_check_readable(epoch_t last_peering_reset, ceph::timespan delay) final; void recheck_readable() final; unsigned get_target_pg_log_entries() const final; void on_pool_change() final { // Not needed yet } void on_role_change() final { // Not needed yet } void on_change(ceph::os::Transaction &t) final; void on_activate(interval_set<snapid_t> to_trim) final; void on_activate_complete() final; void on_new_interval() final { // Not needed yet } Context *on_clean() final; void on_activate_committed() final { // Not needed yet (will be needed for IO unblocking) } void on_active_exit() final { // Not needed yet } void on_removal(ceph::os::Transaction &t) final; std::pair<ghobject_t, bool> do_delete_work(ceph::os::Transaction &t, ghobject_t _next) final; // merge/split not ready void clear_ready_to_merge() final {} void set_not_ready_to_merge_target(pg_t pgid, pg_t src) final {} void set_not_ready_to_merge_source(pg_t pgid) final {} void set_ready_to_merge_target(eversion_t lu, epoch_t les, epoch_t lec) final {} void set_ready_to_merge_source(eversion_t lu) final {} void on_active_actmap() final; void on_active_advmap(const OSDMapRef &osdmap) final; epoch_t cluster_osdmap_trim_lower_bound() final { // TODO return 0; } void on_backfill_reserved() final { recovery_handler->on_backfill_reserved(); } void on_backfill_canceled() final { ceph_assert(0 == "Not implemented"); } void on_recovery_reserved() final { recovery_handler->start_pglogbased_recovery(); } bool try_reserve_recovery_space( int64_t primary_num_bytes, int64_t local_num_bytes) final { // TODO return true; } void unreserve_recovery_space() final {} struct PGLogEntryHandler : public PGLog::LogEntryHandler { PG *pg; ceph::os::Transaction *t; PGLogEntryHandler(PG *pg, ceph::os::Transaction *t) : pg(pg), t(t) {} // LogEntryHandler void remove(const hobject_t &hoid) override { // TODO } void try_stash(const hobject_t &hoid, version_t v) override { // TODO } void rollback(const pg_log_entry_t &entry) override { // TODO } void rollforward(const pg_log_entry_t &entry) override { // TODO } void trim(const pg_log_entry_t 
&entry) override { // TODO } }; PGLog::LogEntryHandlerRef get_log_handler( ceph::os::Transaction &t) final { return std::make_unique<PG::PGLogEntryHandler>(this, &t); } void rebuild_missing_set_with_deletes(PGLog &pglog) final { pglog.rebuild_missing_set_with_deletes_crimson( shard_services.get_store(), coll_ref, peering_state.get_info()).get(); } PerfCounters &get_peering_perf() final { return shard_services.get_recoverystate_perf_logger(); } PerfCounters &get_perf_logger() final { return shard_services.get_perf_logger(); } void log_state_enter(const char *state) final; void log_state_exit( const char *state_name, utime_t enter_time, uint64_t events, utime_t event_dur) final; void dump_recovery_info(Formatter *f) const final { } OstreamTemp get_clog_info() final { // not needed yet: replace with not a stub (needs to be wired up to monc) return OstreamTemp(CLOG_INFO, nullptr); } OstreamTemp get_clog_debug() final { // not needed yet: replace with not a stub (needs to be wired up to monc) return OstreamTemp(CLOG_DEBUG, nullptr); } OstreamTemp get_clog_error() final { // not needed yet: replace with not a stub (needs to be wired up to monc) return OstreamTemp(CLOG_ERROR, nullptr); } ceph::signedspan get_mnow() const final; HeartbeatStampsRef get_hb_stamps(int peer) final; void schedule_renew_lease(epoch_t plr, ceph::timespan delay) final; // Utility bool is_primary() const final { return peering_state.is_primary(); } bool is_nonprimary() const { return peering_state.is_nonprimary(); } bool is_peered() const final { return peering_state.is_peered(); } bool is_recovering() const final { return peering_state.is_recovering(); } bool is_backfilling() const final { return peering_state.is_backfilling(); } uint64_t get_last_user_version() const { return get_info().last_user_version; } bool get_need_up_thru() const { return peering_state.get_need_up_thru(); } epoch_t get_same_interval_since() const { return get_info().history.same_interval_since; } const auto& get_pgpool() const { return peering_state.get_pgpool(); } pg_shard_t get_primary() const { return peering_state.get_primary(); } /// initialize created PG void init( int role, const std::vector<int>& up, int up_primary, const std::vector<int>& acting, int acting_primary, const pg_history_t& history, const PastIntervals& pim, ceph::os::Transaction &t); seastar::future<> read_state(crimson::os::FuturizedStore::Shard* store); interruptible_future<> do_peering_event( PGPeeringEvent& evt, PeeringCtx &rctx); seastar::future<> handle_advance_map(cached_map_t next_map, PeeringCtx &rctx); seastar::future<> handle_activate_map(PeeringCtx &rctx); seastar::future<> handle_initialize(PeeringCtx &rctx); static hobject_t get_oid(const hobject_t& hobj); static RWState::State get_lock_type(const OpInfo &op_info); using load_obc_ertr = crimson::errorator< crimson::ct_error::enoent, crimson::ct_error::object_corrupted>; using load_obc_iertr = ::crimson::interruptible::interruptible_errorator< ::crimson::osd::IOInterruptCondition, load_obc_ertr>; using interruptor = ::crimson::interruptible::interruptor< ::crimson::osd::IOInterruptCondition>; public: using with_obc_func_t = std::function<load_obc_iertr::future<> (ObjectContextRef)>; load_obc_iertr::future<> with_locked_obc( const hobject_t &hobj, const OpInfo &op_info, with_obc_func_t&& f); interruptible_future<> handle_rep_op(Ref<MOSDRepOp> m); void log_operation( std::vector<pg_log_entry_t>&& logv, const eversion_t &trim_to, const eversion_t &roll_forward_to, const eversion_t &min_last_complete_ondisk, bool 
transaction_applied, ObjectStore::Transaction &txn, bool async = false); void replica_clear_repop_obc( const std::vector<pg_log_entry_t> &logv); void handle_rep_op_reply(const MOSDRepOpReply& m); interruptible_future<> do_update_log_missing( Ref<MOSDPGUpdateLogMissing> m, crimson::net::ConnectionRef conn); interruptible_future<> do_update_log_missing_reply( Ref<MOSDPGUpdateLogMissingReply> m); void print(std::ostream& os) const; void dump_primary(Formatter*); seastar::future<> submit_error_log( Ref<MOSDOp> m, const OpInfo &op_info, ObjectContextRef obc, const std::error_code e, ceph_tid_t rep_tid, eversion_t &version); private: using do_osd_ops_ertr = crimson::errorator< crimson::ct_error::eagain>; using do_osd_ops_iertr = ::crimson::interruptible::interruptible_errorator< ::crimson::osd::IOInterruptCondition, ::crimson::errorator<crimson::ct_error::eagain>>; template <typename Ret = void> using pg_rep_op_fut_t = std::tuple<interruptible_future<>, do_osd_ops_iertr::future<Ret>>; do_osd_ops_iertr::future<pg_rep_op_fut_t<MURef<MOSDOpReply>>> do_osd_ops( Ref<MOSDOp> m, crimson::net::ConnectionRef conn, ObjectContextRef obc, const OpInfo &op_info, const SnapContext& snapc); using do_osd_ops_success_func_t = std::function<do_osd_ops_iertr::future<>()>; using do_osd_ops_failure_func_t = std::function<do_osd_ops_iertr::future<>(const std::error_code&)>; struct do_osd_ops_params_t; do_osd_ops_iertr::future<pg_rep_op_fut_t<>> do_osd_ops( ObjectContextRef obc, std::vector<OSDOp>& ops, const OpInfo &op_info, const do_osd_ops_params_t &&params, do_osd_ops_success_func_t success_func, do_osd_ops_failure_func_t failure_func); template <class Ret, class SuccessFunc, class FailureFunc> do_osd_ops_iertr::future<pg_rep_op_fut_t<Ret>> do_osd_ops_execute( seastar::lw_shared_ptr<OpsExecuter> ox, std::vector<OSDOp>& ops, SuccessFunc&& success_func, FailureFunc&& failure_func); interruptible_future<MURef<MOSDOpReply>> do_pg_ops(Ref<MOSDOp> m); std::tuple<interruptible_future<>, interruptible_future<>> submit_transaction( ObjectContextRef&& obc, ceph::os::Transaction&& txn, osd_op_params_t&& oop, std::vector<pg_log_entry_t>&& log_entries); interruptible_future<> repair_object( const hobject_t& oid, eversion_t& v); void check_blocklisted_obc_watchers(ObjectContextRef &obc); private: PG_OSDMapGate osdmap_gate; ShardServices &shard_services; public: cached_map_t get_osdmap() { return peering_state.get_osdmap(); } eversion_t next_version() { return eversion_t(get_osdmap_epoch(), ++projected_last_update.version); } ShardServices& get_shard_services() final { return shard_services; } seastar::future<> stop(); private: std::unique_ptr<PGBackend> backend; std::unique_ptr<RecoveryBackend> recovery_backend; std::unique_ptr<PGRecovery> recovery_handler; PeeringState peering_state; eversion_t projected_last_update; public: ObjectContextRegistry obc_registry; ObjectContextLoader obc_loader; private: OSDriver osdriver; SnapMapper snap_mapper; public: // PeeringListener void publish_stats_to_osd() final; void clear_publish_stats() final; pg_stat_t get_stats() const; private: std::optional<pg_stat_t> pg_stats; public: RecoveryBackend* get_recovery_backend() final { return recovery_backend.get(); } PGRecovery* get_recovery_handler() final { return recovery_handler.get(); } PeeringState& get_peering_state() final { return peering_state; } bool has_reset_since(epoch_t epoch) const final { return peering_state.pg_has_reset_since(epoch); } const pg_missing_tracker_t& get_local_missing() const { return 
peering_state.get_pg_log().get_missing(); } epoch_t get_last_peering_reset() const final { return peering_state.get_last_peering_reset(); } const std::set<pg_shard_t> &get_acting_recovery_backfill() const { return peering_state.get_acting_recovery_backfill(); } bool is_backfill_target(pg_shard_t osd) const { return peering_state.is_backfill_target(osd); } void begin_peer_recover(pg_shard_t peer, const hobject_t oid) { peering_state.begin_peer_recover(peer, oid); } uint64_t min_peer_features() const { return peering_state.get_min_peer_features(); } const std::map<hobject_t, std::set<pg_shard_t>>& get_missing_loc_shards() const { return peering_state.get_missing_loc().get_missing_locs(); } const std::map<pg_shard_t, pg_missing_t> &get_shard_missing() const { return peering_state.get_peer_missing(); } epoch_t get_interval_start_epoch() const { return get_info().history.same_interval_since; } const pg_missing_const_i* get_shard_missing(pg_shard_t shard) const { if (shard == pg_whoami) return &get_local_missing(); else { auto it = peering_state.get_peer_missing().find(shard); if (it == peering_state.get_peer_missing().end()) return nullptr; else return &it->second; } } struct complete_op_t { const version_t user_version; const eversion_t version; const int err; }; interruptible_future<std::optional<complete_op_t>> already_complete(const osd_reqid_t& reqid); int get_recovery_op_priority() const { int64_t pri = 0; get_pgpool().info.opts.get(pool_opts_t::RECOVERY_OP_PRIORITY, &pri); return pri > 0 ? pri : crimson::common::local_conf()->osd_recovery_op_priority; } seastar::future<> mark_unfound_lost(int) { // TODO: see PrimaryLogPG::mark_all_unfound_lost() return seastar::now(); } bool old_peering_msg(epoch_t reply_epoch, epoch_t query_epoch) const; template <typename MsgType> bool can_discard_replica_op(const MsgType& m) const { return can_discard_replica_op(m, m.map_epoch); } private: // instead of seastar::gate, we use a boolean flag to indicate // whether the system is shutting down, as we don't need to track // continuations here. 
bool stopping = false; PGActivationBlocker wait_for_active_blocker; friend std::ostream& operator<<(std::ostream&, const PG& pg); friend class ClientRequest; friend struct CommonClientRequest; friend class PGAdvanceMap; template <class T> friend class PeeringEvent; friend class RepRequest; friend class LogMissingRequest; friend class LogMissingRequestReply; friend class BackfillRecovery; friend struct PGFacade; friend class InternalClientRequest; friend class WatchTimeoutRequest; friend class SnapTrimEvent; friend class SnapTrimObjSubEvent; private: seastar::future<bool> find_unfound() { return seastar::make_ready_future<bool>(true); } bool can_discard_replica_op(const Message& m, epoch_t m_map_epoch) const; bool can_discard_op(const MOSDOp& m) const; void context_registry_on_change(); bool is_missing_object(const hobject_t& soid) const { return peering_state.get_pg_log().get_missing().get_items().count(soid); } bool is_unreadable_object(const hobject_t &oid, eversion_t* v = 0) const final { return is_missing_object(oid) || !peering_state.get_missing_loc().readable_with_acting( oid, get_actingset(), v); } bool is_degraded_or_backfilling_object(const hobject_t& soid) const; const std::set<pg_shard_t> &get_actingset() const { return peering_state.get_actingset(); } private: friend class IOInterruptCondition; struct log_update_t { std::set<pg_shard_t> waiting_on; seastar::shared_promise<> all_committed; }; std::map<ceph_tid_t, log_update_t> log_entry_update_waiting_on; // snap trimming interval_set<snapid_t> snap_trimq; }; struct PG::do_osd_ops_params_t { crimson::net::ConnectionRef &get_connection() const { return conn; } osd_reqid_t get_reqid() const { return reqid; } utime_t get_mtime() const { return mtime; }; epoch_t get_map_epoch() const { return map_epoch; } entity_inst_t get_orig_source_inst() const { return orig_source_inst; } uint64_t get_features() const { return features; } // Only used by InternalClientRequest, no op flags bool has_flag(uint32_t flag) const { return false; } // Only used by ExecutableMessagePimpl entity_name_t get_source() const { return orig_source_inst.name; } crimson::net::ConnectionRef &conn; osd_reqid_t reqid; utime_t mtime; epoch_t map_epoch; entity_inst_t orig_source_inst; uint64_t features; }; std::ostream& operator<<(std::ostream&, const PG& pg); } #if FMT_VERSION >= 90000 template <> struct fmt::formatter<crimson::osd::PG> : fmt::ostream_formatter {}; #endif
23,713
28.385378
84
h
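pg.h above declares PG::get_lock_type(), which pg.cc uses to translate an op's read/write ordering flags into the RWState lock mode taken on the object context. A small stand-alone sketch of that mapping, with a plain enum and two booleans as hypothetical stand-ins for RWState::State and OpInfo:

#include <cassert>
#include <iostream>

// Hypothetical stand-in for RWState::State.
enum class LockType { Read, Write, Excl };

// Mirrors the mapping in PG::get_lock_type(): rw-ordered ops that also read take the
// exclusive lock, other rw-ordered ops take the write lock, and pure reads the read lock.
LockType get_lock_type(bool rwordered, bool may_read)
{
  if (rwordered && may_read) {
    return LockType::Excl;
  } else if (rwordered) {
    return LockType::Write;
  } else {
    assert(may_read);
    return LockType::Read;
  }
}

int main()
{
  std::cout << static_cast<int>(get_lock_type(true, true)) << '\n';  // 2: Excl
  std::cout << static_cast<int>(get_lock_type(false, true)) << '\n'; // 0: Read
}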
null
ceph-main/src/crimson/osd/pg_activation_blocker.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab

#include "crimson/osd/pg.h"
#include "crimson/osd/pg_activation_blocker.h"

namespace crimson::osd {

void PGActivationBlocker::dump_detail(Formatter *f) const
{
  f->dump_stream("pgid") << pg->get_pgid();
}

void PGActivationBlocker::unblock()
{
  p.set_value();
  p = {};
}

seastar::future<>
PGActivationBlocker::wait(PGActivationBlocker::BlockingEvent::TriggerI&& trigger)
{
  if (pg->get_peering_state().is_active()) {
    return seastar::now();
  } else {
    return trigger.maybe_record_blocking(p.get_shared_future(), *this);
  }
}

seastar::future<> PGActivationBlocker::stop()
{
  p.set_exception(crimson::common::system_shutdown_exception());
  return seastar::now();
}

} // namespace crimson::osd
821
21.216216
81
cc
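pg_activation_blocker.cc above parks incoming operations on a seastar::shared_promise until the PG reaches the active state, and unblock() releases every waiter at once. A rough standard-library analogue of that one-shot broadcast pattern (std::promise and std::shared_future in place of the seastar primitives, which differ in detail) might look like this:

#include <future>
#include <iostream>
#include <thread>
#include <vector>

// Hypothetical analogue of PGActivationBlocker: every waiter shares one future,
// and a single set_value() wakes all of them.
class ActivationBlocker {
  std::promise<void> p;
  std::shared_future<void> f{p.get_future().share()};
public:
  std::shared_future<void> wait() { return f; }  // cf. p.get_shared_future()
  void unblock() { p.set_value(); }              // cf. PGActivationBlocker::unblock()
};

int main()
{
  ActivationBlocker blocker;
  std::vector<std::thread> waiters;
  for (int i = 0; i < 3; ++i) {
    waiters.emplace_back([&blocker, i] {
      blocker.wait().wait();  // blocks until unblock() is called
      std::cout << "waiter " << i << " unblocked\n";
    });
  }
  blocker.unblock();
  for (auto& t : waiters) {
    t.join();
  }
}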
null
ceph-main/src/crimson/osd/pg_activation_blocker.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab

#pragma once

#include <seastar/core/future.hh>
#include <seastar/core/shared_future.hh>

#include "crimson/common/operation.h"
#include "crimson/osd/osd_operation.h"

namespace crimson::osd {

class PG;

class PGActivationBlocker : public crimson::BlockerT<PGActivationBlocker> {
  PG *pg;
  const spg_t pgid;
  seastar::shared_promise<> p;

protected:
  void dump_detail(Formatter *f) const;

public:
  static constexpr const char *type_name = "PGActivationBlocker";
  using Blocker = PGActivationBlocker;

  PGActivationBlocker(PG *pg) : pg(pg) {}
  void unblock();
  seastar::future<> wait(PGActivationBlocker::BlockingEvent::TriggerI&&);
  seastar::future<> stop();
};

} // namespace crimson::osd
815
21.666667
75
h
null
ceph-main/src/crimson/osd/pg_backend.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "pg_backend.h" #include <charconv> #include <optional> #include <boost/range/adaptor/filtered.hpp> #include <boost/range/adaptor/transformed.hpp> #include <boost/range/algorithm/copy.hpp> #include <fmt/format.h> #include <fmt/ostream.h> #include <seastar/core/print.hh> #include "messages/MOSDOp.h" #include "os/Transaction.h" #include "common/Checksummer.h" #include "common/Clock.h" #include "crimson/common/exception.h" #include "crimson/common/tmap_helpers.h" #include "crimson/os/futurized_collection.h" #include "crimson/os/futurized_store.h" #include "crimson/osd/osd_operation.h" #include "crimson/osd/object_context_loader.h" #include "replicated_backend.h" #include "replicated_recovery_backend.h" #include "ec_backend.h" #include "exceptions.h" namespace { seastar::logger& logger() { return crimson::get_logger(ceph_subsys_osd); } } using std::runtime_error; using std::string; using std::string_view; using crimson::common::local_conf; std::unique_ptr<PGBackend> PGBackend::create(pg_t pgid, const pg_shard_t pg_shard, const pg_pool_t& pool, crimson::os::CollectionRef coll, crimson::osd::ShardServices& shard_services, const ec_profile_t& ec_profile, DoutPrefixProvider &dpp) { switch (pool.type) { case pg_pool_t::TYPE_REPLICATED: return std::make_unique<ReplicatedBackend>(pgid, pg_shard, coll, shard_services, dpp); case pg_pool_t::TYPE_ERASURE: return std::make_unique<ECBackend>(pg_shard.shard, coll, shard_services, std::move(ec_profile), pool.stripe_width, dpp); default: throw runtime_error(seastar::format("unsupported pool type '{}'", pool.type)); } } PGBackend::PGBackend(shard_id_t shard, CollectionRef coll, crimson::osd::ShardServices &shard_services, DoutPrefixProvider &dpp) : shard{shard}, coll{coll}, shard_services{shard_services}, dpp{dpp}, store{&shard_services.get_store()} {} PGBackend::load_metadata_iertr::future <PGBackend::loaded_object_md_t::ref> PGBackend::load_metadata(const hobject_t& oid) { return interruptor::make_interruptible(store->get_attrs( coll, ghobject_t{oid, ghobject_t::NO_GEN, shard})).safe_then_interruptible( [oid](auto &&attrs) -> load_metadata_ertr::future<loaded_object_md_t::ref>{ loaded_object_md_t::ref ret(new loaded_object_md_t()); if (auto oiiter = attrs.find(OI_ATTR); oiiter != attrs.end()) { bufferlist bl = std::move(oiiter->second); try { ret->os = ObjectState( object_info_t(bl, oid), true); } catch (const buffer::error&) { logger().warn("unable to decode ObjectState"); throw crimson::osd::invalid_argument(); } } else { logger().error( "load_metadata: object {} present but missing object info", oid); return crimson::ct_error::object_corrupted::make(); } if (oid.is_head()) { // Returning object_corrupted when the object exsits and the // Snapset is either not found or empty. 
bool object_corrupted = true; if (auto ssiter = attrs.find(SS_ATTR); ssiter != attrs.end()) { object_corrupted = false; bufferlist bl = std::move(ssiter->second); if (bl.length()) { ret->ssc = new crimson::osd::SnapSetContext(oid.get_snapdir()); try { ret->ssc->snapset = SnapSet(bl); ret->ssc->exists = true; logger().debug( "load_metadata: object {} and snapset {} present", oid, ret->ssc->snapset); } catch (const buffer::error&) { logger().warn("unable to decode SnapSet"); throw crimson::osd::invalid_argument(); } } else { object_corrupted = true; } } if (object_corrupted) { logger().error( "load_metadata: object {} present but missing snapset", oid); return crimson::ct_error::object_corrupted::make(); } } return load_metadata_ertr::make_ready_future<loaded_object_md_t::ref>( std::move(ret)); }, crimson::ct_error::enoent::handle([oid] { logger().debug( "load_metadata: object {} doesn't exist, returning empty metadata", oid); return load_metadata_ertr::make_ready_future<loaded_object_md_t::ref>( new loaded_object_md_t{ ObjectState( object_info_t(oid), false), oid.is_head() ? (new crimson::osd::SnapSetContext(oid)) : nullptr }); })); } PGBackend::rep_op_fut_t PGBackend::mutate_object( std::set<pg_shard_t> pg_shards, crimson::osd::ObjectContextRef &&obc, ceph::os::Transaction&& txn, osd_op_params_t&& osd_op_p, epoch_t min_epoch, epoch_t map_epoch, std::vector<pg_log_entry_t>&& log_entries) { logger().trace("mutate_object: num_ops={}", txn.get_num_ops()); if (obc->obs.exists) { #if 0 obc->obs.oi.version = ctx->at_version; obc->obs.oi.prior_version = ctx->obs->oi.version; #endif obc->obs.oi.prior_version = obc->obs.oi.version; obc->obs.oi.version = osd_op_p.at_version; if (osd_op_p.user_at_version > obc->obs.oi.user_version) obc->obs.oi.user_version = osd_op_p.user_at_version; obc->obs.oi.last_reqid = osd_op_p.req_id; obc->obs.oi.mtime = osd_op_p.mtime; obc->obs.oi.local_mtime = ceph_clock_now(); // object_info_t { ceph::bufferlist osv; obc->obs.oi.encode_no_oid(osv, CEPH_FEATURES_ALL); // TODO: get_osdmap()->get_features(CEPH_ENTITY_TYPE_OSD, nullptr)); txn.setattr(coll->get_cid(), ghobject_t{obc->obs.oi.soid}, OI_ATTR, osv); } // snapset if (obc->obs.oi.soid.snap == CEPH_NOSNAP) { logger().debug("final snapset {} in {}", obc->ssc->snapset, obc->obs.oi.soid); ceph::bufferlist bss; encode(obc->ssc->snapset, bss); txn.setattr(coll->get_cid(), ghobject_t{obc->obs.oi.soid}, SS_ATTR, bss); obc->ssc->exists = true; } else { logger().debug("no snapset (this is a clone)"); } } else { // reset cached ObjectState without enforcing eviction obc->obs.oi = object_info_t(obc->obs.oi.soid); } return _submit_transaction( std::move(pg_shards), obc->obs.oi.soid, std::move(txn), std::move(osd_op_p), min_epoch, map_epoch, std::move(log_entries)); } static inline bool _read_verify_data( const object_info_t& oi, const ceph::bufferlist& data) { if (oi.is_data_digest() && oi.size == data.length()) { // whole object? can we verify the checksum? 
if (auto crc = data.crc32c(-1); crc != oi.data_digest) { logger().error("full-object read crc {} != expected {} on {}", crc, oi.data_digest, oi.soid); // todo: mark soid missing, perform recovery, and retry return false; } } return true; } PGBackend::read_ierrorator::future<> PGBackend::read(const ObjectState& os, OSDOp& osd_op, object_stat_sum_t& delta_stats) { const auto& oi = os.oi; const ceph_osd_op& op = osd_op.op; const uint64_t offset = op.extent.offset; uint64_t length = op.extent.length; logger().trace("read: {} {}~{}", oi.soid, offset, length); if (!os.exists || os.oi.is_whiteout()) { logger().debug("{}: {} DNE", __func__, os.oi.soid); return crimson::ct_error::enoent::make(); } // are we beyond truncate_size? size_t size = oi.size; if ((op.extent.truncate_seq > oi.truncate_seq) && (op.extent.truncate_size < offset + length) && (op.extent.truncate_size < size)) { size = op.extent.truncate_size; } if (offset >= size) { // read size was trimmed to zero and it is expected to do nothing, return read_errorator::now(); } if (!length) { // read the whole object if length is 0 length = size; } return _read(oi.soid, offset, length, op.flags).safe_then_interruptible_tuple( [&delta_stats, &oi, &osd_op](auto&& bl) -> read_errorator::future<> { if (!_read_verify_data(oi, bl)) { // crc mismatches return crimson::ct_error::object_corrupted::make(); } logger().debug("read: data length: {}", bl.length()); osd_op.op.extent.length = bl.length(); osd_op.rval = 0; delta_stats.num_rd++; delta_stats.num_rd_kb += shift_round_up(bl.length(), 10); osd_op.outdata = std::move(bl); return read_errorator::now(); }, crimson::ct_error::input_output_error::handle([] { return read_errorator::future<>{crimson::ct_error::object_corrupted::make()}; }), read_errorator::pass_further{}); } PGBackend::read_ierrorator::future<> PGBackend::sparse_read(const ObjectState& os, OSDOp& osd_op, object_stat_sum_t& delta_stats) { if (!os.exists || os.oi.is_whiteout()) { logger().debug("{}: {} DNE", __func__, os.oi.soid); return crimson::ct_error::enoent::make(); } const auto& op = osd_op.op; /* clients (particularly cephfs) may send truncate operations out of order * w.r.t. reads. op.extent.truncate_seq and op.extent.truncate_size allow * the OSD to determine whether the client submitted read needs to be * adjusted to compensate for a truncate the OSD hasn't seen yet. 
*/ uint64_t adjusted_size = os.oi.size; const uint64_t offset = op.extent.offset; uint64_t adjusted_length = op.extent.length; if ((os.oi.truncate_seq < op.extent.truncate_seq) && (op.extent.offset + op.extent.length > op.extent.truncate_size) && (adjusted_size > op.extent.truncate_size)) { adjusted_size = op.extent.truncate_size; } if (offset > adjusted_size) { adjusted_length = 0; } else if (offset + adjusted_length > adjusted_size) { adjusted_length = adjusted_size - offset; } logger().trace("sparse_read: {} {}~{}", os.oi.soid, op.extent.offset, op.extent.length); return interruptor::make_interruptible(store->fiemap(coll, ghobject_t{os.oi.soid}, offset, adjusted_length)).safe_then_interruptible( [&delta_stats, &os, &osd_op, this](auto&& m) { return seastar::do_with(interval_set<uint64_t>{std::move(m)}, [&delta_stats, &os, &osd_op, this](auto&& extents) { return interruptor::make_interruptible(store->readv(coll, ghobject_t{os.oi.soid}, extents, osd_op.op.flags)).safe_then_interruptible_tuple( [&delta_stats, &os, &osd_op, &extents](auto&& bl) -> read_errorator::future<> { if (_read_verify_data(os.oi, bl)) { osd_op.op.extent.length = bl.length(); // re-encode since it might be modified ceph::encode(extents, osd_op.outdata); encode_destructively(bl, osd_op.outdata); logger().trace("sparse_read got {} bytes from object {}", osd_op.op.extent.length, os.oi.soid); delta_stats.num_rd++; delta_stats.num_rd_kb += shift_round_up(osd_op.op.extent.length, 10); return read_errorator::make_ready_future<>(); } else { // crc mismatches return crimson::ct_error::object_corrupted::make(); } }, crimson::ct_error::input_output_error::handle([] { return read_errorator::future<>{crimson::ct_error::object_corrupted::make()}; }), read_errorator::pass_further{}); }); }); } namespace { template<class CSum> PGBackend::checksum_errorator::future<> do_checksum(ceph::bufferlist& init_value_bl, size_t chunk_size, const ceph::bufferlist& buf, ceph::bufferlist& result) { typename CSum::init_value_t init_value; auto init_value_p = init_value_bl.cbegin(); try { decode(init_value, init_value_p); // chop off the consumed part init_value_bl.splice(0, init_value_p.get_off()); } catch (const ceph::buffer::end_of_buffer&) { logger().warn("{}: init value not provided", __func__); return crimson::ct_error::invarg::make(); } const uint32_t chunk_count = buf.length() / chunk_size; ceph::bufferptr csum_data{ ceph::buffer::create(sizeof(typename CSum::value_t) * chunk_count)}; Checksummer::calculate<CSum>( init_value, chunk_size, 0, buf.length(), buf, &csum_data); encode(chunk_count, result); result.append(std::move(csum_data)); return PGBackend::checksum_errorator::now(); } } PGBackend::checksum_ierrorator::future<> PGBackend::checksum(const ObjectState& os, OSDOp& osd_op) { // sanity tests and normalize the argments auto& checksum = osd_op.op.checksum; if (checksum.offset == 0 && checksum.length == 0) { // zeroed offset+length implies checksum whole object checksum.length = os.oi.size; } else if (checksum.offset >= os.oi.size) { // read size was trimmed to zero, do nothing, // see PGBackend::read() return checksum_errorator::now(); } if (checksum.chunk_size > 0) { if (checksum.length == 0) { logger().warn("{}: length required when chunk size provided", __func__); return crimson::ct_error::invarg::make(); } if (checksum.length % checksum.chunk_size != 0) { logger().warn("{}: length not aligned to chunk size", __func__); return crimson::ct_error::invarg::make(); } } else { checksum.chunk_size = checksum.length; } if (checksum.length 
== 0) { uint32_t count = 0; encode(count, osd_op.outdata); return checksum_errorator::now(); } // read the chunk to be checksum'ed return _read(os.oi.soid, checksum.offset, checksum.length, osd_op.op.flags) .safe_then_interruptible( [&osd_op](auto&& read_bl) mutable -> checksum_errorator::future<> { auto& checksum = osd_op.op.checksum; if (read_bl.length() != checksum.length) { logger().warn("checksum: bytes read {} != {}", read_bl.length(), checksum.length); return crimson::ct_error::invarg::make(); } // calculate its checksum and put the result in outdata switch (checksum.type) { case CEPH_OSD_CHECKSUM_OP_TYPE_XXHASH32: return do_checksum<Checksummer::xxhash32>(osd_op.indata, checksum.chunk_size, read_bl, osd_op.outdata); case CEPH_OSD_CHECKSUM_OP_TYPE_XXHASH64: return do_checksum<Checksummer::xxhash64>(osd_op.indata, checksum.chunk_size, read_bl, osd_op.outdata); case CEPH_OSD_CHECKSUM_OP_TYPE_CRC32C: return do_checksum<Checksummer::crc32c>(osd_op.indata, checksum.chunk_size, read_bl, osd_op.outdata); default: logger().warn("checksum: unknown crc type ({})", static_cast<uint32_t>(checksum.type)); return crimson::ct_error::invarg::make(); } }); } PGBackend::cmp_ext_ierrorator::future<> PGBackend::cmp_ext(const ObjectState& os, OSDOp& osd_op) { const ceph_osd_op& op = osd_op.op; uint64_t obj_size = os.oi.size; if (os.oi.truncate_seq < op.extent.truncate_seq && op.extent.offset + op.extent.length > op.extent.truncate_size) { obj_size = op.extent.truncate_size; } uint64_t ext_len; if (op.extent.offset >= obj_size) { ext_len = 0; } else if (op.extent.offset + op.extent.length > obj_size) { ext_len = obj_size - op.extent.offset; } else { ext_len = op.extent.length; } auto read_ext = ll_read_ierrorator::make_ready_future<ceph::bufferlist>(); if (ext_len == 0) { logger().debug("{}: zero length extent", __func__); } else if (!os.exists || os.oi.is_whiteout()) { logger().debug("{}: {} DNE", __func__, os.oi.soid); } else { read_ext = _read(os.oi.soid, op.extent.offset, ext_len, 0); } return read_ext.safe_then_interruptible([&osd_op](auto&& read_bl) -> cmp_ext_errorator::future<> { for (unsigned index = 0; index < osd_op.indata.length(); index++) { char byte_in_op = osd_op.indata[index]; char byte_from_disk = (index < read_bl.length() ? read_bl[index] : 0); if (byte_in_op != byte_from_disk) { logger().debug("cmp_ext: mismatch at {}", index); // Unlike other ops, we set osd_op.rval here and return a different // error code via ct_error::cmp_fail. 
osd_op.rval = -MAX_ERRNO - index; return crimson::ct_error::cmp_fail::make(); } } osd_op.rval = 0; return cmp_ext_errorator::make_ready_future<>(); }); } PGBackend::stat_ierrorator::future<> PGBackend::stat( const ObjectState& os, OSDOp& osd_op, object_stat_sum_t& delta_stats) { if (os.exists/* TODO: && !os.is_whiteout() */) { logger().debug("stat os.oi.size={}, os.oi.mtime={}", os.oi.size, os.oi.mtime); encode(os.oi.size, osd_op.outdata); encode(os.oi.mtime, osd_op.outdata); } else { logger().debug("stat object does not exist"); return crimson::ct_error::enoent::make(); } delta_stats.num_rd++; return stat_errorator::now(); } PGBackend::write_iertr::future<> PGBackend::_writefull( ObjectState& os, off_t truncate_size, const bufferlist& bl, ceph::os::Transaction& txn, osd_op_params_t& osd_op_params, object_stat_sum_t& delta_stats, unsigned flags) { const bool existing = maybe_create_new_object(os, txn, delta_stats); if (existing && bl.length() < os.oi.size) { txn.truncate(coll->get_cid(), ghobject_t{os.oi.soid}, bl.length()); truncate_update_size_and_usage(delta_stats, os.oi, truncate_size); osd_op_params.clean_regions.mark_data_region_dirty( bl.length(), os.oi.size - bl.length()); } if (bl.length()) { txn.write( coll->get_cid(), ghobject_t{os.oi.soid}, 0, bl.length(), bl, flags); update_size_and_usage( delta_stats, os.oi, 0, bl.length(), true); osd_op_params.clean_regions.mark_data_region_dirty( 0, std::max((uint64_t)bl.length(), os.oi.size)); } return seastar::now(); } PGBackend::write_iertr::future<> PGBackend::_truncate( ObjectState& os, ceph::os::Transaction& txn, osd_op_params_t& osd_op_params, object_stat_sum_t& delta_stats, size_t offset, size_t truncate_size, uint32_t truncate_seq) { if (truncate_seq) { assert(offset == truncate_size); if (truncate_seq <= os.oi.truncate_seq) { logger().debug("{} truncate seq {} <= current {}, no-op", __func__, truncate_seq, os.oi.truncate_seq); return write_ertr::make_ready_future<>(); } else { logger().debug("{} truncate seq {} > current {}, truncating", __func__, truncate_seq, os.oi.truncate_seq); os.oi.truncate_seq = truncate_seq; os.oi.truncate_size = truncate_size; } } maybe_create_new_object(os, txn, delta_stats); if (os.oi.size != offset) { txn.truncate( coll->get_cid(), ghobject_t{os.oi.soid}, offset); if (os.oi.size > offset) { // TODO: modified_ranges.union_of(trim); osd_op_params.clean_regions.mark_data_region_dirty( offset, os.oi.size - offset); } else { // os.oi.size < offset osd_op_params.clean_regions.mark_data_region_dirty( os.oi.size, offset - os.oi.size); } truncate_update_size_and_usage(delta_stats, os.oi, offset); os.oi.clear_data_digest(); } delta_stats.num_wr++; return write_ertr::now(); } bool PGBackend::maybe_create_new_object( ObjectState& os, ceph::os::Transaction& txn, object_stat_sum_t& delta_stats) { if (!os.exists) { ceph_assert(!os.oi.is_whiteout()); os.exists = true; os.oi.new_object(); txn.touch(coll->get_cid(), ghobject_t{os.oi.soid}); delta_stats.num_objects++; return false; } else if (os.oi.is_whiteout()) { os.oi.clear_flag(object_info_t::FLAG_WHITEOUT); delta_stats.num_whiteouts--; } return true; } void PGBackend::update_size_and_usage(object_stat_sum_t& delta_stats, object_info_t& oi, uint64_t offset, uint64_t length, bool write_full) { if (write_full || (offset + length > oi.size && length)) { uint64_t new_size = offset + length; delta_stats.num_bytes -= oi.size; delta_stats.num_bytes += new_size; oi.size = new_size; } delta_stats.num_wr++; delta_stats.num_wr_kb += shift_round_up(length, 10); } void 
PGBackend::truncate_update_size_and_usage(object_stat_sum_t& delta_stats, object_info_t& oi, uint64_t truncate_size) { if (oi.size != truncate_size) { delta_stats.num_bytes -= oi.size; delta_stats.num_bytes += truncate_size; oi.size = truncate_size; } } static bool is_offset_and_length_valid( const std::uint64_t offset, const std::uint64_t length) { if (const std::uint64_t max = local_conf()->osd_max_object_size; offset >= max || length > max || offset + length > max) { logger().debug("{} osd_max_object_size: {}, offset: {}, len: {}; " "Hard limit of object size is 4GB", __func__, max, offset, length); return false; } else { return true; } } PGBackend::interruptible_future<> PGBackend::set_allochint( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& txn, object_stat_sum_t& delta_stats) { maybe_create_new_object(os, txn, delta_stats); os.oi.expected_object_size = osd_op.op.alloc_hint.expected_object_size; os.oi.expected_write_size = osd_op.op.alloc_hint.expected_write_size; os.oi.alloc_hint_flags = osd_op.op.alloc_hint.flags; txn.set_alloc_hint(coll->get_cid(), ghobject_t{os.oi.soid}, os.oi.expected_object_size, os.oi.expected_write_size, os.oi.alloc_hint_flags); return seastar::now(); } PGBackend::write_iertr::future<> PGBackend::write( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& txn, osd_op_params_t& osd_op_params, object_stat_sum_t& delta_stats) { const ceph_osd_op& op = osd_op.op; uint64_t offset = op.extent.offset; uint64_t length = op.extent.length; bufferlist buf = osd_op.indata; if (op.extent.length != osd_op.indata.length()) { return crimson::ct_error::invarg::make(); } if (!is_offset_and_length_valid(op.extent.offset, op.extent.length)) { return crimson::ct_error::file_too_large::make(); } if (auto seq = os.oi.truncate_seq; seq != 0 && op.extent.truncate_seq < seq) { // old write, arrived after trimtrunc if (offset + length > os.oi.size) { // no-op if (offset > os.oi.size) { length = 0; buf.clear(); } else { // truncate auto len = os.oi.size - offset; buf.splice(len, length); length = len; } } } else if (op.extent.truncate_seq > seq) { // write arrives before trimtrunc if (os.exists && !os.oi.is_whiteout()) { txn.truncate(coll->get_cid(), ghobject_t{os.oi.soid}, op.extent.truncate_size); if (op.extent.truncate_size != os.oi.size) { os.oi.size = length; if (op.extent.truncate_size > os.oi.size) { osd_op_params.clean_regions.mark_data_region_dirty(os.oi.size, op.extent.truncate_size - os.oi.size); } else { osd_op_params.clean_regions.mark_data_region_dirty(op.extent.truncate_size, os.oi.size - op.extent.truncate_size); } } truncate_update_size_and_usage(delta_stats, os.oi, op.extent.truncate_size); } os.oi.truncate_seq = op.extent.truncate_seq; os.oi.truncate_size = op.extent.truncate_size; } maybe_create_new_object(os, txn, delta_stats); if (length == 0) { if (offset > os.oi.size) { txn.truncate(coll->get_cid(), ghobject_t{os.oi.soid}, op.extent.offset); truncate_update_size_and_usage(delta_stats, os.oi, op.extent.offset); } else { txn.nop(); } } else { txn.write(coll->get_cid(), ghobject_t{os.oi.soid}, offset, length, std::move(buf), op.flags); update_size_and_usage(delta_stats, os.oi, offset, length); } osd_op_params.clean_regions.mark_data_region_dirty(op.extent.offset, op.extent.length); return seastar::now(); } PGBackend::interruptible_future<> PGBackend::write_same( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& txn, osd_op_params_t& osd_op_params, object_stat_sum_t& delta_stats) { const ceph_osd_op& op = osd_op.op; const uint64_t len 
= op.writesame.length; if (len == 0) { return seastar::now(); } if (op.writesame.data_length == 0 || len % op.writesame.data_length != 0 || op.writesame.data_length != osd_op.indata.length()) { throw crimson::osd::invalid_argument(); } ceph::bufferlist repeated_indata; for (uint64_t size = 0; size < len; size += op.writesame.data_length) { repeated_indata.append(osd_op.indata); } maybe_create_new_object(os, txn, delta_stats); txn.write(coll->get_cid(), ghobject_t{os.oi.soid}, op.writesame.offset, len, std::move(repeated_indata), op.flags); update_size_and_usage(delta_stats, os.oi, op.writesame.offset, len); osd_op_params.clean_regions.mark_data_region_dirty(op.writesame.offset, len); return seastar::now(); } PGBackend::write_iertr::future<> PGBackend::writefull( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& txn, osd_op_params_t& osd_op_params, object_stat_sum_t& delta_stats) { const ceph_osd_op& op = osd_op.op; if (op.extent.length != osd_op.indata.length()) { return crimson::ct_error::invarg::make(); } if (!is_offset_and_length_valid(op.extent.offset, op.extent.length)) { return crimson::ct_error::file_too_large::make(); } return _writefull( os, op.extent.truncate_size, osd_op.indata, txn, osd_op_params, delta_stats, op.flags); } PGBackend::rollback_iertr::future<> PGBackend::rollback( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& txn, osd_op_params_t& osd_op_params, object_stat_sum_t& delta_stats, crimson::osd::ObjectContextRef head, crimson::osd::ObjectContextLoader& obc_loader) { const ceph_osd_op& op = osd_op.op; snapid_t snapid = (uint64_t)op.snap.snapid; assert(os.oi.soid.is_head()); logger().debug("{} deleting {} and rolling back to old snap {}", __func__, os.oi.soid ,snapid); hobject_t target_coid = os.oi.soid; target_coid.snap = snapid; return obc_loader.with_clone_obc_only<RWState::RWWRITE>( head, target_coid, [this, &os, &txn, &delta_stats, &osd_op_params] (auto resolved_obc) { if (resolved_obc->obs.oi.soid.is_head()) { // no-op: The resolved oid returned the head object logger().debug("PGBackend::rollback: loaded head_obc: {}" " do nothing", resolved_obc->obs.oi.soid); return rollback_iertr::now(); } /* TODO: https://tracker.ceph.com/issues/59114 This implementation will not * behave correctly for a rados operation consisting of a mutation followed * by a rollback to a snapshot since the last mutation of the object. * The correct behavior would be for the rollback to undo the mutation * earlier in the operation by resolving to the clone created at the start * of the operation (see resolve_oid). * Instead, it will select HEAD leaving that mutation intact since the SnapSet won't * yet contain that clone. This behavior exists in classic as well. 
*/ logger().debug("PGBackend::rollback: loaded clone_obc: {}", resolved_obc->obs.oi.soid); // 1) Delete current head if (os.exists) { txn.remove(coll->get_cid(), ghobject_t{os.oi.soid, ghobject_t::NO_GEN, shard}); } // 2) Clone correct snapshot into head txn.clone(coll->get_cid(), ghobject_t{resolved_obc->obs.oi.soid}, ghobject_t{os.oi.soid}); // Copy clone obc.os.oi to os.oi os.oi.clear_flag(object_info_t::FLAG_WHITEOUT); os.oi.copy_user_bits(resolved_obc->obs.oi); delta_stats.num_bytes -= os.oi.size; delta_stats.num_bytes += resolved_obc->obs.oi.size; osd_op_params.clean_regions.mark_data_region_dirty(0, std::max(os.oi.size, resolved_obc->obs.oi.size)); osd_op_params.clean_regions.mark_omap_dirty(); // TODO: 3) Calculate clone_overlaps by following overlaps // forward from rollback snapshot // https://tracker.ceph.com/issues/58263 return rollback_iertr::now(); }).safe_then_interruptible([] { logger().debug("PGBackend::rollback succefully"); return rollback_iertr::now(); },// there's no snapshot here, or there's no object. // if there's no snapshot, we delete the object; // otherwise, do nothing. crimson::ct_error::enoent::handle( [this, &os, &snapid, &txn, &delta_stats] { logger().debug("PGBackend::rollback: deleting head on {}" " with snap_id of {}" " because got ENOENT|whiteout on obc lookup", os.oi.soid, snapid); return remove(os, txn, delta_stats, false); }), rollback_ertr::pass_further{}, crimson::ct_error::assert_all{"unexpected error in rollback"} ); } PGBackend::append_ierrorator::future<> PGBackend::append( ObjectState& os, OSDOp& osd_op, ceph::os::Transaction& txn, osd_op_params_t& osd_op_params, object_stat_sum_t& delta_stats) { const ceph_osd_op& op = osd_op.op; if (op.extent.length != osd_op.indata.length()) { return crimson::ct_error::invarg::make(); } maybe_create_new_object(os, txn, delta_stats); if (op.extent.length) { txn.write(coll->get_cid(), ghobject_t{os.oi.soid}, os.oi.size /* offset */, op.extent.length, std::move(osd_op.indata), op.flags); update_size_and_usage(delta_stats, os.oi, os.oi.size, op.extent.length); osd_op_params.clean_regions.mark_data_region_dirty(os.oi.size, op.extent.length); } return seastar::now(); } PGBackend::write_iertr::future<> PGBackend::truncate( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& txn, osd_op_params_t& osd_op_params, object_stat_sum_t& delta_stats) { if (!os.exists || os.oi.is_whiteout()) { logger().debug("{} object dne, truncate is a no-op", __func__); return write_ertr::now(); } const ceph_osd_op& op = osd_op.op; if (!is_offset_and_length_valid(op.extent.offset, op.extent.length)) { return crimson::ct_error::file_too_large::make(); } return _truncate( os, txn, osd_op_params, delta_stats, op.extent.offset, op.extent.truncate_size, op.extent.truncate_seq); } PGBackend::write_iertr::future<> PGBackend::zero( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& txn, osd_op_params_t& osd_op_params, object_stat_sum_t& delta_stats) { if (!os.exists || os.oi.is_whiteout()) { logger().debug("{} object dne, zero is a no-op", __func__); return write_ertr::now(); } const ceph_osd_op& op = osd_op.op; if (!is_offset_and_length_valid(op.extent.offset, op.extent.length)) { return crimson::ct_error::file_too_large::make(); } if (op.extent.offset >= os.oi.size || op.extent.length == 0) { return write_iertr::now(); // noop } if (op.extent.offset + op.extent.length >= os.oi.size) { return _truncate( os, txn, osd_op_params, delta_stats, op.extent.offset, op.extent.truncate_size, op.extent.truncate_seq); } 
txn.zero(coll->get_cid(), ghobject_t{os.oi.soid}, op.extent.offset, op.extent.length); // TODO: modified_ranges.union_of(zeroed); osd_op_params.clean_regions.mark_data_region_dirty(op.extent.offset, op.extent.length); delta_stats.num_wr++; os.oi.clear_data_digest(); return write_ertr::now(); } PGBackend::create_iertr::future<> PGBackend::create( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& txn, object_stat_sum_t& delta_stats) { if (os.exists && !os.oi.is_whiteout() && (osd_op.op.flags & CEPH_OSD_OP_FLAG_EXCL)) { // this is an exclusive create return crimson::ct_error::eexist::make(); } if (osd_op.indata.length()) { // handle the legacy. `category` is no longer implemented. try { auto p = osd_op.indata.cbegin(); std::string category; decode(category, p); } catch (buffer::error&) { return crimson::ct_error::invarg::make(); } } maybe_create_new_object(os, txn, delta_stats); txn.create(coll->get_cid(), ghobject_t{os.oi.soid, ghobject_t::NO_GEN, shard}); return seastar::now(); } PGBackend::interruptible_future<> PGBackend::remove(ObjectState& os, ceph::os::Transaction& txn) { // todo: snapset txn.remove(coll->get_cid(), ghobject_t{os.oi.soid, ghobject_t::NO_GEN, shard}); os.oi.size = 0; os.oi.new_object(); os.exists = false; // todo: update watchers if (os.oi.is_whiteout()) { os.oi.clear_flag(object_info_t::FLAG_WHITEOUT); } return seastar::now(); } PGBackend::remove_iertr::future<> PGBackend::remove(ObjectState& os, ceph::os::Transaction& txn, object_stat_sum_t& delta_stats, bool whiteout) { if (!os.exists) { return crimson::ct_error::enoent::make(); } if (!os.exists) { logger().debug("{} {} does not exist",__func__, os.oi.soid); return seastar::now(); } if (whiteout && os.oi.is_whiteout()) { logger().debug("{} whiteout set on {} ",__func__, os.oi.soid); return seastar::now(); } txn.remove(coll->get_cid(), ghobject_t{os.oi.soid, ghobject_t::NO_GEN, shard}); delta_stats.num_bytes -= os.oi.size; os.oi.size = 0; os.oi.new_object(); // todo: clone_overlap if (whiteout) { logger().debug("{} setting whiteout on {} ",__func__, os.oi.soid); os.oi.set_flag(object_info_t::FLAG_WHITEOUT); delta_stats.num_whiteouts++; txn.create(coll->get_cid(), ghobject_t{os.oi.soid, ghobject_t::NO_GEN, shard}); return seastar::now(); } // todo: update watchers if (os.oi.is_whiteout()) { os.oi.clear_flag(object_info_t::FLAG_WHITEOUT); delta_stats.num_whiteouts--; } delta_stats.num_objects--; os.exists = false; return seastar::now(); } PGBackend::interruptible_future<std::tuple<std::vector<hobject_t>, hobject_t>> PGBackend::list_objects(const hobject_t& start, uint64_t limit) const { auto gstart = start.is_min() ? 
ghobject_t{} : ghobject_t{start, 0, shard}; return interruptor::make_interruptible(store->list_objects(coll, gstart, ghobject_t::get_max(), limit)) .then_interruptible([](auto ret) { auto& [gobjects, next] = ret; std::vector<hobject_t> objects; boost::copy(gobjects | boost::adaptors::filtered([](const ghobject_t& o) { if (o.is_pgmeta()) { return false; } else if (o.hobj.is_temp()) { return false; } else { return o.is_no_gen(); } }) | boost::adaptors::transformed([](const ghobject_t& o) { return o.hobj; }), std::back_inserter(objects)); return seastar::make_ready_future<std::tuple<std::vector<hobject_t>, hobject_t>>( std::make_tuple(objects, next.hobj)); }); } PGBackend::setxattr_ierrorator::future<> PGBackend::setxattr( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& txn, object_stat_sum_t& delta_stats) { if (local_conf()->osd_max_attr_size > 0 && osd_op.op.xattr.value_len > local_conf()->osd_max_attr_size) { return crimson::ct_error::file_too_large::make(); } const auto max_name_len = std::min<uint64_t>( store->get_max_attr_name_length(), local_conf()->osd_max_attr_name_len); if (osd_op.op.xattr.name_len > max_name_len) { return crimson::ct_error::enametoolong::make(); } maybe_create_new_object(os, txn, delta_stats); std::string name{"_"}; ceph::bufferlist val; { auto bp = osd_op.indata.cbegin(); bp.copy(osd_op.op.xattr.name_len, name); bp.copy(osd_op.op.xattr.value_len, val); } logger().debug("setxattr on obj={} for attr={}", os.oi.soid, name); txn.setattr(coll->get_cid(), ghobject_t{os.oi.soid}, name, val); delta_stats.num_wr++; return seastar::now(); } PGBackend::get_attr_ierrorator::future<> PGBackend::getxattr( const ObjectState& os, OSDOp& osd_op, object_stat_sum_t& delta_stats) const { std::string name; ceph::bufferlist val; { auto bp = osd_op.indata.cbegin(); std::string aname; bp.copy(osd_op.op.xattr.name_len, aname); name = "_" + aname; } logger().debug("getxattr on obj={} for attr={}", os.oi.soid, name); return getxattr(os.oi.soid, std::move(name)).safe_then_interruptible( [&delta_stats, &osd_op] (ceph::bufferlist&& val) { osd_op.outdata = std::move(val); osd_op.op.xattr.value_len = osd_op.outdata.length(); delta_stats.num_rd++; delta_stats.num_rd_kb += shift_round_up(osd_op.outdata.length(), 10); return get_attr_errorator::now(); }); } PGBackend::get_attr_ierrorator::future<ceph::bufferlist> PGBackend::getxattr( const hobject_t& soid, std::string_view key) const { return store->get_attr(coll, ghobject_t{soid}, key); } PGBackend::get_attr_ierrorator::future<ceph::bufferlist> PGBackend::getxattr( const hobject_t& soid, std::string&& key) const { return seastar::do_with(key, [this, &soid](auto &key) { return store->get_attr(coll, ghobject_t{soid}, key); }); } PGBackend::get_attr_ierrorator::future<> PGBackend::get_xattrs( const ObjectState& os, OSDOp& osd_op, object_stat_sum_t& delta_stats) const { return store->get_attrs(coll, ghobject_t{os.oi.soid}).safe_then( [&delta_stats, &osd_op](auto&& attrs) { std::vector<std::pair<std::string, bufferlist>> user_xattrs; ceph::bufferlist bl; for (auto& [key, val] : attrs) { if (key.size() > 1 && key[0] == '_') { bl.append(std::move(val)); user_xattrs.emplace_back(key.substr(1), std::move(bl)); } } ceph::encode(user_xattrs, osd_op.outdata); delta_stats.num_rd++; delta_stats.num_rd_kb += shift_round_up(bl.length(), 10); return get_attr_errorator::now(); }); } namespace { template<typename U, typename V> int do_cmp_xattr(int op, const U& lhs, const V& rhs) { switch (op) { case CEPH_OSD_CMPXATTR_OP_EQ: return lhs == rhs; case 
CEPH_OSD_CMPXATTR_OP_NE: return lhs != rhs; case CEPH_OSD_CMPXATTR_OP_GT: return lhs > rhs; case CEPH_OSD_CMPXATTR_OP_GTE: return lhs >= rhs; case CEPH_OSD_CMPXATTR_OP_LT: return lhs < rhs; case CEPH_OSD_CMPXATTR_OP_LTE: return lhs <= rhs; default: return -EINVAL; } } } // anonymous namespace static int do_xattr_cmp_u64(int op, uint64_t lhs, bufferlist& rhs_xattr) { uint64_t rhs; if (rhs_xattr.length() > 0) { const char* first = rhs_xattr.c_str(); if (auto [p, ec] = std::from_chars(first, first + rhs_xattr.length(), rhs); ec != std::errc()) { return -EINVAL; } } else { rhs = 0; } logger().debug("do_xattr_cmp_u64 '{}' vs '{}' op {}", lhs, rhs, op); return do_cmp_xattr(op, lhs, rhs); } PGBackend::cmp_xattr_ierrorator::future<> PGBackend::cmp_xattr( const ObjectState& os, OSDOp& osd_op, object_stat_sum_t& delta_stats) const { std::string name{"_"}; auto bp = osd_op.indata.cbegin(); bp.copy(osd_op.op.xattr.name_len, name); logger().debug("cmpxattr on obj={} for attr={}", os.oi.soid, name); return getxattr(os.oi.soid, std::move(name)).safe_then_interruptible( [&delta_stats, &osd_op] (auto &&xattr) -> cmp_xattr_ierrorator::future<> { delta_stats.num_rd++; delta_stats.num_rd_kb += shift_round_up(osd_op.op.xattr.value_len, 10); int result = 0; auto bp = osd_op.indata.cbegin(); bp += osd_op.op.xattr.name_len; switch (osd_op.op.xattr.cmp_mode) { case CEPH_OSD_CMPXATTR_MODE_STRING: { string lhs; bp.copy(osd_op.op.xattr.value_len, lhs); string_view rhs(xattr.c_str(), xattr.length()); result = do_cmp_xattr(osd_op.op.xattr.cmp_op, lhs, rhs); logger().debug("cmpxattr lhs={}, rhs={}", lhs, rhs); } break; case CEPH_OSD_CMPXATTR_MODE_U64: { uint64_t lhs; try { decode(lhs, bp); } catch (ceph::buffer::error& e) { logger().info("cmp_xattr: buffer error exception"); result = -EINVAL; break; } result = do_xattr_cmp_u64(osd_op.op.xattr.cmp_op, lhs, xattr); } break; default: logger().info("bad cmp mode {}", osd_op.op.xattr.cmp_mode); result = -EINVAL; } if (result == 0) { logger().info("cmp_xattr: comparison returned false"); return crimson::ct_error::ecanceled::make(); } else if (result == -EINVAL) { return crimson::ct_error::invarg::make(); } else { osd_op.rval = 1; return cmp_xattr_ierrorator::now(); } }).handle_error_interruptible( crimson::ct_error::enodata::handle([&delta_stats, &osd_op] () ->cmp_xattr_errorator::future<> { delta_stats.num_rd++; delta_stats.num_rd_kb += shift_round_up(osd_op.op.xattr.value_len, 10); return crimson::ct_error::ecanceled::make(); }), cmp_xattr_errorator::pass_further{} ); } PGBackend::rm_xattr_iertr::future<> PGBackend::rm_xattr( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& txn) { if (!os.exists || os.oi.is_whiteout()) { logger().debug("{}: {} DNE", __func__, os.oi.soid); return crimson::ct_error::enoent::make(); } auto bp = osd_op.indata.cbegin(); string attr_name{"_"}; bp.copy(osd_op.op.xattr.name_len, attr_name); txn.rmattr(coll->get_cid(), ghobject_t{os.oi.soid}, attr_name); return rm_xattr_iertr::now(); } void PGBackend::clone( /* const */object_info_t& snap_oi, const ObjectState& os, const ObjectState& d_os, ceph::os::Transaction& txn) { // See OpsExecutor::execute_clone documentation txn.clone(coll->get_cid(), ghobject_t{os.oi.soid}, ghobject_t{d_os.oi.soid}); { ceph::bufferlist bv; snap_oi.encode_no_oid(bv, CEPH_FEATURES_ALL); txn.setattr(coll->get_cid(), ghobject_t{d_os.oi.soid}, OI_ATTR, bv); } txn.rmattr(coll->get_cid(), ghobject_t{d_os.oi.soid}, SS_ATTR); } using get_omap_ertr = crimson::os::FuturizedStore::Shard::read_errorator::extend<
crimson::ct_error::enodata>; using get_omap_iertr = ::crimson::interruptible::interruptible_errorator< ::crimson::osd::IOInterruptCondition, get_omap_ertr>; static get_omap_iertr::future< crimson::os::FuturizedStore::Shard::omap_values_t> maybe_get_omap_vals_by_keys( crimson::os::FuturizedStore::Shard* store, const crimson::os::CollectionRef& coll, const object_info_t& oi, const std::set<std::string>& keys_to_get) { if (oi.is_omap()) { return store->omap_get_values(coll, ghobject_t{oi.soid}, keys_to_get); } else { return crimson::ct_error::enodata::make(); } } static get_omap_iertr::future< std::tuple<bool, crimson::os::FuturizedStore::Shard::omap_values_t>> maybe_get_omap_vals( crimson::os::FuturizedStore::Shard* store, const crimson::os::CollectionRef& coll, const object_info_t& oi, const std::string& start_after) { if (oi.is_omap()) { return store->omap_get_values(coll, ghobject_t{oi.soid}, start_after); } else { return crimson::ct_error::enodata::make(); } } PGBackend::ll_read_ierrorator::future<ceph::bufferlist> PGBackend::omap_get_header( const crimson::os::CollectionRef& c, const ghobject_t& oid) const { return store->omap_get_header(c, oid) .handle_error( crimson::ct_error::enodata::handle([] { return seastar::make_ready_future<bufferlist>(); }), ll_read_errorator::pass_further{} ); } PGBackend::ll_read_ierrorator::future<> PGBackend::omap_get_header( const ObjectState& os, OSDOp& osd_op, object_stat_sum_t& delta_stats) const { if (os.oi.is_omap()) { return omap_get_header(coll, ghobject_t{os.oi.soid}).safe_then_interruptible( [&delta_stats, &osd_op] (ceph::bufferlist&& header) { osd_op.outdata = std::move(header); delta_stats.num_rd_kb += shift_round_up(osd_op.outdata.length(), 10); delta_stats.num_rd++; return seastar::now(); }); } else { // no omap? return empty data but not ENOENT. This is important for // the case when the object is being created due to may_write().
return seastar::now(); } } PGBackend::ll_read_ierrorator::future<> PGBackend::omap_get_keys( const ObjectState& os, OSDOp& osd_op, object_stat_sum_t& delta_stats) const { if (!os.exists || os.oi.is_whiteout()) { logger().debug("{}: object does not exist: {}", os.oi.soid); return crimson::ct_error::enoent::make(); } std::string start_after; uint64_t max_return; try { auto p = osd_op.indata.cbegin(); decode(start_after, p); decode(max_return, p); } catch (buffer::error&) { throw crimson::osd::invalid_argument{}; } max_return = std::min(max_return, local_conf()->osd_max_omap_entries_per_request); // TODO: truly chunk the reading return maybe_get_omap_vals(store, coll, os.oi, start_after).safe_then_interruptible( [=,&delta_stats, &osd_op](auto ret) { ceph::bufferlist result; bool truncated = false; uint32_t num = 0; for (auto &[key, val] : std::get<1>(ret)) { if (num >= max_return || result.length() >= local_conf()->osd_max_omap_bytes_per_request) { truncated = true; break; } encode(key, result); ++num; } encode(num, osd_op.outdata); osd_op.outdata.claim_append(result); encode(truncated, osd_op.outdata); delta_stats.num_rd_kb += shift_round_up(osd_op.outdata.length(), 10); delta_stats.num_rd++; return seastar::now(); }).handle_error_interruptible( crimson::ct_error::enodata::handle([&osd_op] { uint32_t num = 0; bool truncated = false; encode(num, osd_op.outdata); encode(truncated, osd_op.outdata); osd_op.rval = 0; return seastar::now(); }), ll_read_errorator::pass_further{} ); } static PGBackend::omap_cmp_ertr::future<> do_omap_val_cmp( std::map<std::string, bufferlist, std::less<>> out, std::map<std::string, std::pair<bufferlist, int>> assertions) { bufferlist empty; for (const auto &[akey, avalue] : assertions) { const auto [abl, aflag] = avalue; auto out_entry = out.find(akey); bufferlist &bl = (out_entry != out.end()) ? 
out_entry->second : empty; switch (aflag) { case CEPH_OSD_CMPXATTR_OP_EQ: if (!(bl == abl)) { return crimson::ct_error::ecanceled::make(); } break; case CEPH_OSD_CMPXATTR_OP_LT: if (!(bl < abl)) { return crimson::ct_error::ecanceled::make(); } break; case CEPH_OSD_CMPXATTR_OP_GT: if (!(bl > abl)) { return crimson::ct_error::ecanceled::make(); } break; default: return crimson::ct_error::invarg::make(); } } return PGBackend::omap_cmp_ertr::now(); } PGBackend::omap_cmp_iertr::future<> PGBackend::omap_cmp( const ObjectState& os, OSDOp& osd_op, object_stat_sum_t& delta_stats) const { if (!os.exists || os.oi.is_whiteout()) { logger().debug("{}: object does not exist: {}", os.oi.soid); return crimson::ct_error::enoent::make(); } auto bp = osd_op.indata.cbegin(); std::map<std::string, std::pair<bufferlist, int> > assertions; try { decode(assertions, bp); } catch (buffer::error&) { return crimson::ct_error::invarg::make(); } delta_stats.num_rd++; if (os.oi.is_omap()) { std::set<std::string> to_get; for (auto &i: assertions) { to_get.insert(i.first); } return store->omap_get_values(coll, ghobject_t{os.oi.soid}, to_get) .safe_then([=, &osd_op] (auto&& out) -> omap_cmp_iertr::future<> { osd_op.rval = 0; return do_omap_val_cmp(out, assertions); }); } else { return crimson::ct_error::ecanceled::make(); } } PGBackend::ll_read_ierrorator::future<> PGBackend::omap_get_vals( const ObjectState& os, OSDOp& osd_op, object_stat_sum_t& delta_stats) const { if (!os.exists || os.oi.is_whiteout()) { logger().debug("{}: object does not exist: {}", os.oi.soid); return crimson::ct_error::enoent::make(); } std::string start_after; uint64_t max_return; std::string filter_prefix; try { auto p = osd_op.indata.cbegin(); decode(start_after, p); decode(max_return, p); decode(filter_prefix, p); } catch (buffer::error&) { throw crimson::osd::invalid_argument{}; } max_return = \ std::min(max_return, local_conf()->osd_max_omap_entries_per_request); delta_stats.num_rd_kb += shift_round_up(osd_op.outdata.length(), 10); delta_stats.num_rd++; // TODO: truly chunk the reading return maybe_get_omap_vals(store, coll, os.oi, start_after) .safe_then_interruptible( [=, &osd_op] (auto&& ret) { auto [done, vals] = std::move(ret); assert(done); ceph::bufferlist result; bool truncated = false; uint32_t num = 0; auto iter = filter_prefix > start_after ? 
vals.lower_bound(filter_prefix) : std::begin(vals); for (; iter != std::end(vals); ++iter) { const auto& [key, value] = *iter; if (key.substr(0, filter_prefix.size()) != filter_prefix) { break; } else if (num >= max_return || result.length() >= local_conf()->osd_max_omap_bytes_per_request) { truncated = true; break; } encode(key, result); encode(value, result); ++num; } encode(num, osd_op.outdata); osd_op.outdata.claim_append(result); encode(truncated, osd_op.outdata); return ll_read_errorator::now(); }).handle_error_interruptible( crimson::ct_error::enodata::handle([&osd_op] { encode(uint32_t{0} /* num */, osd_op.outdata); encode(bool{false} /* truncated */, osd_op.outdata); osd_op.rval = 0; return ll_read_errorator::now(); }), ll_read_errorator::pass_further{} ); } PGBackend::ll_read_ierrorator::future<> PGBackend::omap_get_vals_by_keys( const ObjectState& os, OSDOp& osd_op, object_stat_sum_t& delta_stats) const { if (!os.exists || os.oi.is_whiteout()) { logger().debug("{}: object does not exist: {}", __func__, os.oi.soid); return crimson::ct_error::enoent::make(); } std::set<std::string> keys_to_get; try { auto p = osd_op.indata.cbegin(); decode(keys_to_get, p); } catch (buffer::error&) { throw crimson::osd::invalid_argument(); } delta_stats.num_rd_kb += shift_round_up(osd_op.outdata.length(), 10); delta_stats.num_rd++; return maybe_get_omap_vals_by_keys(store, coll, os.oi, keys_to_get) .safe_then_interruptible( [&osd_op] (crimson::os::FuturizedStore::Shard::omap_values_t&& vals) { encode(vals, osd_op.outdata); return ll_read_errorator::now(); }).handle_error_interruptible( crimson::ct_error::enodata::handle([&osd_op] { uint32_t num = 0; encode(num, osd_op.outdata); osd_op.rval = 0; return ll_read_errorator::now(); }), ll_read_errorator::pass_further{} ); } PGBackend::interruptible_future<> PGBackend::omap_set_vals( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& txn, osd_op_params_t& osd_op_params, object_stat_sum_t& delta_stats) { maybe_create_new_object(os, txn, delta_stats); ceph::bufferlist to_set_bl; try { auto p = osd_op.indata.cbegin(); decode_str_str_map_to_bl(p, &to_set_bl); } catch (buffer::error&) { throw crimson::osd::invalid_argument{}; } txn.omap_setkeys(coll->get_cid(), ghobject_t{os.oi.soid}, to_set_bl); osd_op_params.clean_regions.mark_omap_dirty(); delta_stats.num_wr++; delta_stats.num_wr_kb += shift_round_up(to_set_bl.length(), 10); os.oi.set_flag(object_info_t::FLAG_OMAP); os.oi.clear_omap_digest(); return seastar::now(); } PGBackend::interruptible_future<> PGBackend::omap_set_header( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& txn, osd_op_params_t& osd_op_params, object_stat_sum_t& delta_stats) { maybe_create_new_object(os, txn, delta_stats); txn.omap_setheader(coll->get_cid(), ghobject_t{os.oi.soid}, osd_op.indata); osd_op_params.clean_regions.mark_omap_dirty(); delta_stats.num_wr++; os.oi.set_flag(object_info_t::FLAG_OMAP); os.oi.clear_omap_digest(); return seastar::now(); } PGBackend::interruptible_future<> PGBackend::omap_remove_range( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& txn, object_stat_sum_t& delta_stats) { std::string key_begin, key_end; try { auto p = osd_op.indata.cbegin(); decode(key_begin, p); decode(key_end, p); } catch (buffer::error& e) { throw crimson::osd::invalid_argument{}; } txn.omap_rmkeyrange(coll->get_cid(), ghobject_t{os.oi.soid}, key_begin, key_end); delta_stats.num_wr++; os.oi.clear_omap_digest(); return seastar::now(); } PGBackend::interruptible_future<> PGBackend::omap_remove_key( 
ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& txn) { ceph::bufferlist to_rm_bl; try { auto p = osd_op.indata.cbegin(); decode_str_set_to_bl(p, &to_rm_bl); } catch (buffer::error& e) { throw crimson::osd::invalid_argument{}; } txn.omap_rmkeys(coll->get_cid(), ghobject_t{os.oi.soid}, to_rm_bl); // TODO: // ctx->clean_regions.mark_omap_dirty(); // ctx->delta_stats.num_wr++; os.oi.clear_omap_digest(); return seastar::now(); } PGBackend::omap_clear_iertr::future<> PGBackend::omap_clear( ObjectState& os, OSDOp& osd_op, ceph::os::Transaction& txn, osd_op_params_t& osd_op_params, object_stat_sum_t& delta_stats) { if (!os.exists || os.oi.is_whiteout()) { logger().debug("{}: object does not exist: {}", os.oi.soid); return crimson::ct_error::enoent::make(); } if (!os.oi.is_omap()) { return omap_clear_ertr::now(); } txn.omap_clear(coll->get_cid(), ghobject_t{os.oi.soid}); osd_op_params.clean_regions.mark_omap_dirty(); delta_stats.num_wr++; os.oi.clear_omap_digest(); os.oi.clear_flag(object_info_t::FLAG_OMAP); return omap_clear_ertr::now(); } PGBackend::interruptible_future<struct stat> PGBackend::stat( CollectionRef c, const ghobject_t& oid) const { return store->stat(c, oid); } PGBackend::read_errorator::future<std::map<uint64_t, uint64_t>> PGBackend::fiemap( CollectionRef c, const ghobject_t& oid, uint64_t off, uint64_t len) { return store->fiemap(c, oid, off, len); } PGBackend::write_iertr::future<> PGBackend::tmapput( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& txn, object_stat_sum_t& delta_stats, osd_op_params_t& osd_op_params) { logger().debug("PGBackend::tmapput: {}", os.oi.soid); auto ret = crimson::common::do_tmap_put(osd_op.indata.cbegin()); if (!ret.has_value()) { logger().debug("PGBackend::tmapup: {}, ret={}", os.oi.soid, ret.error()); ceph_assert(ret.error() == -EINVAL); return crimson::ct_error::invarg::make(); } else { auto bl = std::move(ret.value()); return _writefull( os, bl.length(), std::move(bl), txn, osd_op_params, delta_stats, 0); } } PGBackend::tmapup_iertr::future<> PGBackend::tmapup( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& txn, object_stat_sum_t& delta_stats, osd_op_params_t& osd_op_params) { logger().debug("PGBackend::tmapup: {}", os.oi.soid); return PGBackend::write_iertr::now( ).si_then([this, &os] { return _read(os.oi.soid, 0, os.oi.size, 0); }).handle_error_interruptible( crimson::ct_error::enoent::handle([](auto &) { return seastar::make_ready_future<bufferlist>(); }), PGBackend::write_iertr::pass_further{}, crimson::ct_error::assert_all{"read error in mutate_object_contents"} ).si_then([this, &os, &osd_op, &txn, &delta_stats, &osd_op_params] (auto &&bl) mutable -> PGBackend::tmapup_iertr::future<> { auto result = crimson::common::do_tmap_up( osd_op.indata.cbegin(), std::move(bl)); if (!result.has_value()) { int ret = result.error(); logger().debug("PGBackend::tmapup: {}, ret={}", os.oi.soid, ret); switch (ret) { case -EEXIST: return crimson::ct_error::eexist::make(); case -ENOENT: return crimson::ct_error::enoent::make(); case -EINVAL: return crimson::ct_error::invarg::make(); default: ceph_assert(0 == "impossible error"); return crimson::ct_error::invarg::make(); } } logger().debug( "PGBackend::tmapup: {}, result.value.length()={}, ret=0", os.oi.soid, result.value().length()); return _writefull( os, result.value().length(), result.value(), txn, osd_op_params, delta_stats, 0); }); } PGBackend::read_ierrorator::future<> PGBackend::tmapget( const ObjectState& os, OSDOp& osd_op, object_stat_sum_t& delta_stats) { 
logger().debug("PGBackend::tmapget: {}", os.oi.soid); const auto& oi = os.oi; logger().debug("PGBackend::tmapget: read {} 0~{}", oi.soid, oi.size); if (!os.exists || os.oi.is_whiteout()) { logger().debug("PGBackend::tmapget: {} DNE", os.oi.soid); return crimson::ct_error::enoent::make(); } return _read(oi.soid, 0, oi.size, 0).safe_then_interruptible_tuple( [&delta_stats, &osd_op](auto&& bl) -> read_errorator::future<> { logger().debug("PGBackend::tmapget: data length: {}", bl.length()); osd_op.op.extent.length = bl.length(); osd_op.rval = 0; delta_stats.num_rd++; delta_stats.num_rd_kb += shift_round_up(bl.length(), 10); osd_op.outdata = std::move(bl); return read_errorator::now(); }, crimson::ct_error::input_output_error::handle([] { return read_errorator::future<>{crimson::ct_error::object_corrupted::make()}; }), read_errorator::pass_further{}); }
58,718
31.405629
88
cc
null
ceph-main/src/crimson/osd/pg_backend.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <map> #include <memory> #include <string> #include <boost/container/flat_set.hpp> #include "include/rados.h" #include "crimson/os/futurized_store.h" #include "crimson/os/futurized_collection.h" #include "crimson/osd/acked_peers.h" #include "crimson/common/shared_lru.h" #include "messages/MOSDOp.h" #include "messages/MOSDOpReply.h" #include "os/Transaction.h" #include "osd/osd_types.h" #include "crimson/osd/object_context.h" #include "crimson/osd/osd_operation.h" #include "crimson/osd/osd_operations/osdop_params.h" struct hobject_t; namespace ceph::os { class Transaction; } namespace crimson::osd { class ShardServices; class PG; class ObjectContextLoader; } class PGBackend { protected: using CollectionRef = crimson::os::CollectionRef; using ec_profile_t = std::map<std::string, std::string>; // low-level read errorator using ll_read_errorator = crimson::os::FuturizedStore::Shard::read_errorator; using ll_read_ierrorator = ::crimson::interruptible::interruptible_errorator< ::crimson::osd::IOInterruptCondition, ll_read_errorator>; public: using load_metadata_ertr = crimson::errorator< crimson::ct_error::object_corrupted>; using load_metadata_iertr = ::crimson::interruptible::interruptible_errorator< ::crimson::osd::IOInterruptCondition, load_metadata_ertr>; using interruptor = ::crimson::interruptible::interruptor< ::crimson::osd::IOInterruptCondition>; template <typename T = void> using interruptible_future = ::crimson::interruptible::interruptible_future< ::crimson::osd::IOInterruptCondition, T>; using rep_op_fut_t = std::tuple<interruptible_future<>, interruptible_future<crimson::osd::acked_peers_t>>; PGBackend(shard_id_t shard, CollectionRef coll, crimson::osd::ShardServices &shard_services, DoutPrefixProvider &dpp); virtual ~PGBackend() = default; static std::unique_ptr<PGBackend> create(pg_t pgid, const pg_shard_t pg_shard, const pg_pool_t& pool, crimson::os::CollectionRef coll, crimson::osd::ShardServices& shard_services, const ec_profile_t& ec_profile, DoutPrefixProvider &dpp); using attrs_t = std::map<std::string, ceph::bufferptr, std::less<>>; using read_errorator = ll_read_errorator::extend< crimson::ct_error::object_corrupted>; using read_ierrorator = ::crimson::interruptible::interruptible_errorator< ::crimson::osd::IOInterruptCondition, read_errorator>; read_ierrorator::future<> read( const ObjectState& os, OSDOp& osd_op, object_stat_sum_t& delta_stats); read_ierrorator::future<> sparse_read( const ObjectState& os, OSDOp& osd_op, object_stat_sum_t& delta_stats); using checksum_errorator = ll_read_errorator::extend< crimson::ct_error::object_corrupted, crimson::ct_error::invarg>; using checksum_ierrorator = ::crimson::interruptible::interruptible_errorator< ::crimson::osd::IOInterruptCondition, checksum_errorator>; checksum_ierrorator::future<> checksum( const ObjectState& os, OSDOp& osd_op); using cmp_ext_errorator = ll_read_errorator::extend< crimson::ct_error::invarg, crimson::ct_error::cmp_fail>; using cmp_ext_ierrorator = ::crimson::interruptible::interruptible_errorator< ::crimson::osd::IOInterruptCondition, cmp_ext_errorator>; cmp_ext_ierrorator::future<> cmp_ext( const ObjectState& os, OSDOp& osd_op); using stat_errorator = crimson::errorator<crimson::ct_error::enoent>; using stat_ierrorator = ::crimson::interruptible::interruptible_errorator< ::crimson::osd::IOInterruptCondition, stat_errorator>; stat_ierrorator::future<> stat( const ObjectState& os, 
OSDOp& osd_op, object_stat_sum_t& delta_stats); // TODO: switch the entire write family to errorator. using write_ertr = crimson::errorator< crimson::ct_error::file_too_large, crimson::ct_error::invarg>; using write_iertr = ::crimson::interruptible::interruptible_errorator< ::crimson::osd::IOInterruptCondition, write_ertr>; using create_ertr = crimson::errorator< crimson::ct_error::invarg, crimson::ct_error::eexist>; using create_iertr = ::crimson::interruptible::interruptible_errorator< ::crimson::osd::IOInterruptCondition, create_ertr>; create_iertr::future<> create( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& trans, object_stat_sum_t& delta_stats); using remove_ertr = crimson::errorator< crimson::ct_error::enoent>; using remove_iertr = ::crimson::interruptible::interruptible_errorator< ::crimson::osd::IOInterruptCondition, remove_ertr>; remove_iertr::future<> remove( ObjectState& os, ceph::os::Transaction& txn, object_stat_sum_t& delta_stats, bool whiteout); interruptible_future<> remove( ObjectState& os, ceph::os::Transaction& txn); interruptible_future<> set_allochint( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& trans, object_stat_sum_t& delta_stats); write_iertr::future<> write( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& trans, osd_op_params_t& osd_op_params, object_stat_sum_t& delta_stats); interruptible_future<> write_same( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& trans, osd_op_params_t& osd_op_params, object_stat_sum_t& delta_stats); write_iertr::future<> writefull( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& trans, osd_op_params_t& osd_op_params, object_stat_sum_t& delta_stats); using append_errorator = crimson::errorator< crimson::ct_error::invarg>; using append_ierrorator = ::crimson::interruptible::interruptible_errorator< ::crimson::osd::IOInterruptCondition, append_errorator>; append_ierrorator::future<> append( ObjectState& os, OSDOp& osd_op, ceph::os::Transaction& trans, osd_op_params_t& osd_op_params, object_stat_sum_t& delta_stats); using rollback_ertr = crimson::errorator< crimson::ct_error::enoent>; using rollback_iertr = ::crimson::interruptible::interruptible_errorator< ::crimson::osd::IOInterruptCondition, rollback_ertr>; rollback_iertr::future<> rollback( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& txn, osd_op_params_t& osd_op_params, object_stat_sum_t& delta_stats, crimson::osd::ObjectContextRef head, crimson::osd::ObjectContextLoader& obc_loader); write_iertr::future<> truncate( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& trans, osd_op_params_t& osd_op_params, object_stat_sum_t& delta_stats); write_iertr::future<> zero( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& trans, osd_op_params_t& osd_op_params, object_stat_sum_t& delta_stats); rep_op_fut_t mutate_object( std::set<pg_shard_t> pg_shards, crimson::osd::ObjectContextRef &&obc, ceph::os::Transaction&& txn, osd_op_params_t&& osd_op_p, epoch_t min_epoch, epoch_t map_epoch, std::vector<pg_log_entry_t>&& log_entries); interruptible_future<std::tuple<std::vector<hobject_t>, hobject_t>> list_objects( const hobject_t& start, uint64_t limit) const; using setxattr_errorator = crimson::errorator< crimson::ct_error::file_too_large, crimson::ct_error::enametoolong>; using setxattr_ierrorator = ::crimson::interruptible::interruptible_errorator< ::crimson::osd::IOInterruptCondition, setxattr_errorator>; setxattr_ierrorator::future<> setxattr( ObjectState& os, const OSDOp& osd_op, 
ceph::os::Transaction& trans, object_stat_sum_t& delta_stats); using get_attr_errorator = crimson::os::FuturizedStore::Shard::get_attr_errorator; using get_attr_ierrorator = ::crimson::interruptible::interruptible_errorator< ::crimson::osd::IOInterruptCondition, get_attr_errorator>; get_attr_ierrorator::future<> getxattr( const ObjectState& os, OSDOp& osd_op, object_stat_sum_t& delta_stats) const; get_attr_ierrorator::future<ceph::bufferlist> getxattr( const hobject_t& soid, std::string_view key) const; get_attr_ierrorator::future<ceph::bufferlist> getxattr( const hobject_t& soid, std::string&& key) const; get_attr_ierrorator::future<> get_xattrs( const ObjectState& os, OSDOp& osd_op, object_stat_sum_t& delta_stats) const; using cmp_xattr_errorator = get_attr_errorator::extend< crimson::ct_error::ecanceled, crimson::ct_error::invarg>; using cmp_xattr_ierrorator = ::crimson::interruptible::interruptible_errorator< ::crimson::osd::IOInterruptCondition, cmp_xattr_errorator>; cmp_xattr_ierrorator::future<> cmp_xattr( const ObjectState& os, OSDOp& osd_op, object_stat_sum_t& delta_stats) const; using rm_xattr_ertr = crimson::errorator<crimson::ct_error::enoent>; using rm_xattr_iertr = ::crimson::interruptible::interruptible_errorator< ::crimson::osd::IOInterruptCondition, rm_xattr_ertr>; rm_xattr_iertr::future<> rm_xattr( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& trans); void clone( /* const */object_info_t& snap_oi, const ObjectState& os, const ObjectState& d_os, ceph::os::Transaction& trans); interruptible_future<struct stat> stat( CollectionRef c, const ghobject_t& oid) const; read_errorator::future<std::map<uint64_t, uint64_t>> fiemap( CollectionRef c, const ghobject_t& oid, uint64_t off, uint64_t len); write_iertr::future<> tmapput( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& trans, object_stat_sum_t& delta_stats, osd_op_params_t& osd_op_params); using tmapup_ertr = write_ertr::extend< crimson::ct_error::enoent, crimson::ct_error::eexist>; using tmapup_iertr = ::crimson::interruptible::interruptible_errorator< ::crimson::osd::IOInterruptCondition, tmapup_ertr>; tmapup_iertr::future<> tmapup( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& trans, object_stat_sum_t& delta_stats, osd_op_params_t& osd_op_params); read_ierrorator::future<> tmapget( const ObjectState& os, OSDOp& osd_op, object_stat_sum_t& delta_stats); // OMAP ll_read_ierrorator::future<> omap_get_keys( const ObjectState& os, OSDOp& osd_op, object_stat_sum_t& delta_stats) const; using omap_cmp_ertr = crimson::os::FuturizedStore::Shard::read_errorator::extend< crimson::ct_error::ecanceled, crimson::ct_error::invarg>; using omap_cmp_iertr = ::crimson::interruptible::interruptible_errorator< ::crimson::osd::IOInterruptCondition, omap_cmp_ertr>; omap_cmp_iertr::future<> omap_cmp( const ObjectState& os, OSDOp& osd_op, object_stat_sum_t& delta_stats) const; ll_read_ierrorator::future<> omap_get_vals( const ObjectState& os, OSDOp& osd_op, object_stat_sum_t& delta_stats) const; ll_read_ierrorator::future<> omap_get_vals_by_keys( const ObjectState& os, OSDOp& osd_op, object_stat_sum_t& delta_stats) const; interruptible_future<> omap_set_vals( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& trans, osd_op_params_t& osd_op_params, object_stat_sum_t& delta_stats); ll_read_ierrorator::future<ceph::bufferlist> omap_get_header( const crimson::os::CollectionRef& c, const ghobject_t& oid) const; ll_read_ierrorator::future<> omap_get_header( const ObjectState& os, OSDOp& osd_op, 
object_stat_sum_t& delta_stats) const; interruptible_future<> omap_set_header( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& trans, osd_op_params_t& osd_op_params, object_stat_sum_t& delta_stats); interruptible_future<> omap_remove_range( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& trans, object_stat_sum_t& delta_stats); interruptible_future<> omap_remove_key( ObjectState& os, const OSDOp& osd_op, ceph::os::Transaction& trans); using omap_clear_ertr = crimson::errorator<crimson::ct_error::enoent>; using omap_clear_iertr = ::crimson::interruptible::interruptible_errorator< ::crimson::osd::IOInterruptCondition, omap_clear_ertr>; omap_clear_iertr::future<> omap_clear( ObjectState& os, OSDOp& osd_op, ceph::os::Transaction& trans, osd_op_params_t& osd_op_params, object_stat_sum_t& delta_stats); virtual void got_rep_op_reply(const MOSDRepOpReply&) {} virtual seastar::future<> stop() = 0; virtual void on_actingset_changed(bool same_primary) = 0; protected: const shard_id_t shard; CollectionRef coll; crimson::osd::ShardServices &shard_services; DoutPrefixProvider &dpp; ///< provides log prefix context crimson::os::FuturizedStore::Shard* store; virtual seastar::future<> request_committed( const osd_reqid_t& reqid, const eversion_t& at_version) = 0; public: struct loaded_object_md_t { ObjectState os; crimson::osd::SnapSetContextRef ssc; using ref = std::unique_ptr<loaded_object_md_t>; }; load_metadata_iertr::future<loaded_object_md_t::ref> load_metadata( const hobject_t &oid); private: virtual ll_read_ierrorator::future<ceph::bufferlist> _read( const hobject_t& hoid, size_t offset, size_t length, uint32_t flags) = 0; write_iertr::future<> _writefull( ObjectState& os, off_t truncate_size, const bufferlist& bl, ceph::os::Transaction& txn, osd_op_params_t& osd_op_params, object_stat_sum_t& delta_stats, unsigned flags); write_iertr::future<> _truncate( ObjectState& os, ceph::os::Transaction& txn, osd_op_params_t& osd_op_params, object_stat_sum_t& delta_stats, size_t offset, size_t truncate_size, uint32_t truncate_seq); bool maybe_create_new_object(ObjectState& os, ceph::os::Transaction& txn, object_stat_sum_t& delta_stats); void update_size_and_usage(object_stat_sum_t& delta_stats, object_info_t& oi, uint64_t offset, uint64_t length, bool write_full = false); void truncate_update_size_and_usage( object_stat_sum_t& delta_stats, object_info_t& oi, uint64_t truncate_size); virtual rep_op_fut_t _submit_transaction(std::set<pg_shard_t>&& pg_shards, const hobject_t& hoid, ceph::os::Transaction&& txn, osd_op_params_t&& osd_op_p, epoch_t min_epoch, epoch_t max_epoch, std::vector<pg_log_entry_t>&& log_entries) = 0; friend class ReplicatedRecoveryBackend; friend class ::crimson::osd::PG; };
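// ---------------------------------------------------------------------------
// Editor's note: explanatory comment added for readability; it is not part of
// the original header. The naming convention used throughout this class is:
//   *_ertr  - a crimson::errorator<...> listing the errors an op may return;
//   *_iertr - the same error set wrapped in an interruptible_errorator keyed
//             on IOInterruptCondition, so the returned future can also be
//             interrupted when the PG interval changes or the OSD is stopping.
// A hypothetical new op would follow the same two-step pattern already used
// above, e.g.:
//   using example_ertr  = crimson::errorator<crimson::ct_error::enoent>;
//   using example_iertr = ::crimson::interruptible::interruptible_errorator<
//     ::crimson::osd::IOInterruptCondition, example_ertr>;
// (example_ertr/example_iertr are illustrative names only.)
// ---------------------------------------------------------------------------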
14,909
32.207127
84
h
null
ceph-main/src/crimson/osd/pg_interval_interrupt_condition.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "pg_interval_interrupt_condition.h" #include "pg.h" #include "crimson/common/log.h" SET_SUBSYS(osd); namespace crimson::osd { IOInterruptCondition::IOInterruptCondition(Ref<PG>& pg) : pg(pg), e(pg->get_osdmap_epoch()) {} IOInterruptCondition::~IOInterruptCondition() { // for the sake of forward declaring PG (which is a derivative of // intrusive_ref_counter<...>) } bool IOInterruptCondition::new_interval_created() { LOG_PREFIX(IOInterruptCondition::new_interval_created); const epoch_t interval_start = pg->get_interval_start_epoch(); bool ret = e < interval_start; if (ret) { DEBUGDPP("stored interval e{} < interval_start e{}", *pg, e, interval_start); } return ret; } bool IOInterruptCondition::is_stopping() { LOG_PREFIX(IOInterruptCondition::is_stopping); if (pg->stopping) { DEBUGDPP("pg stopping", *pg); } return pg->stopping; } bool IOInterruptCondition::is_primary() { return pg->is_primary(); } } // namespace crimson::osd
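// ---------------------------------------------------------------------------
// Editor's note (not part of the original file): the condition captures the
// OSD map epoch `e` at construction time, and new_interval_created() later
// compares it with the PG's current interval start. For example, an operation
// created at epoch 40 keeps running through map epochs 41..44 as long as the
// interval that began at, say, epoch 38 is still current, but is interrupted
// as soon as a new interval starting at epoch 45 is recorded, because 40 < 45.
// ---------------------------------------------------------------------------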
1,088
23.75
81
cc
null
ceph-main/src/crimson/osd/pg_interval_interrupt_condition.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*- // vim: ts=8 sw=2 smarttab expandtab #pragma once #include "include/types.h" #include "crimson/common/errorator.h" #include "crimson/common/exception.h" #include "crimson/common/type_helpers.h" namespace crimson::osd { class PG; class IOInterruptCondition { public: IOInterruptCondition(Ref<PG>& pg); ~IOInterruptCondition(); bool new_interval_created(); bool is_stopping(); bool is_primary(); template <typename Fut> std::optional<Fut> may_interrupt() { if (new_interval_created()) { return seastar::futurize<Fut>::make_exception_future( ::crimson::common::actingset_changed(is_primary())); } if (is_stopping()) { return seastar::futurize<Fut>::make_exception_future( ::crimson::common::system_shutdown_exception()); } return std::optional<Fut>(); } template <typename T> static constexpr bool is_interruption_v = std::is_same_v<T, ::crimson::common::actingset_changed> || std::is_same_v<T, ::crimson::common::system_shutdown_exception>; static bool is_interruption(std::exception_ptr& eptr) { return (*eptr.__cxa_exception_type() == typeid(::crimson::common::actingset_changed) || *eptr.__cxa_exception_type() == typeid(::crimson::common::system_shutdown_exception)); } private: Ref<PG> pg; epoch_t e; }; } // namespace crimson::osd
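// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch (not part of the original header) of how
// a caller could consult the condition between steps of an interruptible
// chain. `example_guarded_step` and `do_io` are hypothetical names; only the
// API defined above is assumed. In the real code this check is made by the
// generic interruptible-future machinery rather than by hand, so the sketch is
// kept in comment form:
//
//   template <typename Func>
//   seastar::future<> example_guarded_step(IOInterruptCondition& cond, Func&& do_io) {
//     if (auto interrupted = cond.may_interrupt<seastar::future<>>()) {
//       // holds an exceptional future: actingset_changed or system_shutdown
//       return std::move(*interrupted);
//     }
//     return std::forward<Func>(do_io)();
//   }
// ---------------------------------------------------------------------------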
1,445
24.368421
72
h
null
ceph-main/src/crimson/osd/pg_map.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "crimson/osd/pg_map.h" #include "crimson/osd/pg.h" #include "common/Formatter.h" namespace { seastar::logger& logger() { return crimson::get_logger(ceph_subsys_osd); } } using std::make_pair; namespace crimson::osd { PGMap::PGCreationState::PGCreationState(spg_t pgid) : pgid(pgid) {} PGMap::PGCreationState::~PGCreationState() {} void PGMap::PGCreationState::dump_detail(Formatter *f) const { f->dump_stream("pgid") << pgid; f->dump_bool("creating", creating); } PGMap::wait_for_pg_ret PGMap::wait_for_pg(PGCreationBlockingEvent::TriggerI&& trigger, spg_t pgid) { if (auto pg = get_pg(pgid)) { return make_pair( wait_for_pg_fut(wait_for_pg_ertr::ready_future_marker{}, pg), true); } else { auto &state = pgs_creating.emplace(pgid, pgid).first->second; return make_pair( wait_for_pg_fut( trigger.maybe_record_blocking(state.promise.get_shared_future(), state) ), state.creating); } } void PGMap::remove_pg(spg_t pgid) { ceph_assert(pgs.erase(pgid) == 1); } Ref<PG> PGMap::get_pg(spg_t pgid) { if (auto pg = pgs.find(pgid); pg != pgs.end()) { return pg->second; } else { return nullptr; } } void PGMap::set_creating(spg_t pgid) { logger().debug("Creating {}", pgid); ceph_assert(pgs.count(pgid) == 0); auto pg = pgs_creating.find(pgid); ceph_assert(pg != pgs_creating.end()); ceph_assert(pg->second.creating == false); pg->second.creating = true; } void PGMap::pg_created(spg_t pgid, Ref<PG> pg) { logger().debug("Created {}", pgid); ceph_assert(!pgs.count(pgid)); pgs.emplace(pgid, pg); auto creating_iter = pgs_creating.find(pgid); ceph_assert(creating_iter != pgs_creating.end()); auto promise = std::move(creating_iter->second.promise); pgs_creating.erase(creating_iter); promise.set_value(pg); } void PGMap::pg_loaded(spg_t pgid, Ref<PG> pg) { ceph_assert(!pgs.count(pgid)); pgs.emplace(pgid, pg); } void PGMap::pg_creation_canceled(spg_t pgid) { logger().debug("PGMap::pg_creation_canceled: {}", pgid); ceph_assert(!pgs.count(pgid)); auto creating_iter = pgs_creating.find(pgid); ceph_assert(creating_iter != pgs_creating.end()); auto promise = std::move(creating_iter->second.promise); pgs_creating.erase(creating_iter); promise.set_exception( crimson::ct_error::ecanceled::exception_ptr() ); } PGMap::~PGMap() {} }
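// ---------------------------------------------------------------------------
// Editor's note (not part of the original file): the creation path above forms
// a small lifecycle around a per-pgid seastar::shared_promise --
//   1. wait_for_pg() registers (or joins) a PGCreationState and hands the
//      caller a shared future plus a bool saying whether creation had already
//      been kicked off;
//   2. set_creating() marks that the create is now in flight;
//   3. pg_created() moves the PG into `pgs` and resolves the promise, while
//      pg_creation_canceled() fails it with ecanceled instead, which is why
//      wait_for_pg_ertr lists only that error.
// pg_loaded() bypasses the promise entirely, since PGs found on disk at
// startup never have waiters.
// ---------------------------------------------------------------------------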
2,470
22.990291
75
cc
null
ceph-main/src/crimson/osd/pg_map.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <map> #include <algorithm> #include <seastar/core/future.hh> #include <seastar/core/shared_future.hh> #include "include/types.h" #include "crimson/common/type_helpers.h" #include "crimson/common/smp_helpers.h" #include "crimson/osd/osd_operation.h" #include "osd/osd_types.h" namespace crimson::osd { class PG; /** * PGShardMapping * * Maintains a mapping from spg_t to the core containing that PG. Internally, each * core has a local copy of the mapping to enable core-local lookups. Updates * are proxied to core 0, and the back out to all other cores -- see maybe_create_pg. */ class PGShardMapping : public seastar::peering_sharded_service<PGShardMapping> { public: /// Returns mapping if present, NULL_CORE otherwise core_id_t get_pg_mapping(spg_t pgid) { auto iter = pg_to_core.find(pgid); ceph_assert_always(iter == pg_to_core.end() || iter->second != NULL_CORE); return iter == pg_to_core.end() ? NULL_CORE : iter->second; } /// Returns mapping for pgid, creates new one if it doesn't already exist seastar::future<core_id_t> maybe_create_pg( spg_t pgid, core_id_t core = NULL_CORE) { auto find_iter = pg_to_core.find(pgid); if (find_iter != pg_to_core.end()) { ceph_assert_always(find_iter->second != NULL_CORE); if (core != NULL_CORE) { ceph_assert_always(find_iter->second == core); } return seastar::make_ready_future<core_id_t>(find_iter->second); } else { return container().invoke_on(0,[pgid, core] (auto &primary_mapping) { auto [insert_iter, inserted] = primary_mapping.pg_to_core.emplace(pgid, core); ceph_assert_always(inserted); ceph_assert_always(primary_mapping.core_to_num_pgs.size() > 0); std::map<core_id_t, unsigned>::iterator core_iter; if (core == NULL_CORE) { core_iter = std::min_element( primary_mapping.core_to_num_pgs.begin(), primary_mapping.core_to_num_pgs.end(), [](const auto &left, const auto &right) { return left.second < right.second; }); } else { core_iter = primary_mapping.core_to_num_pgs.find(core); } ceph_assert_always(primary_mapping.core_to_num_pgs.end() != core_iter); insert_iter->second = core_iter->first; core_iter->second++; return primary_mapping.container().invoke_on_others( [pgid = insert_iter->first, core = insert_iter->second] (auto &other_mapping) { ceph_assert_always(core != NULL_CORE); auto [insert_iter, inserted] = other_mapping.pg_to_core.emplace(pgid, core); ceph_assert_always(inserted); }); }).then([this, pgid] { auto find_iter = pg_to_core.find(pgid); return seastar::make_ready_future<core_id_t>(find_iter->second); }); } } /// Remove pgid seastar::future<> remove_pg(spg_t pgid) { return container().invoke_on(0, [pgid](auto &primary_mapping) { auto iter = primary_mapping.pg_to_core.find(pgid); ceph_assert_always(iter != primary_mapping.pg_to_core.end()); ceph_assert_always(iter->second != NULL_CORE); auto count_iter = primary_mapping.core_to_num_pgs.find(iter->second); ceph_assert_always(count_iter != primary_mapping.core_to_num_pgs.end()); ceph_assert_always(count_iter->second > 0); --(count_iter->second); primary_mapping.pg_to_core.erase(iter); return primary_mapping.container().invoke_on_others( [pgid](auto &other_mapping) { auto iter = other_mapping.pg_to_core.find(pgid); ceph_assert_always(iter != other_mapping.pg_to_core.end()); ceph_assert_always(iter->second != NULL_CORE); other_mapping.pg_to_core.erase(iter); }); }); } size_t get_num_pgs() const { return pg_to_core.size(); } /// Map to cores in [min_core_mapping, core_mapping_limit) 
PGShardMapping(core_id_t min_core_mapping, core_id_t core_mapping_limit) { ceph_assert_always(min_core_mapping < core_mapping_limit); for (auto i = min_core_mapping; i != core_mapping_limit; ++i) { core_to_num_pgs.emplace(i, 0); } } template <typename F> void for_each_pgid(F &&f) const { for (const auto &i: pg_to_core) { std::invoke(f, i.first); } } private: std::map<core_id_t, unsigned> core_to_num_pgs; std::map<spg_t, core_id_t> pg_to_core; }; /** * PGMap * * Maps spg_t to PG instance within a shard. Handles dealing with waiting * on pg creation. */ class PGMap { struct PGCreationState : BlockerT<PGCreationState> { static constexpr const char * type_name = "PGCreation"; void dump_detail(Formatter *f) const final; spg_t pgid; seastar::shared_promise<Ref<PG>> promise; bool creating = false; PGCreationState(spg_t pgid); PGCreationState(const PGCreationState &) = delete; PGCreationState(PGCreationState &&) = delete; PGCreationState &operator=(const PGCreationState &) = delete; PGCreationState &operator=(PGCreationState &&) = delete; ~PGCreationState(); }; std::map<spg_t, PGCreationState> pgs_creating; using pgs_t = std::map<spg_t, Ref<PG>>; pgs_t pgs; public: using PGCreationBlocker = PGCreationState; using PGCreationBlockingEvent = PGCreationBlocker::BlockingEvent; /** * Get future for pg with a bool indicating whether it's already being * created. */ using wait_for_pg_ertr = crimson::errorator< crimson::ct_error::ecanceled>; using wait_for_pg_fut = wait_for_pg_ertr::future<Ref<PG>>; using wait_for_pg_ret = std::pair<wait_for_pg_fut, bool>; wait_for_pg_ret wait_for_pg(PGCreationBlockingEvent::TriggerI&&, spg_t pgid); /** * get PG in non-blocking manner */ Ref<PG> get_pg(spg_t pgid); /** * Set creating */ void set_creating(spg_t pgid); /** * Set newly created pg */ void pg_created(spg_t pgid, Ref<PG> pg); /** * Add newly loaded pg */ void pg_loaded(spg_t pgid, Ref<PG> pg); /** * Cancel pending creation of pgid. */ void pg_creation_canceled(spg_t pgid); void remove_pg(spg_t pgid); pgs_t& get_pgs() { return pgs; } const pgs_t& get_pgs() const { return pgs; } auto get_pg_count() const { return pgs.size(); } PGMap() = default; ~PGMap(); }; }
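// ---------------------------------------------------------------------------
// Editor's note: illustrative, self-contained sketch (not part of the original
// header) of the placement choice made in PGShardMapping::maybe_create_pg()
// when no core is requested: pick the core currently holding the fewest PGs.
// example_pick_least_loaded is a hypothetical helper using only the standard
// library; the real code works on core_to_num_pgs with core_id_t keys.
#include <algorithm>
#include <map>
namespace pg_map_doc_sketch {
inline int example_pick_least_loaded(const std::map<int, unsigned>& pgs_per_core) {
  // choose the entry with the smallest PG count; ties resolve to the lowest
  // core id because std::min_element keeps the first smallest element
  auto it = std::min_element(
    pgs_per_core.begin(), pgs_per_core.end(),
    [](const auto& a, const auto& b) { return a.second < b.second; });
  return it == pgs_per_core.end() ? -1 : it->first;
}
} // namespace pg_map_doc_sketch
// e.g. {{0,3},{1,1},{2,2}} -> core 1; the chosen core's count is then bumped,
// which is what keeps subsequent placements spread across cores.
// ---------------------------------------------------------------------------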
6,427
30.821782
86
h
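The core-selection step in PGShardMapping::maybe_create_pg above picks the shard with the fewest mapped PGs via std::min_element over core_to_num_pgs. Below is a minimal, self-contained sketch of that selection policy only; core_id_t is replaced by a plain int and none of the seastar sharding or proxy-to-core-0 machinery is reproduced.

#include <algorithm>
#include <cassert>
#include <iostream>
#include <map>

// Hypothetical stand-in for core_id_t; the real alias lives in smp_helpers.h.
using core_id_t = int;

// Pick the core currently holding the fewest PGs, then account for the new one.
core_id_t pick_least_loaded(std::map<core_id_t, unsigned>& core_to_num_pgs) {
  assert(!core_to_num_pgs.empty());
  auto it = std::min_element(
    core_to_num_pgs.begin(), core_to_num_pgs.end(),
    [](const auto& l, const auto& r) { return l.second < r.second; });
  ++it->second;          // the chosen core now owns one more PG
  return it->first;
}

int main() {
  std::map<core_id_t, unsigned> load{{0, 3}, {1, 1}, {2, 2}};
  std::cout << "new pg goes to core " << pick_least_loaded(load) << "\n"; // core 1
}

Ties resolve to the lowest core id, because std::min_element keeps the first minimum it encounters.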
null
ceph-main/src/crimson/osd/pg_meta.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "pg_meta.h" #include <string_view> #include "crimson/os/futurized_collection.h" #include "crimson/os/futurized_store.h" using std::string; using std::string_view; // prefix pgmeta_oid keys with _ so that PGLog::read_log_and_missing() can // easily skip them using crimson::os::FuturizedStore; PGMeta::PGMeta(FuturizedStore::Shard& store, spg_t pgid) : store{store}, pgid{pgid} {} namespace { template<typename T> std::optional<T> find_value(const FuturizedStore::Shard::omap_values_t& values, string_view key) { auto found = values.find(key); if (found == values.end()) { return {}; } auto p = found->second.cbegin(); T value; decode(value, p); return std::make_optional(std::move(value)); } } seastar::future<epoch_t> PGMeta::get_epoch() { return store.open_collection(coll_t{pgid}).then([this](auto ch) { return store.omap_get_values(ch, pgid.make_pgmeta_oid(), {string{infover_key}, string{epoch_key}}).safe_then( [](auto&& values) { { // sanity check auto infover = find_value<__u8>(values, infover_key); assert(infover); if (*infover < 10) { throw std::runtime_error("incompatible pg meta"); } } { auto epoch = find_value<epoch_t>(values, epoch_key); assert(epoch); return seastar::make_ready_future<epoch_t>(*epoch); } }, FuturizedStore::Shard::read_errorator::assert_all{ "PGMeta::get_epoch: unable to read pgmeta" }); }); } seastar::future<std::tuple<pg_info_t, PastIntervals>> PGMeta::load() { return store.open_collection(coll_t{pgid}).then([this](auto ch) { return store.omap_get_values(ch, pgid.make_pgmeta_oid(), {string{infover_key}, string{info_key}, string{biginfo_key}, string{fastinfo_key}}); }).safe_then([](auto&& values) { { // sanity check auto infover = find_value<__u8>(values, infover_key); assert(infover); if (infover < 10) { throw std::runtime_error("incompatible pg meta"); } } pg_info_t info; { auto found = find_value<pg_info_t>(values, info_key); assert(found); info = *std::move(found); } PastIntervals past_intervals; { using biginfo_t = std::pair<PastIntervals, decltype(info.purged_snaps)>; auto big_info = find_value<biginfo_t>(values, biginfo_key); assert(big_info); past_intervals = std::move(big_info->first); info.purged_snaps = std::move(big_info->second); } { auto fast_info = find_value<pg_fast_info_t>(values, fastinfo_key); if (fast_info) { fast_info->try_apply_to(&info); } } return seastar::make_ready_future<std::tuple<pg_info_t, PastIntervals>>( std::make_tuple(std::move(info), std::move(past_intervals))); }, FuturizedStore::Shard::read_errorator::assert_all{ "PGMeta::load: unable to read pgmeta" }); }
3,325
28.963964
81
cc
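find_value() in pg_meta.cc wraps "look up a key, decode it if present" into an std::optional, so callers can assert on mandatory keys (infover_key, epoch_key) and degrade gracefully on optional ones (fastinfo_key). The following is a self-contained analogue of that pattern only, using std::string payloads and std::istringstream in place of bufferlist and Ceph's decode(); the key names are made up for the example.

#include <iostream>
#include <map>
#include <optional>
#include <sstream>
#include <string>

// Decode a T out of the stored string; stands in for ceph::decode on a bufferlist.
template <typename T>
std::optional<T> find_value(const std::map<std::string, std::string>& values,
                            const std::string& key) {
  auto found = values.find(key);
  if (found == values.end()) {
    return std::nullopt;              // key absent: caller decides how to react
  }
  std::istringstream in(found->second);
  T value;
  in >> value;
  return value;
}

int main() {
  std::map<std::string, std::string> omap{{"_epoch", "42"}, {"_infover", "10"}};
  auto epoch = find_value<unsigned>(omap, "_epoch");
  auto fast  = find_value<unsigned>(omap, "_fastinfo");
  std::cout << "epoch=" << *epoch << " fastinfo present="
            << std::boolalpha << fast.has_value() << "\n";
}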
null
ceph-main/src/crimson/osd/pg_meta.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <tuple> #include <seastar/core/future.hh> #include "osd/osd_types.h" #include "crimson/os/futurized_store.h" /// PG related metadata class PGMeta { crimson::os::FuturizedStore::Shard& store; const spg_t pgid; public: PGMeta(crimson::os::FuturizedStore::Shard& store, spg_t pgid); seastar::future<epoch_t> get_epoch(); seastar::future<std::tuple<pg_info_t, PastIntervals>> load(); };
516
23.619048
70
h
null
ceph-main/src/crimson/osd/pg_recovery.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include <fmt/format.h> #include <fmt/ostream.h> #include <fmt/ranges.h> #include "crimson/common/type_helpers.h" #include "crimson/osd/backfill_facades.h" #include "crimson/osd/osd_operations/background_recovery.h" #include "crimson/osd/osd_operations/peering_event.h" #include "crimson/osd/pg.h" #include "crimson/osd/pg_backend.h" #include "crimson/osd/pg_recovery.h" #include "osd/osd_types.h" #include "osd/PeeringState.h" namespace { seastar::logger& logger() { return crimson::get_logger(ceph_subsys_osd); } } using std::map; using std::set; void PGRecovery::start_pglogbased_recovery() { using PglogBasedRecovery = crimson::osd::PglogBasedRecovery; (void) pg->get_shard_services().start_operation<PglogBasedRecovery>( static_cast<crimson::osd::PG*>(pg), pg->get_shard_services(), pg->get_osdmap_epoch(), float(0.001)); } PGRecovery::interruptible_future<bool> PGRecovery::start_recovery_ops( RecoveryBackend::RecoveryBlockingEvent::TriggerI& trigger, size_t max_to_start) { assert(pg->is_primary()); assert(pg->is_peered()); assert(pg->is_recovering()); // in ceph-osd the do_recovery() path handles both the pg log-based // recovery and the backfill, albeit they are separated at the layer // of PeeringState. In crimson-osd backfill has been cut from it, so // and do_recovery() is actually solely for pg log-based recovery. // At the time of writing it's considered to move it to FSM and fix // the naming as well. assert(!pg->is_backfilling()); assert(!pg->get_peering_state().is_deleting()); std::vector<interruptible_future<>> started; started.reserve(max_to_start); max_to_start -= start_primary_recovery_ops(trigger, max_to_start, &started); if (max_to_start > 0) { max_to_start -= start_replica_recovery_ops(trigger, max_to_start, &started); } using interruptor = crimson::interruptible::interruptor<crimson::osd::IOInterruptCondition>; return interruptor::parallel_for_each(started, [] (auto&& ifut) { return std::move(ifut); }).then_interruptible([this] { bool done = !pg->get_peering_state().needs_recovery(); if (done) { logger().debug("start_recovery_ops: AllReplicasRecovered for pg: {}", pg->get_pgid()); using LocalPeeringEvent = crimson::osd::LocalPeeringEvent; if (!pg->get_peering_state().needs_backfill()) { logger().debug("start_recovery_ops: AllReplicasRecovered for pg: {}", pg->get_pgid()); (void) pg->get_shard_services().start_operation<LocalPeeringEvent>( static_cast<crimson::osd::PG*>(pg), pg->get_pg_whoami(), pg->get_pgid(), pg->get_osdmap_epoch(), pg->get_osdmap_epoch(), PeeringState::AllReplicasRecovered{}); } else { logger().debug("start_recovery_ops: RequestBackfill for pg: {}", pg->get_pgid()); (void) pg->get_shard_services().start_operation<LocalPeeringEvent>( static_cast<crimson::osd::PG*>(pg), pg->get_pg_whoami(), pg->get_pgid(), pg->get_osdmap_epoch(), pg->get_osdmap_epoch(), PeeringState::RequestBackfill{}); } } return seastar::make_ready_future<bool>(!done); }); } size_t PGRecovery::start_primary_recovery_ops( RecoveryBackend::RecoveryBlockingEvent::TriggerI& trigger, size_t max_to_start, std::vector<PGRecovery::interruptible_future<>> *out) { if (!pg->is_recovering()) { return 0; } if (!pg->get_peering_state().have_missing()) { pg->get_peering_state().local_recovery_complete(); return 0; } const auto &missing = pg->get_peering_state().get_pg_log().get_missing(); logger().info("{} recovering {} in pg {}, missing {}", __func__, pg->get_recovery_backend()->total_recovering(), 
*static_cast<crimson::osd::PG*>(pg), missing); unsigned started = 0; int skipped = 0; map<version_t, hobject_t>::const_iterator p = missing.get_rmissing().lower_bound(pg->get_peering_state().get_pg_log().get_log().last_requested); while (started < max_to_start && p != missing.get_rmissing().end()) { // TODO: chain futures here to enable yielding to scheduler? hobject_t soid; version_t v = p->first; auto it_objects = pg->get_peering_state().get_pg_log().get_log().objects.find(p->second); if (it_objects != pg->get_peering_state().get_pg_log().get_log().objects.end()) { // look at log! pg_log_entry_t *latest = it_objects->second; assert(latest->is_update() || latest->is_delete()); soid = latest->soid; } else { soid = p->second; } const pg_missing_item& item = missing.get_items().find(p->second)->second; ++p; hobject_t head = soid.get_head(); logger().info( "{} {} item.need {} {} {} {} {}", __func__, soid, item.need, missing.is_missing(soid) ? " (missing)":"", missing.is_missing(head) ? " (missing head)":"", pg->get_recovery_backend()->is_recovering(soid) ? " (recovering)":"", pg->get_recovery_backend()->is_recovering(head) ? " (recovering head)":""); // TODO: handle lost/unfound if (pg->get_recovery_backend()->is_recovering(soid)) { auto& recovery_waiter = pg->get_recovery_backend()->get_recovering(soid); out->emplace_back(recovery_waiter.wait_for_recovered(trigger)); ++started; } else if (pg->get_recovery_backend()->is_recovering(head)) { ++skipped; } else { out->emplace_back(recover_missing(trigger, soid, item.need)); ++started; } if (!skipped) pg->get_peering_state().set_last_requested(v); } logger().info("{} started {} skipped {}", __func__, started, skipped); return started; } size_t PGRecovery::start_replica_recovery_ops( RecoveryBackend::RecoveryBlockingEvent::TriggerI& trigger, size_t max_to_start, std::vector<PGRecovery::interruptible_future<>> *out) { if (!pg->is_recovering()) { return 0; } uint64_t started = 0; assert(!pg->get_peering_state().get_acting_recovery_backfill().empty()); auto recovery_order = get_replica_recovery_order(); for (auto &peer : recovery_order) { assert(peer != pg->get_peering_state().get_primary()); const auto& pm = pg->get_peering_state().get_peer_missing(peer); logger().debug("{}: peer osd.{} missing {} objects", __func__, peer, pm.num_missing()); logger().trace("{}: peer osd.{} missing {}", __func__, peer, pm.get_items()); // recover oldest first for (auto p = pm.get_rmissing().begin(); p != pm.get_rmissing().end() && started < max_to_start; ++p) { const auto &soid = p->second; if (pg->get_peering_state().get_missing_loc().is_unfound(soid)) { logger().debug("{}: object {} still unfound", __func__, soid); continue; } const pg_info_t &pi = pg->get_peering_state().get_peer_info(peer); if (soid > pi.last_backfill) { if (!pg->get_recovery_backend()->is_recovering(soid)) { logger().error( "{}: object {} in missing set for backfill (last_backfill {})" " but not in recovering", __func__, soid, pi.last_backfill); ceph_abort(); } continue; } if (pg->get_recovery_backend()->is_recovering(soid)) { logger().debug("{}: already recovering object {}", __func__, soid); auto& recovery_waiter = pg->get_recovery_backend()->get_recovering(soid); out->emplace_back(recovery_waiter.wait_for_recovered(trigger)); started++; continue; } if (pg->get_peering_state().get_missing_loc().is_deleted(soid)) { logger().debug("{}: soid {} is a delete, removing", __func__, soid); map<hobject_t,pg_missing_item>::const_iterator r = pm.get_items().find(soid); started++; out->emplace_back( 
prep_object_replica_deletes(trigger, soid, r->second.need)); continue; } if (soid.is_snap() && pg->get_peering_state().get_pg_log().get_missing().is_missing( soid.get_head())) { logger().debug("{}: head {} still missing on primary", __func__, soid.get_head()); continue; } if (pg->get_peering_state().get_pg_log().get_missing().is_missing(soid)) { logger().debug("{}: soid {} still missing on primary", __func__, soid); continue; } logger().debug("{}: recover_object_replicas({})", __func__,soid); map<hobject_t,pg_missing_item>::const_iterator r = pm.get_items().find( soid); started++; out->emplace_back( prep_object_replica_pushes(trigger, soid, r->second.need)); } } return started; } PGRecovery::interruptible_future<> PGRecovery::recover_missing( RecoveryBackend::RecoveryBlockingEvent::TriggerI& trigger, const hobject_t &soid, eversion_t need) { if (pg->get_peering_state().get_missing_loc().is_deleted(soid)) { return pg->get_recovery_backend()->add_recovering(soid).wait_track_blocking( trigger, pg->get_recovery_backend()->recover_delete(soid, need)); } else { return pg->get_recovery_backend()->add_recovering(soid).wait_track_blocking( trigger, pg->get_recovery_backend()->recover_object(soid, need) .handle_exception_interruptible( [=, this, soid = std::move(soid)] (auto e) { on_failed_recover({ pg->get_pg_whoami() }, soid, need); return seastar::make_ready_future<>(); }) ); } } RecoveryBackend::interruptible_future<> PGRecovery::prep_object_replica_deletes( RecoveryBackend::RecoveryBlockingEvent::TriggerI& trigger, const hobject_t& soid, eversion_t need) { return pg->get_recovery_backend()->add_recovering(soid).wait_track_blocking( trigger, pg->get_recovery_backend()->push_delete(soid, need).then_interruptible( [=, this] { object_stat_sum_t stat_diff; stat_diff.num_objects_recovered = 1; on_global_recover(soid, stat_diff, true); return seastar::make_ready_future<>(); }) ); } RecoveryBackend::interruptible_future<> PGRecovery::prep_object_replica_pushes( RecoveryBackend::RecoveryBlockingEvent::TriggerI& trigger, const hobject_t& soid, eversion_t need) { return pg->get_recovery_backend()->add_recovering(soid).wait_track_blocking( trigger, pg->get_recovery_backend()->recover_object(soid, need) .handle_exception_interruptible( [=, this, soid = std::move(soid)] (auto e) { on_failed_recover({ pg->get_pg_whoami() }, soid, need); return seastar::make_ready_future<>(); }) ); } void PGRecovery::on_local_recover( const hobject_t& soid, const ObjectRecoveryInfo& recovery_info, const bool is_delete, ceph::os::Transaction& t) { if (const auto &log = pg->get_peering_state().get_pg_log(); !is_delete && log.get_missing().is_missing(recovery_info.soid) && log.get_missing().get_items().find(recovery_info.soid)->second.need > recovery_info.version) { assert(pg->is_primary()); if (const auto* latest = log.get_log().objects.find(recovery_info.soid)->second; latest->op == pg_log_entry_t::LOST_REVERT) { ceph_abort("mark_unfound_lost (LOST_REVERT) is not implemented yet"); } } pg->get_peering_state().recover_got(soid, recovery_info.version, is_delete, t); if (pg->is_primary()) { if (!is_delete) { auto& obc = pg->get_recovery_backend()->get_recovering(soid).obc; //TODO: move to pg backend? 
obc->obs.exists = true; obc->obs.oi = recovery_info.oi; } if (!pg->is_unreadable_object(soid)) { pg->get_recovery_backend()->get_recovering(soid).set_readable(); } pg->publish_stats_to_osd(); } } void PGRecovery::on_global_recover ( const hobject_t& soid, const object_stat_sum_t& stat_diff, const bool is_delete) { logger().info("{} {}", __func__, soid); pg->get_peering_state().object_recovered(soid, stat_diff); pg->publish_stats_to_osd(); auto& recovery_waiter = pg->get_recovery_backend()->get_recovering(soid); if (!is_delete) recovery_waiter.obc->drop_recovery_read(); recovery_waiter.set_recovered(); pg->get_recovery_backend()->remove_recovering(soid); } void PGRecovery::on_failed_recover( const set<pg_shard_t>& from, const hobject_t& soid, const eversion_t& v) { for (auto pg_shard : from) { if (pg_shard != pg->get_pg_whoami()) { pg->get_peering_state().force_object_missing(pg_shard, soid, v); } } } void PGRecovery::on_peer_recover( pg_shard_t peer, const hobject_t &oid, const ObjectRecoveryInfo &recovery_info) { crimson::get_logger(ceph_subsys_osd).debug( "{}: {}, {} on {}", __func__, oid, recovery_info.version, peer); pg->get_peering_state().on_peer_recover(peer, oid, recovery_info.version); } void PGRecovery::_committed_pushed_object(epoch_t epoch, eversion_t last_complete) { if (!pg->has_reset_since(epoch)) { pg->get_peering_state().recovery_committed_to(last_complete); } else { crimson::get_logger(ceph_subsys_osd).debug( "{} pg has changed, not touching last_complete_ondisk", __func__); } } template <class EventT> void PGRecovery::start_backfill_recovery(const EventT& evt) { using BackfillRecovery = crimson::osd::BackfillRecovery; std::ignore = pg->get_shard_services().start_operation<BackfillRecovery>( static_cast<crimson::osd::PG*>(pg), pg->get_shard_services(), pg->get_osdmap_epoch(), evt); } void PGRecovery::request_replica_scan( const pg_shard_t& target, const hobject_t& begin, const hobject_t& end) { logger().debug("{}: target.osd={}", __func__, target.osd); auto msg = crimson::make_message<MOSDPGScan>( MOSDPGScan::OP_SCAN_GET_DIGEST, pg->get_pg_whoami(), pg->get_osdmap_epoch(), pg->get_last_peering_reset(), spg_t(pg->get_pgid().pgid, target.shard), begin, end); std::ignore = pg->get_shard_services().send_to_osd( target.osd, std::move(msg), pg->get_osdmap_epoch()); } void PGRecovery::request_primary_scan( const hobject_t& begin) { logger().debug("{}", __func__); using crimson::common::local_conf; std::ignore = pg->get_recovery_backend()->scan_for_backfill( begin, local_conf()->osd_backfill_scan_min, local_conf()->osd_backfill_scan_max ).then_interruptible([this] (BackfillInterval bi) { logger().debug("request_primary_scan:{}", __func__); using BackfillState = crimson::osd::BackfillState; start_backfill_recovery(BackfillState::PrimaryScanned{ std::move(bi) }); }); } void PGRecovery::enqueue_push( const hobject_t& obj, const eversion_t& v) { logger().debug("{}: obj={} v={}", __func__, obj, v); pg->get_recovery_backend()->add_recovering(obj); std::ignore = pg->get_recovery_backend()->recover_object(obj, v).\ handle_exception_interruptible([] (auto) { ceph_abort_msg("got exception on backfill's push"); return seastar::make_ready_future<>(); }).then_interruptible([this, obj] { logger().debug("enqueue_push:{}", __func__); using BackfillState = crimson::osd::BackfillState; start_backfill_recovery(BackfillState::ObjectPushed(std::move(obj))); }); } void PGRecovery::enqueue_drop( const pg_shard_t& target, const hobject_t& obj, const eversion_t& v) { // allocate a pair if target is seen 
for the first time auto& req = backfill_drop_requests[target]; if (!req) { req = crimson::make_message<MOSDPGBackfillRemove>( spg_t(pg->get_pgid().pgid, target.shard), pg->get_osdmap_epoch()); } req->ls.emplace_back(obj, v); } void PGRecovery::maybe_flush() { for (auto& [target, req] : backfill_drop_requests) { std::ignore = pg->get_shard_services().send_to_osd( target.osd, std::move(req), pg->get_osdmap_epoch()); } backfill_drop_requests.clear(); } void PGRecovery::update_peers_last_backfill( const hobject_t& new_last_backfill) { logger().debug("{}: new_last_backfill={}", __func__, new_last_backfill); // If new_last_backfill == MAX, then we will send OP_BACKFILL_FINISH to // all the backfill targets. Otherwise, we will move last_backfill up on // those targets need it and send OP_BACKFILL_PROGRESS to them. for (const auto& bt : pg->get_peering_state().get_backfill_targets()) { if (const pg_info_t& pinfo = pg->get_peering_state().get_peer_info(bt); new_last_backfill > pinfo.last_backfill) { pg->get_peering_state().update_peer_last_backfill(bt, new_last_backfill); auto m = crimson::make_message<MOSDPGBackfill>( pinfo.last_backfill.is_max() ? MOSDPGBackfill::OP_BACKFILL_FINISH : MOSDPGBackfill::OP_BACKFILL_PROGRESS, pg->get_osdmap_epoch(), pg->get_last_peering_reset(), spg_t(pg->get_pgid().pgid, bt.shard)); // Use default priority here, must match sub_op priority // TODO: if pinfo.last_backfill.is_max(), then // start_recovery_op(hobject_t::get_max()); m->last_backfill = pinfo.last_backfill; m->stats = pinfo.stats; std::ignore = pg->get_shard_services().send_to_osd( bt.osd, std::move(m), pg->get_osdmap_epoch()); logger().info("{}: peer {} num_objects now {} / {}", __func__, bt, pinfo.stats.stats.sum.num_objects, pg->get_info().stats.stats.sum.num_objects); } } } bool PGRecovery::budget_available() const { // TODO: the limits! return true; } void PGRecovery::backfilled() { using LocalPeeringEvent = crimson::osd::LocalPeeringEvent; std::ignore = pg->get_shard_services().start_operation<LocalPeeringEvent>( static_cast<crimson::osd::PG*>(pg), pg->get_pg_whoami(), pg->get_pgid(), pg->get_osdmap_epoch(), pg->get_osdmap_epoch(), PeeringState::Backfilled{}); } void PGRecovery::dispatch_backfill_event( boost::intrusive_ptr<const boost::statechart::event_base> evt) { logger().debug("{}", __func__); backfill_state->process_event(evt); } void PGRecovery::on_backfill_reserved() { logger().debug("{}", __func__); // PIMP and depedency injection for the sake unittestability. // I'm not afraid about the performance here. using BackfillState = crimson::osd::BackfillState; backfill_state = std::make_unique<BackfillState>( *this, std::make_unique<crimson::osd::PeeringFacade>(pg->get_peering_state()), std::make_unique<crimson::osd::PGFacade>( *static_cast<crimson::osd::PG*>(pg))); // yes, it's **not** backfilling yet. The PG_STATE_BACKFILLING // will be set after on_backfill_reserved() returns. // Backfill needs to take this into consideration when scheduling // events -- they must be mutually exclusive with PeeringEvent // instances. Otherwise the execution might begin without having // the state updated. ceph_assert(!pg->get_peering_state().is_backfilling()); start_backfill_recovery(BackfillState::Triggered{}); }
19,003
32.340351
102
cc
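start_primary_recovery_ops() above walks missing.get_rmissing() -- a map keyed by version -- starting at last_requested, so objects are recovered oldest-first and progress is remembered between calls. Here is the traversal pattern in isolation, with a plain std::map<uint64_t, std::string> standing in for the version-to-object index; the object names and budget are illustrative only.

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

int main() {
  // version -> object, ordered ascending, i.e. oldest need first.
  std::map<uint64_t, std::string> rmissing{
    {3, "obj_a"}, {7, "obj_b"}, {9, "obj_c"}, {12, "obj_d"}};

  uint64_t last_requested = 7;   // persisted progress marker
  const size_t max_to_start = 2; // per-call budget, like max_to_start above
  size_t started = 0;

  for (auto p = rmissing.lower_bound(last_requested);
       started < max_to_start && p != rmissing.end(); ++p) {
    std::cout << "recover " << p->second << " (need version " << p->first << ")\n";
    last_requested = p->first;   // the real loop only advances this when nothing was skipped
    ++started;
  }
  std::cout << "last_requested now " << last_requested << "\n";
}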
null
ceph-main/src/crimson/osd/pg_recovery.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <seastar/core/future.hh> #include "crimson/osd/backfill_state.h" #include "crimson/osd/pg_interval_interrupt_condition.h" #include "crimson/osd/osd_operation.h" #include "crimson/osd/pg_recovery_listener.h" #include "crimson/osd/scheduler/scheduler.h" #include "crimson/osd/shard_services.h" #include "crimson/osd/recovery_backend.h" #include "osd/object_state.h" namespace crimson::osd { class UrgentRecovery; } class MOSDPGBackfillRemove; class PGBackend; class PGRecovery : public crimson::osd::BackfillState::BackfillListener { public: template <typename T = void> using interruptible_future = RecoveryBackend::interruptible_future<T>; PGRecovery(PGRecoveryListener* pg) : pg(pg) {} virtual ~PGRecovery() {} void start_pglogbased_recovery(); interruptible_future<bool> start_recovery_ops( RecoveryBackend::RecoveryBlockingEvent::TriggerI&, size_t max_to_start); void on_backfill_reserved(); void dispatch_backfill_event( boost::intrusive_ptr<const boost::statechart::event_base> evt); seastar::future<> stop() { return seastar::now(); } private: PGRecoveryListener* pg; size_t start_primary_recovery_ops( RecoveryBackend::RecoveryBlockingEvent::TriggerI&, size_t max_to_start, std::vector<interruptible_future<>> *out); size_t start_replica_recovery_ops( RecoveryBackend::RecoveryBlockingEvent::TriggerI&, size_t max_to_start, std::vector<interruptible_future<>> *out); std::vector<pg_shard_t> get_replica_recovery_order() const { return pg->get_replica_recovery_order(); } RecoveryBackend::interruptible_future<> recover_missing( RecoveryBackend::RecoveryBlockingEvent::TriggerI&, const hobject_t &soid, eversion_t need); RecoveryBackend::interruptible_future<> prep_object_replica_deletes( RecoveryBackend::RecoveryBlockingEvent::TriggerI& trigger, const hobject_t& soid, eversion_t need); RecoveryBackend::interruptible_future<> prep_object_replica_pushes( RecoveryBackend::RecoveryBlockingEvent::TriggerI& trigger, const hobject_t& soid, eversion_t need); void on_local_recover( const hobject_t& soid, const ObjectRecoveryInfo& recovery_info, bool is_delete, ceph::os::Transaction& t); void on_global_recover ( const hobject_t& soid, const object_stat_sum_t& stat_diff, bool is_delete); void on_failed_recover( const std::set<pg_shard_t>& from, const hobject_t& soid, const eversion_t& v); void on_peer_recover( pg_shard_t peer, const hobject_t &oid, const ObjectRecoveryInfo &recovery_info); void _committed_pushed_object(epoch_t epoch, eversion_t last_complete); friend class ReplicatedRecoveryBackend; friend class crimson::osd::UrgentRecovery; // backfill begin std::unique_ptr<crimson::osd::BackfillState> backfill_state; std::map<pg_shard_t, MURef<MOSDPGBackfillRemove>> backfill_drop_requests; template <class EventT> void start_backfill_recovery( const EventT& evt); void request_replica_scan( const pg_shard_t& target, const hobject_t& begin, const hobject_t& end) final; void request_primary_scan( const hobject_t& begin) final; void enqueue_push( const hobject_t& obj, const eversion_t& v) final; void enqueue_drop( const pg_shard_t& target, const hobject_t& obj, const eversion_t& v) final; void maybe_flush() final; void update_peers_last_backfill( const hobject_t& new_last_backfill) final; bool budget_available() const final; void backfilled() final; friend crimson::osd::BackfillState::PGFacade; friend crimson::osd::PG; // backfill end };
3,761
30.613445
73
h
null
ceph-main/src/crimson/osd/pg_recovery_listener.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <seastar/core/future.hh> #include "common/hobject.h" #include "include/types.h" #include "osd/osd_types.h" namespace crimson::osd { class ShardServices; }; class RecoveryBackend; class PGRecovery; class PGRecoveryListener { public: virtual crimson::osd::ShardServices& get_shard_services() = 0; virtual PGRecovery* get_recovery_handler() = 0; virtual epoch_t get_osdmap_epoch() const = 0; virtual bool is_primary() const = 0; virtual bool is_peered() const = 0; virtual bool is_recovering() const = 0; virtual bool is_backfilling() const = 0; virtual PeeringState& get_peering_state() = 0; virtual const pg_shard_t& get_pg_whoami() const = 0; virtual const spg_t& get_pgid() const = 0; virtual RecoveryBackend* get_recovery_backend() = 0; virtual bool is_unreadable_object(const hobject_t&, eversion_t* v = 0) const = 0; virtual bool has_reset_since(epoch_t) const = 0; virtual std::vector<pg_shard_t> get_replica_recovery_order() const = 0; virtual epoch_t get_last_peering_reset() const = 0; virtual const pg_info_t& get_info() const= 0; virtual seastar::future<> stop() = 0; virtual void publish_stats_to_osd() = 0; };
1,286
31.175
83
h
null
ceph-main/src/crimson/osd/pg_shard_manager.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "crimson/osd/pg_shard_manager.h" #include "crimson/osd/pg.h" namespace { seastar::logger& logger() { return crimson::get_logger(ceph_subsys_osd); } } namespace crimson::osd { seastar::future<> PGShardManager::load_pgs(crimson::os::FuturizedStore& store) { ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return store.list_collections( ).then([this](auto colls_cores) { return seastar::parallel_for_each( colls_cores, [this](auto coll_core) { auto[coll, shard_core] = coll_core; spg_t pgid; if (coll.is_pg(&pgid)) { return pg_to_shard_mapping.maybe_create_pg( pgid, shard_core ).then([this, pgid] (auto core) { return this->template with_remote_shard_state( core, [pgid]( PerShardState &per_shard_state, ShardServices &shard_services) { return shard_services.load_pg( pgid ).then([pgid, &per_shard_state](auto &&pg) { logger().info("load_pgs: loaded {}", pgid); per_shard_state.pg_map.pg_loaded(pgid, std::move(pg)); return seastar::now(); }); }); }); } else if (coll.is_temp(&pgid)) { logger().warn( "found temp collection on crimson osd, should be impossible: {}", coll); ceph_assert(0 == "temp collection on crimson osd, should be impossible"); return seastar::now(); } else { logger().warn("ignoring unrecognized collection: {}", coll); return seastar::now(); } }); }); } seastar::future<> PGShardManager::stop_pgs() { ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return shard_services.invoke_on_all([](auto &local_service) { return local_service.local_state.stop_pgs(); }); } seastar::future<std::map<pg_t, pg_stat_t>> PGShardManager::get_pg_stats() const { ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return shard_services.map_reduce0( [](auto &local) { return local.local_state.get_pg_stats(); }, std::map<pg_t, pg_stat_t>(), [](auto &&left, auto &&right) { left.merge(std::move(right)); return std::move(left); }); } seastar::future<> PGShardManager::broadcast_map_to_pgs(epoch_t epoch) { ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return shard_services.invoke_on_all([epoch](auto &local_service) { return local_service.local_state.broadcast_map_to_pgs( local_service, epoch ); }).then([this, epoch] { logger().debug("PGShardManager::broadcast_map_to_pgs " "broadcasted up to {}", epoch); return shard_services.invoke_on_all([epoch](auto &local_service) { local_service.local_state.osdmap_gate.got_map(epoch); return seastar::now(); }); }); } seastar::future<> PGShardManager::set_up_epoch(epoch_t e) { ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return shard_services.invoke_on_all( seastar::smp_submit_to_options{}, [e](auto &local_service) { local_service.local_state.set_up_epoch(e); return seastar::now(); }); } }
3,164
28.036697
78
cc
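PGShardManager::get_pg_stats() above reduces one per-shard map into a single result with map_reduce0, merging right into left. The merge step on its own, without seastar, can be expressed with std::map::merge (C++17); the shard contents below are invented for the example and assume, as in the real code, that each PG is owned by exactly one shard.

#include <iostream>
#include <map>
#include <string>
#include <vector>

int main() {
  // Pretend each inner map came back from one shard's local_state.get_pg_stats().
  std::vector<std::map<std::string, int>> per_shard{
    {{"1.0", 10}, {"1.1", 4}},
    {{"1.2", 7}},
    {{"1.3", 2}, {"1.4", 5}},
  };

  std::map<std::string, int> all;
  for (auto& shard_map : per_shard) {
    all.merge(shard_map);   // keys are disjoint, so nothing is left behind
  }

  for (const auto& [pgid, stat] : all) {
    std::cout << pgid << " -> " << stat << "\n";
  }
}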
null
ceph-main/src/crimson/osd/pg_shard_manager.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <seastar/core/future.hh> #include <seastar/core/shared_future.hh> #include <seastar/core/sharded.hh> #include "crimson/osd/shard_services.h" #include "crimson/osd/pg_map.h" namespace crimson::os { class FuturizedStore; } namespace crimson::osd { /** * PGShardManager * * Manages all state required to partition PGs over seastar reactors * as well as state required to route messages to pgs. Mediates access to * shared resources required by PGs (objectstore, messenger, monclient, * etc) */ class PGShardManager { seastar::sharded<OSDSingletonState> &osd_singleton_state; seastar::sharded<ShardServices> &shard_services; PGShardMapping &pg_to_shard_mapping; #define FORWARD_CONST(FROM_METHOD, TO_METHOD, TARGET) \ template <typename... Args> \ auto FROM_METHOD(Args&&... args) const { \ return TARGET.TO_METHOD(std::forward<Args>(args)...); \ } #define FORWARD(FROM_METHOD, TO_METHOD, TARGET) \ template <typename... Args> \ auto FROM_METHOD(Args&&... args) { \ return TARGET.TO_METHOD(std::forward<Args>(args)...); \ } #define FORWARD_TO_OSD_SINGLETON(METHOD) \ FORWARD(METHOD, METHOD, get_osd_singleton_state()) public: using cached_map_t = OSDMapService::cached_map_t; using local_cached_map_t = OSDMapService::local_cached_map_t; PGShardManager( seastar::sharded<OSDSingletonState> &osd_singleton_state, seastar::sharded<ShardServices> &shard_services, PGShardMapping &pg_to_shard_mapping) : osd_singleton_state(osd_singleton_state), shard_services(shard_services), pg_to_shard_mapping(pg_to_shard_mapping) {} auto &get_osd_singleton_state() { ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return osd_singleton_state.local(); } auto &get_osd_singleton_state() const { ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return osd_singleton_state.local(); } auto &get_shard_services() { return shard_services.local(); } auto &get_shard_services() const { return shard_services.local(); } auto &get_local_state() { return get_shard_services().local_state; } auto &get_local_state() const { return get_shard_services().local_state; } seastar::future<> update_map(local_cached_map_t &&map) { get_osd_singleton_state().update_map( make_local_shared_foreign(local_cached_map_t(map)) ); /* We need each core to get its own foreign_ptr<local_cached_map_t>. * foreign_ptr can't be cheaply copied, so we make one for each core * up front. 
*/ return seastar::do_with( std::vector<seastar::foreign_ptr<local_cached_map_t>>(), [this, map](auto &fmaps) { fmaps.resize(seastar::smp::count); for (auto &i: fmaps) { i = seastar::foreign_ptr(map); } return shard_services.invoke_on_all( [&fmaps](auto &local) mutable { local.local_state.update_map( make_local_shared_foreign( std::move(fmaps[seastar::this_shard_id()]) )); }); }); } seastar::future<> stop_registries() { return shard_services.invoke_on_all([](auto &local) { return local.local_state.stop_registry(); }); } FORWARD_TO_OSD_SINGLETON(send_pg_created) // osd state forwards FORWARD(is_active, is_active, get_shard_services().local_state.osd_state) FORWARD(is_preboot, is_preboot, get_shard_services().local_state.osd_state) FORWARD(is_booting, is_booting, get_shard_services().local_state.osd_state) FORWARD(is_stopping, is_stopping, get_shard_services().local_state.osd_state) FORWARD(is_prestop, is_prestop, get_shard_services().local_state.osd_state) FORWARD(is_initializing, is_initializing, get_shard_services().local_state.osd_state) FORWARD(set_prestop, set_prestop, get_shard_services().local_state.osd_state) FORWARD(set_preboot, set_preboot, get_shard_services().local_state.osd_state) FORWARD(set_booting, set_booting, get_shard_services().local_state.osd_state) FORWARD(set_stopping, set_stopping, get_shard_services().local_state.osd_state) FORWARD(set_active, set_active, get_shard_services().local_state.osd_state) FORWARD(when_active, when_active, get_shard_services().local_state.osd_state) FORWARD_CONST(get_osd_state_string, to_string, get_shard_services().local_state.osd_state) FORWARD(got_map, got_map, get_shard_services().local_state.osdmap_gate) FORWARD(wait_for_map, wait_for_map, get_shard_services().local_state.osdmap_gate) // Metacoll FORWARD_TO_OSD_SINGLETON(init_meta_coll) FORWARD_TO_OSD_SINGLETON(get_meta_coll) FORWARD_TO_OSD_SINGLETON(set_superblock) // Core OSDMap methods FORWARD_TO_OSD_SINGLETON(get_local_map) FORWARD_TO_OSD_SINGLETON(load_map_bl) FORWARD_TO_OSD_SINGLETON(load_map_bls) FORWARD_TO_OSD_SINGLETON(store_maps) seastar::future<> set_up_epoch(epoch_t e); template <typename F> auto with_remote_shard_state(core_id_t core, F &&f) { return shard_services.invoke_on( core, [f=std::move(f)](auto &target_shard_services) mutable { return std::invoke( std::move(f), target_shard_services.local_state, target_shard_services); }); } template <typename T, typename F> auto with_remote_shard_state_and_op( core_id_t core, typename T::IRef &&op, F &&f) { if (seastar::this_shard_id() == core) { auto &target_shard_services = shard_services.local(); return std::invoke( std::move(f), target_shard_services.local_state, target_shard_services, std::move(op)); } return op->prepare_remote_submission( ).then([op=std::move(op), f=std::move(f), this, core ](auto f_conn) mutable { return shard_services.invoke_on( core, [f=std::move(f), op=std::move(op), f_conn=std::move(f_conn) ](auto &target_shard_services) mutable { op->finish_remote_submission(std::move(f_conn)); return std::invoke( std::move(f), target_shard_services.local_state, target_shard_services, std::move(op)); }); }); } /// Runs opref on the appropriate core, creating the pg as necessary. 
template <typename T> seastar::future<> run_with_pg_maybe_create( typename T::IRef op ) { ceph_assert(op->use_count() == 1); auto &logger = crimson::get_logger(ceph_subsys_osd); static_assert(T::can_create()); logger.debug("{}: can_create", *op); get_local_state().registry.remove_from_registry(*op); return pg_to_shard_mapping.maybe_create_pg( op->get_pgid() ).then([this, op = std::move(op)](auto core) mutable { return this->template with_remote_shard_state_and_op<T>( core, std::move(op), [](PerShardState &per_shard_state, ShardServices &shard_services, typename T::IRef op) { per_shard_state.registry.add_to_registry(*op); auto &logger = crimson::get_logger(ceph_subsys_osd); auto &opref = *op; return opref.template with_blocking_event< PGMap::PGCreationBlockingEvent >([&shard_services, &opref]( auto &&trigger) { return shard_services.get_or_create_pg( std::move(trigger), opref.get_pgid(), std::move(opref.get_create_info()) ); }).safe_then([&logger, &shard_services, &opref](Ref<PG> pgref) { logger.debug("{}: have_pg", opref); return opref.with_pg(shard_services, pgref); }).handle_error( crimson::ct_error::ecanceled::handle([&logger, &opref](auto) { logger.debug("{}: pg creation canceled, dropping", opref); return seastar::now(); }) ).then([op=std::move(op)] {}); }); }); } /// Runs opref on the appropriate core, waiting for pg as necessary template <typename T> seastar::future<> run_with_pg_maybe_wait( typename T::IRef op ) { ceph_assert(op->use_count() == 1); auto &logger = crimson::get_logger(ceph_subsys_osd); static_assert(!T::can_create()); logger.debug("{}: !can_create", *op); get_local_state().registry.remove_from_registry(*op); return pg_to_shard_mapping.maybe_create_pg( op->get_pgid() ).then([this, op = std::move(op)](auto core) mutable { return this->template with_remote_shard_state_and_op<T>( core, std::move(op), [](PerShardState &per_shard_state, ShardServices &shard_services, typename T::IRef op) { per_shard_state.registry.add_to_registry(*op); auto &logger = crimson::get_logger(ceph_subsys_osd); auto &opref = *op; return opref.template with_blocking_event< PGMap::PGCreationBlockingEvent >([&shard_services, &opref]( auto &&trigger) { return shard_services.wait_for_pg( std::move(trigger), opref.get_pgid()); }).safe_then([&logger, &shard_services, &opref](Ref<PG> pgref) { logger.debug("{}: have_pg", opref); return opref.with_pg(shard_services, pgref); }).handle_error( crimson::ct_error::ecanceled::handle([&logger, &opref](auto) { logger.debug("{}: pg creation canceled, dropping", opref); return seastar::now(); }) ).then([op=std::move(op)] {}); }); }); } seastar::future<> load_pgs(crimson::os::FuturizedStore& store); seastar::future<> stop_pgs(); seastar::future<std::map<pg_t, pg_stat_t>> get_pg_stats() const; /** * invoke_method_on_each_shard_seq * * Invokes shard_services method on each shard sequentially. */ template <typename F, typename... Args> seastar::future<> invoke_on_each_shard_seq( F &&f) const { return sharded_map_seq( shard_services, [f=std::forward<F>(f)](const ShardServices &shard_services) mutable { return std::invoke( f, shard_services); }); } /** * for_each_pg * * Invokes f on each pg sequentially. Caller may rely on f not being * invoked concurrently on multiple cores. 
*/ template <typename F> seastar::future<> for_each_pg(F &&f) const { return invoke_on_each_shard_seq( [f=std::move(f)](const auto &local_service) mutable { for (auto &pg: local_service.local_state.pg_map.get_pgs()) { std::apply(f, pg); } return seastar::now(); }); } /** * for_each_pgid * * Syncronously invokes f on each pgid */ template <typename F> void for_each_pgid(F &&f) const { return pg_to_shard_mapping.for_each_pgid( std::forward<F>(f)); } auto get_num_pgs() const { return pg_to_shard_mapping.get_num_pgs(); } seastar::future<> broadcast_map_to_pgs(epoch_t epoch); template <typename F> auto with_pg(spg_t pgid, F &&f) { core_id_t core = pg_to_shard_mapping.get_pg_mapping(pgid); return with_remote_shard_state( core, [pgid, f=std::move(f)](auto &local_state, auto &local_service) mutable { return std::invoke( std::move(f), local_state.pg_map.get_pg(pgid)); }); } template <typename T, typename... Args> auto start_pg_operation(Args&&... args) { auto op = get_local_state().registry.create_operation<T>( std::forward<Args>(args)...); auto &logger = crimson::get_logger(ceph_subsys_osd); logger.debug("{}: starting {}", *op, __func__); auto &opref = *op; auto id = op->get_id(); if constexpr (T::is_trackable) { op->template track_event<typename T::StartEvent>(); } auto fut = opref.template enter_stage<>( opref.get_connection_pipeline().await_active ).then([this, &opref, &logger] { logger.debug("{}: start_pg_operation in await_active stage", opref); return get_shard_services().local_state.osd_state.when_active(); }).then([&logger, &opref] { logger.debug("{}: start_pg_operation active, entering await_map", opref); return opref.template enter_stage<>( opref.get_connection_pipeline().await_map); }).then([this, &logger, &opref] { logger.debug("{}: start_pg_operation await_map stage", opref); using OSDMapBlockingEvent = OSD_OSDMapGate::OSDMapBlocker::BlockingEvent; return opref.template with_blocking_event<OSDMapBlockingEvent>( [this, &opref](auto &&trigger) { std::ignore = this; return get_shard_services().local_state.osdmap_gate.wait_for_map( std::move(trigger), opref.get_epoch(), &get_shard_services()); }); }).then([&logger, &opref](auto epoch) { logger.debug("{}: got map {}, entering get_pg", opref, epoch); return opref.template enter_stage<>( opref.get_connection_pipeline().get_pg); }).then([this, &logger, &opref, op=std::move(op)]() mutable { logger.debug("{}: in get_pg core {}", opref, seastar::this_shard_id()); logger.debug("{}: in get_pg", opref); if constexpr (T::can_create()) { logger.debug("{}: can_create", opref); return run_with_pg_maybe_create<T>(std::move(op)); } else { logger.debug("{}: !can_create", opref); return run_with_pg_maybe_wait<T>(std::move(op)); } }); return std::make_pair(id, std::move(fut)); } #undef FORWARD #undef FORWARD_CONST #undef FORWARD_TO_OSD_SINGLETON }; }
13,064
32.586118
92
h
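The FORWARD/FORWARD_CONST macros in pg_shard_manager.h generate thin perfect-forwarding wrappers so PGShardManager can re-export methods of its per-shard members without boilerplate. A toy, self-contained version of the same macro trick follows; Engine and Car are invented names used only to demonstrate the expansion.

#include <iostream>
#include <string>
#include <utility>

struct Engine {
  void start(int rpm) { std::cout << "engine at " << rpm << " rpm\n"; }
  std::string describe() { return "v8"; }
};

// Same shape as FORWARD in pg_shard_manager.h:
// expose TO_METHOD of TARGET under the name FROM_METHOD.
#define FORWARD(FROM_METHOD, TO_METHOD, TARGET)           \
  template <typename... Args>                             \
  auto FROM_METHOD(Args&&... args) {                      \
    return TARGET.TO_METHOD(std::forward<Args>(args)...); \
  }

class Car {
  Engine engine;
public:
  FORWARD(start, start, engine)        // Car::start(...) -> engine.start(...)
  FORWARD(describe, describe, engine)  // Car::describe() -> engine.describe()
};

#undef FORWARD

int main() {
  Car car;
  car.start(3000);
  std::cout << car.describe() << "\n";
}

The wrapper keeps the call site unchanged while the owning class stays free to swap the forwarding target (here a member, in PGShardManager the per-shard or singleton state).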
null
ceph-main/src/crimson/osd/recovery_backend.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include <fmt/format.h> #include "crimson/common/exception.h" #include "crimson/osd/recovery_backend.h" #include "crimson/osd/pg.h" #include "crimson/osd/pg_backend.h" #include "crimson/osd/osd_operations/background_recovery.h" #include "messages/MOSDFastDispatchOp.h" #include "osd/osd_types.h" namespace { seastar::logger& logger() { return crimson::get_logger(ceph_subsys_osd); } } hobject_t RecoveryBackend::get_temp_recovery_object( const hobject_t& target, eversion_t version) const { hobject_t hoid = target.make_temp_hobject(fmt::format("temp_recovering_{}_{}_{}_{}", pg.get_info().pgid, version, pg.get_info().history.same_interval_since, target.snap)); logger().debug("{} {}", __func__, hoid); return hoid; } void RecoveryBackend::clean_up(ceph::os::Transaction& t, std::string_view why) { for (auto& soid : temp_contents) { t.remove(pg.get_collection_ref()->get_cid(), ghobject_t(soid, ghobject_t::NO_GEN, pg.get_pg_whoami().shard)); } temp_contents.clear(); for (auto& [soid, recovery_waiter] : recovering) { if ((recovery_waiter->pull_info && recovery_waiter->pull_info->is_complete()) || (!recovery_waiter->pull_info && recovery_waiter->obc && recovery_waiter->obc->obs.exists)) { recovery_waiter->obc->interrupt( ::crimson::common::actingset_changed( pg.is_primary())); recovery_waiter->interrupt(why); } } recovering.clear(); } void RecoveryBackend::WaitForObjectRecovery::stop() { readable.set_exception( crimson::common::system_shutdown_exception()); recovered.set_exception( crimson::common::system_shutdown_exception()); pulled.set_exception( crimson::common::system_shutdown_exception()); for (auto& [pg_shard, pr] : pushes) { pr.set_exception( crimson::common::system_shutdown_exception()); } } void RecoveryBackend::handle_backfill_finish( MOSDPGBackfill& m, crimson::net::ConnectionRef conn) { logger().debug("{}", __func__); ceph_assert(!pg.is_primary()); ceph_assert(crimson::common::local_conf()->osd_kill_backfill_at != 1); auto reply = crimson::make_message<MOSDPGBackfill>( MOSDPGBackfill::OP_BACKFILL_FINISH_ACK, pg.get_osdmap_epoch(), m.query_epoch, spg_t(pg.get_pgid().pgid, pg.get_primary().shard)); reply->set_priority(pg.get_recovery_op_priority()); std::ignore = conn->send(std::move(reply)); shard_services.start_operation<crimson::osd::LocalPeeringEvent>( static_cast<crimson::osd::PG*>(&pg), pg.get_pg_whoami(), pg.get_pgid(), pg.get_osdmap_epoch(), pg.get_osdmap_epoch(), RecoveryDone{}); } RecoveryBackend::interruptible_future<> RecoveryBackend::handle_backfill_progress( MOSDPGBackfill& m) { logger().debug("{}", __func__); ceph_assert(!pg.is_primary()); ceph_assert(crimson::common::local_conf()->osd_kill_backfill_at != 2); ObjectStore::Transaction t; pg.get_peering_state().update_backfill_progress( m.last_backfill, m.stats, m.op == MOSDPGBackfill::OP_BACKFILL_PROGRESS, t); logger().debug("RecoveryBackend::handle_backfill_progress: do_transaction..."); return shard_services.get_store().do_transaction( pg.get_collection_ref(), std::move(t)).or_terminate(); } RecoveryBackend::interruptible_future<> RecoveryBackend::handle_backfill_finish_ack( MOSDPGBackfill& m) { logger().debug("{}", __func__); ceph_assert(pg.is_primary()); ceph_assert(crimson::common::local_conf()->osd_kill_backfill_at != 3); // TODO: // finish_recovery_op(hobject_t::get_max()); return seastar::now(); } RecoveryBackend::interruptible_future<> RecoveryBackend::handle_backfill( MOSDPGBackfill& m, crimson::net::ConnectionRef conn) { 
logger().debug("{}", __func__); if (pg.old_peering_msg(m.map_epoch, m.query_epoch)) { logger().debug("{}: discarding {}", __func__, m); return seastar::now(); } switch (m.op) { case MOSDPGBackfill::OP_BACKFILL_FINISH: handle_backfill_finish(m, conn); [[fallthrough]]; case MOSDPGBackfill::OP_BACKFILL_PROGRESS: return handle_backfill_progress(m); case MOSDPGBackfill::OP_BACKFILL_FINISH_ACK: return handle_backfill_finish_ack(m); default: ceph_assert("unknown op type for pg backfill"); return seastar::now(); } } RecoveryBackend::interruptible_future<> RecoveryBackend::handle_backfill_remove( MOSDPGBackfillRemove& m) { logger().debug("{} m.ls={}", __func__, m.ls); assert(m.get_type() == MSG_OSD_PG_BACKFILL_REMOVE); if (pg.can_discard_replica_op(m)) { logger().debug("{}: discarding {}", __func__, m); return seastar::now(); } ObjectStore::Transaction t; for ([[maybe_unused]] const auto& [soid, ver] : m.ls) { // TODO: the reserved space management. PG::try_reserve_recovery_space(). t.remove(pg.get_collection_ref()->get_cid(), ghobject_t(soid, ghobject_t::NO_GEN, pg.get_pg_whoami().shard)); } logger().debug("RecoveryBackend::handle_backfill_remove: do_transaction..."); return shard_services.get_store().do_transaction( pg.get_collection_ref(), std::move(t)).or_terminate(); } RecoveryBackend::interruptible_future<BackfillInterval> RecoveryBackend::scan_for_backfill( const hobject_t& start, [[maybe_unused]] const std::int64_t min, const std::int64_t max) { logger().debug("{} starting from {}", __func__, start); auto version_map = seastar::make_lw_shared<std::map<hobject_t, eversion_t>>(); return backend->list_objects(start, max).then_interruptible( [this, start, version_map] (auto&& ret) { auto&& [objects, next] = std::move(ret); return seastar::do_with( std::move(objects), [this, version_map](auto &objects) { return interruptor::parallel_for_each(objects, [this, version_map] (const hobject_t& object) -> interruptible_future<> { crimson::osd::ObjectContextRef obc; if (pg.is_primary()) { obc = pg.obc_registry.maybe_get_cached_obc(object); } if (obc) { if (obc->obs.exists) { logger().debug("scan_for_backfill found (primary): {} {}", object, obc->obs.oi.version); version_map->emplace(object, obc->obs.oi.version); } else { // if the object does not exist here, it must have been removed // between the collection_list_partial and here. This can happen // for the first item in the range, which is usually last_backfill. 
} return seastar::now(); } else { return backend->load_metadata(object).safe_then_interruptible( [version_map, object] (auto md) { if (md->os.exists) { logger().debug("scan_for_backfill found: {} {}", object, md->os.oi.version); version_map->emplace(object, md->os.oi.version); } return seastar::now(); }, PGBackend::load_metadata_ertr::assert_all{}); } }); }).then_interruptible([version_map, start=std::move(start), next=std::move(next), this] { BackfillInterval bi; bi.begin = std::move(start); bi.end = std::move(next); bi.version = pg.get_info().last_update; bi.objects = std::move(*version_map); logger().debug("{} BackfillInterval filled, leaving", "scan_for_backfill"); return seastar::make_ready_future<BackfillInterval>(std::move(bi)); }); }); } RecoveryBackend::interruptible_future<> RecoveryBackend::handle_scan_get_digest( MOSDPGScan& m, crimson::net::ConnectionRef conn) { logger().debug("{}", __func__); if (false /* FIXME: check for backfill too full */) { std::ignore = shard_services.start_operation<crimson::osd::LocalPeeringEvent>( // TODO: abstract start_background_recovery static_cast<crimson::osd::PG*>(&pg), pg.get_pg_whoami(), pg.get_pgid(), pg.get_osdmap_epoch(), pg.get_osdmap_epoch(), PeeringState::BackfillTooFull()); return seastar::now(); } return scan_for_backfill( std::move(m.begin), crimson::common::local_conf().get_val<std::int64_t>("osd_backfill_scan_min"), crimson::common::local_conf().get_val<std::int64_t>("osd_backfill_scan_max") ).then_interruptible( [this, query_epoch=m.query_epoch, conn ](auto backfill_interval) { auto reply = crimson::make_message<MOSDPGScan>( MOSDPGScan::OP_SCAN_DIGEST, pg.get_pg_whoami(), pg.get_osdmap_epoch(), query_epoch, spg_t(pg.get_info().pgid.pgid, pg.get_primary().shard), backfill_interval.begin, backfill_interval.end); encode(backfill_interval.objects, reply->get_data()); return conn->send(std::move(reply)); }); } RecoveryBackend::interruptible_future<> RecoveryBackend::handle_scan_digest( MOSDPGScan& m) { logger().debug("{}", __func__); // Check that from is in backfill_targets vector ceph_assert(pg.is_backfill_target(m.from)); BackfillInterval bi; bi.begin = m.begin; bi.end = m.end; { auto p = m.get_data().cbegin(); // take care to preserve ordering! 
bi.clear_objects(); ::decode_noclear(bi.objects, p); } shard_services.start_operation<crimson::osd::BackfillRecovery>( static_cast<crimson::osd::PG*>(&pg), shard_services, pg.get_osdmap_epoch(), crimson::osd::BackfillState::ReplicaScanned{ m.from, std::move(bi) }); return seastar::now(); } RecoveryBackend::interruptible_future<> RecoveryBackend::handle_scan( MOSDPGScan& m, crimson::net::ConnectionRef conn) { logger().debug("{}", __func__); if (pg.old_peering_msg(m.map_epoch, m.query_epoch)) { logger().debug("{}: discarding {}", __func__, m); return seastar::now(); } switch (m.op) { case MOSDPGScan::OP_SCAN_GET_DIGEST: return handle_scan_get_digest(m, conn); case MOSDPGScan::OP_SCAN_DIGEST: return handle_scan_digest(m); default: // FIXME: move to errorator ceph_assert("unknown op type for pg scan"); return seastar::now(); } } RecoveryBackend::interruptible_future<> RecoveryBackend::handle_recovery_op( Ref<MOSDFastDispatchOp> m, crimson::net::ConnectionRef conn) { switch (m->get_header().type) { case MSG_OSD_PG_BACKFILL: return handle_backfill(*boost::static_pointer_cast<MOSDPGBackfill>(m), conn); case MSG_OSD_PG_BACKFILL_REMOVE: return handle_backfill_remove(*boost::static_pointer_cast<MOSDPGBackfillRemove>(m)); case MSG_OSD_PG_SCAN: return handle_scan(*boost::static_pointer_cast<MOSDPGScan>(m), conn); default: return seastar::make_exception_future<>( std::invalid_argument(fmt::format("invalid request type: {}", m->get_header().type))); } }
10,747
31.668693
93
cc
null
ceph-main/src/crimson/osd/recovery_backend.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <seastar/core/future.hh> #include "crimson/common/type_helpers.h" #include "crimson/os/futurized_store.h" #include "crimson/os/futurized_collection.h" #include "crimson/osd/pg_interval_interrupt_condition.h" #include "crimson/osd/object_context.h" #include "crimson/osd/shard_services.h" #include "messages/MOSDPGBackfill.h" #include "messages/MOSDPGBackfillRemove.h" #include "messages/MOSDPGScan.h" #include "osd/recovery_types.h" #include "osd/osd_types.h" namespace crimson::osd{ class PG; } class PGBackend; class RecoveryBackend { public: class WaitForObjectRecovery; public: template <typename T = void> using interruptible_future = ::crimson::interruptible::interruptible_future< ::crimson::osd::IOInterruptCondition, T>; using interruptor = ::crimson::interruptible::interruptor< ::crimson::osd::IOInterruptCondition>; RecoveryBackend(crimson::osd::PG& pg, crimson::osd::ShardServices& shard_services, crimson::os::CollectionRef coll, PGBackend* backend) : pg{pg}, shard_services{shard_services}, store{&shard_services.get_store()}, coll{coll}, backend{backend} {} virtual ~RecoveryBackend() {} WaitForObjectRecovery& add_recovering(const hobject_t& soid) { auto [it, added] = recovering.emplace(soid, new WaitForObjectRecovery{}); assert(added); return *(it->second); } WaitForObjectRecovery& get_recovering(const hobject_t& soid) { assert(is_recovering(soid)); return *(recovering.at(soid)); } void remove_recovering(const hobject_t& soid) { recovering.erase(soid); } bool is_recovering(const hobject_t& soid) const { return recovering.count(soid) != 0; } uint64_t total_recovering() const { return recovering.size(); } virtual interruptible_future<> handle_recovery_op( Ref<MOSDFastDispatchOp> m, crimson::net::ConnectionRef conn); virtual interruptible_future<> recover_object( const hobject_t& soid, eversion_t need) = 0; virtual interruptible_future<> recover_delete( const hobject_t& soid, eversion_t need) = 0; virtual interruptible_future<> push_delete( const hobject_t& soid, eversion_t need) = 0; interruptible_future<BackfillInterval> scan_for_backfill( const hobject_t& from, std::int64_t min, std::int64_t max); void on_peering_interval_change(ceph::os::Transaction& t) { clean_up(t, "new peering interval"); } seastar::future<> stop() { for (auto& [soid, recovery_waiter] : recovering) { recovery_waiter->stop(); } return on_stop(); } protected: crimson::osd::PG& pg; crimson::osd::ShardServices& shard_services; crimson::os::FuturizedStore::Shard* store; crimson::os::CollectionRef coll; PGBackend* backend; struct pull_info_t { pg_shard_t from; hobject_t soid; ObjectRecoveryProgress recovery_progress; ObjectRecoveryInfo recovery_info; crimson::osd::ObjectContextRef head_ctx; crimson::osd::ObjectContextRef obc; object_stat_sum_t stat; bool is_complete() const { return recovery_progress.is_complete(recovery_info); } }; struct push_info_t { ObjectRecoveryProgress recovery_progress; ObjectRecoveryInfo recovery_info; crimson::osd::ObjectContextRef obc; object_stat_sum_t stat; }; public: class WaitForObjectRecovery : public boost::intrusive_ref_counter< WaitForObjectRecovery, boost::thread_unsafe_counter>, public crimson::BlockerT<WaitForObjectRecovery> { seastar::shared_promise<> readable, recovered, pulled; std::map<pg_shard_t, seastar::shared_promise<>> pushes; public: static constexpr const char* type_name = "WaitForObjectRecovery"; crimson::osd::ObjectContextRef obc; 
std::optional<pull_info_t> pull_info; std::map<pg_shard_t, push_info_t> pushing; seastar::future<> wait_for_readable() { return readable.get_shared_future(); } seastar::future<> wait_for_pushes(pg_shard_t shard) { return pushes[shard].get_shared_future(); } seastar::future<> wait_for_recovered() { return recovered.get_shared_future(); } template <typename T, typename F> auto wait_track_blocking(T &trigger, F &&fut) { WaitForObjectRecoveryRef ref = this; return track_blocking( trigger, std::forward<F>(fut) ).finally([ref] {}); } template <typename T> seastar::future<> wait_for_recovered(T &trigger) { WaitForObjectRecoveryRef ref = this; return wait_track_blocking(trigger, recovered.get_shared_future()); } seastar::future<> wait_for_pull() { return pulled.get_shared_future(); } void set_readable() { readable.set_value(); } void set_recovered() { recovered.set_value(); } void set_pushed(pg_shard_t shard) { pushes[shard].set_value(); } void set_pulled() { pulled.set_value(); } void set_push_failed(pg_shard_t shard, std::exception_ptr e) { pushes.at(shard).set_exception(e); } void interrupt(std::string_view why) { readable.set_exception(std::system_error( std::make_error_code(std::errc::interrupted), why.data())); recovered.set_exception(std::system_error( std::make_error_code(std::errc::interrupted), why.data())); pulled.set_exception(std::system_error( std::make_error_code(std::errc::interrupted), why.data())); for (auto& [pg_shard, pr] : pushes) { pr.set_exception(std::system_error( std::make_error_code(std::errc::interrupted), why.data())); } } void stop(); void dump_detail(Formatter* f) const { } }; using RecoveryBlockingEvent = crimson::AggregateBlockingEvent<WaitForObjectRecovery::BlockingEvent>; using WaitForObjectRecoveryRef = boost::intrusive_ptr<WaitForObjectRecovery>; protected: std::map<hobject_t, WaitForObjectRecoveryRef> recovering; hobject_t get_temp_recovery_object( const hobject_t& target, eversion_t version) const; boost::container::flat_set<hobject_t> temp_contents; void add_temp_obj(const hobject_t &oid) { temp_contents.insert(oid); } void clear_temp_obj(const hobject_t &oid) { temp_contents.erase(oid); } void clean_up(ceph::os::Transaction& t, std::string_view why); virtual seastar::future<> on_stop() = 0; private: void handle_backfill_finish( MOSDPGBackfill& m, crimson::net::ConnectionRef conn); interruptible_future<> handle_backfill_progress( MOSDPGBackfill& m); interruptible_future<> handle_backfill_finish_ack( MOSDPGBackfill& m); interruptible_future<> handle_backfill( MOSDPGBackfill& m, crimson::net::ConnectionRef conn); interruptible_future<> handle_scan_get_digest( MOSDPGScan& m, crimson::net::ConnectionRef conn); interruptible_future<> handle_scan_digest( MOSDPGScan& m); interruptible_future<> handle_scan( MOSDPGScan& m, crimson::net::ConnectionRef conn); interruptible_future<> handle_backfill_remove(MOSDPGBackfillRemove& m); };
7,161
29.606838
79
h
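WaitForObjectRecovery above hands every interested op a future obtained from the same seastar::shared_promise, so any number of waiters can block on one in-flight recovery and all resume when set_recovered() fires. A rough standard-library analogue of that fan-out, using std::promise / std::shared_future with threads standing in for seastar fibers; this sketches only the one-event-many-waiters idea, not the reactor model or error paths.

#include <chrono>
#include <future>
#include <iostream>
#include <thread>
#include <vector>

int main() {
  std::promise<void> recovered;                     // set once, by the recovery path
  std::shared_future<void> fut = recovered.get_future().share();

  std::vector<std::thread> waiters;
  for (int i = 0; i < 3; ++i) {
    waiters.emplace_back([fut, i] {                 // each op waits on the same event
      fut.wait();
      std::cout << "op " << i << " resumed after recovery\n";
    });
  }

  std::this_thread::sleep_for(std::chrono::milliseconds(50));
  recovered.set_value();                            // analogous to set_recovered()

  for (auto& t : waiters) {
    t.join();
  }
}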
null
ceph-main/src/crimson/osd/replicated_backend.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "replicated_backend.h" #include "messages/MOSDRepOpReply.h" #include "crimson/common/exception.h" #include "crimson/common/log.h" #include "crimson/os/futurized_store.h" #include "crimson/osd/shard_services.h" #include "osd/PeeringState.h" SET_SUBSYS(osd); ReplicatedBackend::ReplicatedBackend(pg_t pgid, pg_shard_t whoami, ReplicatedBackend::CollectionRef coll, crimson::osd::ShardServices& shard_services, DoutPrefixProvider &dpp) : PGBackend{whoami.shard, coll, shard_services, dpp}, pgid{pgid}, whoami{whoami} {} ReplicatedBackend::ll_read_ierrorator::future<ceph::bufferlist> ReplicatedBackend::_read(const hobject_t& hoid, const uint64_t off, const uint64_t len, const uint32_t flags) { return store->read(coll, ghobject_t{hoid}, off, len, flags); } ReplicatedBackend::rep_op_fut_t ReplicatedBackend::_submit_transaction(std::set<pg_shard_t>&& pg_shards, const hobject_t& hoid, ceph::os::Transaction&& txn, osd_op_params_t&& osd_op_p, epoch_t min_epoch, epoch_t map_epoch, std::vector<pg_log_entry_t>&& log_entries) { LOG_PREFIX(ReplicatedBackend::_submit_transaction); const ceph_tid_t tid = shard_services.get_tid(); auto pending_txn = pending_trans.try_emplace(tid, pg_shards.size(), osd_op_p.at_version).first; bufferlist encoded_txn; encode(txn, encoded_txn); DEBUGDPP("object {}", dpp, hoid); auto all_completed = interruptor::make_interruptible( shard_services.get_store().do_transaction(coll, std::move(txn)) ).then_interruptible([FNAME, this, peers=pending_txn->second.weak_from_this()] { if (!peers) { // for now, only actingset_changed can cause peers // to be nullptr ERRORDPP("peers is null, this should be impossible", dpp); assert(0 == "impossible"); } if (--peers->pending == 0) { peers->all_committed.set_value(); peers->all_committed = {}; return seastar::now(); } return peers->all_committed.get_shared_future(); }).then_interruptible([pending_txn, this] { auto acked_peers = std::move(pending_txn->second.acked_peers); pending_trans.erase(pending_txn); return seastar::make_ready_future<crimson::osd::acked_peers_t>(std::move(acked_peers)); }); auto sends = std::make_unique<std::vector<seastar::future<>>>(); for (auto pg_shard : pg_shards) { if (pg_shard != whoami) { auto m = crimson::make_message<MOSDRepOp>( osd_op_p.req_id, whoami, spg_t{pgid, pg_shard.shard}, hoid, CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK, map_epoch, min_epoch, tid, osd_op_p.at_version); m->set_data(encoded_txn); pending_txn->second.acked_peers.push_back({pg_shard, eversion_t{}}); encode(log_entries, m->logbl); m->pg_trim_to = osd_op_p.pg_trim_to; m->min_last_complete_ondisk = osd_op_p.min_last_complete_ondisk; m->set_rollback_to(osd_op_p.at_version); // TODO: set more stuff. 
e.g., pg_states sends->emplace_back(shard_services.send_to_osd(pg_shard.osd, std::move(m), map_epoch)); } } auto sends_complete = seastar::when_all_succeed( sends->begin(), sends->end() ).finally([sends=std::move(sends)] {}); return {std::move(sends_complete), std::move(all_completed)}; } void ReplicatedBackend::on_actingset_changed(bool same_primary) { crimson::common::actingset_changed e_actingset_changed{same_primary}; for (auto& [tid, pending_txn] : pending_trans) { pending_txn.all_committed.set_exception(e_actingset_changed); } pending_trans.clear(); } void ReplicatedBackend::got_rep_op_reply(const MOSDRepOpReply& reply) { LOG_PREFIX(ReplicatedBackend::got_rep_op_reply); auto found = pending_trans.find(reply.get_tid()); if (found == pending_trans.end()) { WARNDPP("cannot find rep op for message {}", dpp, reply); return; } auto& peers = found->second; for (auto& peer : peers.acked_peers) { if (peer.shard == reply.from) { peer.last_complete_ondisk = reply.get_last_complete_ondisk(); if (--peers.pending == 0) { peers.all_committed.set_value(); peers.all_committed = {}; } return; } } } seastar::future<> ReplicatedBackend::stop() { LOG_PREFIX(ReplicatedBackend::stop); INFODPP("cid {}", coll->get_cid()); for (auto& [tid, pending_on] : pending_trans) { pending_on.all_committed.set_exception( crimson::common::system_shutdown_exception()); } pending_trans.clear(); return seastar::now(); } seastar::future<> ReplicatedBackend::request_committed(const osd_reqid_t& reqid, const eversion_t& at_version) { if (std::empty(pending_trans)) { return seastar::now(); } auto iter = pending_trans.begin(); auto& pending_txn = iter->second; if (pending_txn.at_version > at_version) { return seastar::now(); } for (; iter->second.at_version < at_version; ++iter); // As for now, the previous client_request with the same reqid // mustn't have finished, as that would mean later client_requests // has finished before earlier ones. // // The following line of code should be "assert(pending_txn.at_version == at_version)", // as there can be only one transaction at any time in pending_trans due to // PG::request_pg_pipeline. But there's a high possibility that we will // improve the parallelism here in the future, which means there may be multiple // client requests in flight, so we loosed the restriction to as follows. Correct // me if I'm wrong:-) assert(iter != pending_trans.end() && iter->second.at_version == at_version); if (iter->second.pending) { return iter->second.all_committed.get_shared_future(); } else { return seastar::now(); } }
6,094
33.828571
93
cc
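_submit_transaction() above assigns each replicated update a tid, records how many participants still have to commit (the local store plus every replica sent an MOSDRepOp), and resolves all_committed once got_rep_op_reply() counts the last ack. Below is a minimal, standalone model of that bookkeeping that assumes nothing about Seastar: RepOpTracker, its callback, and the shard ids are invented names used purely for illustration.

#include <cassert>
#include <cstdint>
#include <functional>
#include <iostream>
#include <map>
#include <vector>

using tid_t = std::uint64_t;
using shard_t = int;

struct PendingOn {
  unsigned pending;                     // acks still outstanding
  std::vector<shard_t> acked_peers;     // peers that already answered
  std::function<void()> all_committed;  // fired when pending hits zero
};

class RepOpTracker {
  tid_t next_tid = 1;
  std::map<tid_t, PendingOn> pending_trans;

public:
  tid_t submit(unsigned n_participants, std::function<void()> on_commit) {
    const tid_t tid = next_tid++;
    pending_trans.emplace(tid,
                          PendingOn{n_participants, {}, std::move(on_commit)});
    return tid;
  }

  // called for the local commit and for every rep-op reply from a peer
  void ack(tid_t tid, shard_t from) {
    auto it = pending_trans.find(tid);
    if (it == pending_trans.end()) {
      return;  // stale reply; ignore it, much like got_rep_op_reply does
    }
    it->second.acked_peers.push_back(from);
    assert(it->second.pending > 0);
    if (--it->second.pending == 0) {
      it->second.all_committed();
      pending_trans.erase(it);
    }
  }
};

int main() {
  RepOpTracker tracker;
  const tid_t tid =
      tracker.submit(3, [] { std::cout << "all replicas committed\n"; });
  tracker.ack(tid, 0);  // local shard
  tracker.ack(tid, 1);
  tracker.ack(tid, 2);  // last ack fires the callback
  return 0;
}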
null
ceph-main/src/crimson/osd/replicated_backend.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <boost/intrusive_ptr.hpp> #include <seastar/core/future.hh> #include <seastar/core/weak_ptr.hh> #include "include/buffer_fwd.h" #include "osd/osd_types.h" #include "acked_peers.h" #include "pg_backend.h" namespace crimson::osd { class ShardServices; } class ReplicatedBackend : public PGBackend { public: ReplicatedBackend(pg_t pgid, pg_shard_t whoami, CollectionRef coll, crimson::osd::ShardServices& shard_services, DoutPrefixProvider &dpp); void got_rep_op_reply(const MOSDRepOpReply& reply) final; seastar::future<> stop() final; void on_actingset_changed(bool same_primary) final; private: ll_read_ierrorator::future<ceph::bufferlist> _read(const hobject_t& hoid, uint64_t off, uint64_t len, uint32_t flags) override; rep_op_fut_t _submit_transaction(std::set<pg_shard_t>&& pg_shards, const hobject_t& hoid, ceph::os::Transaction&& txn, osd_op_params_t&& osd_op_p, epoch_t min_epoch, epoch_t max_epoch, std::vector<pg_log_entry_t>&& log_entries) final; const pg_t pgid; const pg_shard_t whoami; class pending_on_t : public seastar::weakly_referencable<pending_on_t> { public: pending_on_t(size_t pending, const eversion_t& at_version) : pending{static_cast<unsigned>(pending)}, at_version(at_version) {} unsigned pending; // The order of pending_txns' at_version must be the same as their // corresponding ceph_tid_t, as we rely on this condition for checking // whether a client request is already completed. To put it another // way, client requests at_version must be updated synchorously/simultaneously // with ceph_tid_t. const eversion_t at_version; crimson::osd::acked_peers_t acked_peers; seastar::shared_promise<> all_committed; }; using pending_transactions_t = std::map<ceph_tid_t, pending_on_t>; pending_transactions_t pending_trans; seastar::future<> request_committed( const osd_reqid_t& reqid, const eversion_t& at_version) final; };
2,109
33.032258
82
h
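The comment in pending_on_t above pins down the invariant request_committed() relies on: pending transactions sit in tid order and their at_versions grow in that same order, so the lookup can walk the map until it reaches the requested version. The snippet below is a reduced model of that search with made-up types (eversion, Pending); it only illustrates the walk, not the shared-future plumbing.

#include <cassert>
#include <cstdint>
#include <iostream>
#include <map>
#include <optional>

using tid_t = std::uint64_t;

struct eversion {
  std::uint64_t epoch = 0, v = 0;
  auto operator<=>(const eversion&) const = default;
};

struct Pending {
  eversion at_version;
  bool still_pending;
};

// Walk the tid-ordered map until the entry carrying at_version is found;
// the monotonic at_version invariant keeps this walk well defined.
std::optional<tid_t> find_wait_target(
    const std::map<tid_t, Pending>& pending_trans, eversion at_version) {
  if (pending_trans.empty() ||
      pending_trans.begin()->second.at_version > at_version) {
    return std::nullopt;  // nothing at or before this version is in flight
  }
  auto it = pending_trans.begin();
  while (it != pending_trans.end() && it->second.at_version < at_version) {
    ++it;
  }
  assert(it != pending_trans.end() && it->second.at_version == at_version);
  if (it->second.still_pending) {
    return it->first;  // caller should wait on this tid's commit
  }
  return std::nullopt;
}

int main() {
  std::map<tid_t, Pending> pending = {
      {10, {{5, 1}, true}},
      {11, {{5, 2}, true}},
      {12, {{5, 3}, false}},
  };
  if (auto tid = find_wait_target(pending, {5, 2}); tid) {
    std::cout << "wait on tid " << *tid << "\n";
  } else {
    std::cout << "already committed\n";
  }
  return 0;
}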
null
ceph-main/src/crimson/osd/replicated_recovery_backend.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*- // vim: ts=8 sw=2 smarttab expandtab #include <fmt/format.h> #include <fmt/ostream.h> #include <seastar/core/future.hh> #include <seastar/core/do_with.hh> #include "crimson/osd/pg.h" #include "crimson/osd/pg_backend.h" #include "osd/osd_types_fmt.h" #include "replicated_recovery_backend.h" #include "msg/Message.h" namespace { seastar::logger& logger() { return crimson::get_logger(ceph_subsys_osd); } } using std::less; using std::map; using std::string; RecoveryBackend::interruptible_future<> ReplicatedRecoveryBackend::recover_object( const hobject_t& soid, eversion_t need) { logger().debug("{}: {}, {}", __func__, soid, need); // always add_recovering(soid) before recover_object(soid) assert(is_recovering(soid)); // start tracking the recovery of soid return maybe_pull_missing_obj(soid, need).then_interruptible([this, soid, need] { logger().debug("recover_object: loading obc: {}", soid); return pg.obc_loader.with_obc<RWState::RWREAD>(soid, [this, soid, need](auto obc) { logger().debug("recover_object: loaded obc: {}", obc->obs.oi.soid); auto& recovery_waiter = get_recovering(soid); recovery_waiter.obc = obc; recovery_waiter.obc->wait_recovery_read(); return maybe_push_shards(soid, need); }).handle_error_interruptible( crimson::osd::PG::load_obc_ertr::all_same_way([soid](auto& code) { // TODO: may need eio handling? logger().error("recover_object saw error code {}, ignoring object {}", code, soid); })); }); } RecoveryBackend::interruptible_future<> ReplicatedRecoveryBackend::maybe_push_shards( const hobject_t& soid, eversion_t need) { return seastar::do_with( get_shards_to_push(soid), [this, need, soid](auto &shards) { return interruptor::parallel_for_each( shards, [this, need, soid](auto shard) { return prep_push(soid, need, shard).then_interruptible([this, soid, shard](auto push) { auto msg = crimson::make_message<MOSDPGPush>(); msg->from = pg.get_pg_whoami(); msg->pgid = pg.get_pgid(); msg->map_epoch = pg.get_osdmap_epoch(); msg->min_epoch = pg.get_last_peering_reset(); msg->pushes.push_back(std::move(push)); msg->set_priority(pg.get_recovery_op_priority()); return interruptor::make_interruptible( shard_services.send_to_osd(shard.osd, std::move(msg), pg.get_osdmap_epoch())) .then_interruptible( [this, soid, shard] { return get_recovering(soid).wait_for_pushes(shard); }); }); }); }).then_interruptible([this, soid] { auto &recovery = get_recovering(soid); if (auto push_info = recovery.pushing.begin(); push_info != recovery.pushing.end()) { pg.get_recovery_handler()->on_global_recover(soid, push_info->second.stat, false); } else if (recovery.pull_info) { // no push happened (empty get_shards_to_push()) but pull actually did pg.get_recovery_handler()->on_global_recover(soid, recovery.pull_info->stat, false); } else { // no pulls, no pushes } return seastar::make_ready_future<>(); }).handle_exception_interruptible([this, soid](auto e) { auto &recovery = get_recovering(soid); if (recovery.obc) { recovery.obc->drop_recovery_read(); } recovering.erase(soid); return seastar::make_exception_future<>(e); }); } RecoveryBackend::interruptible_future<> ReplicatedRecoveryBackend::maybe_pull_missing_obj( const hobject_t& soid, eversion_t need) { pg_missing_tracker_t local_missing = pg.get_local_missing(); if (!local_missing.is_missing(soid)) { return seastar::make_ready_future<>(); } PullOp pull_op; auto& recovery_waiter = get_recovering(soid); recovery_waiter.pull_info = std::make_optional<RecoveryBackend::pull_info_t>(); auto& 
pull_info = *recovery_waiter.pull_info; prepare_pull(pull_op, pull_info, soid, need); auto msg = crimson::make_message<MOSDPGPull>(); msg->from = pg.get_pg_whoami(); msg->set_priority(pg.get_recovery_op_priority()); msg->pgid = pg.get_pgid(); msg->map_epoch = pg.get_osdmap_epoch(); msg->min_epoch = pg.get_last_peering_reset(); msg->set_pulls({std::move(pull_op)}); return interruptor::make_interruptible( shard_services.send_to_osd( pull_info.from.osd, std::move(msg), pg.get_osdmap_epoch() )).then_interruptible([&recovery_waiter] { return recovery_waiter.wait_for_pull(); }); } RecoveryBackend::interruptible_future<> ReplicatedRecoveryBackend::push_delete( const hobject_t& soid, eversion_t need) { logger().debug("{}: {}, {}", __func__, soid, need); epoch_t min_epoch = pg.get_last_peering_reset(); assert(pg.get_acting_recovery_backfill().size() > 0); return interruptor::parallel_for_each(pg.get_acting_recovery_backfill(), [this, soid, need, min_epoch](pg_shard_t shard) -> interruptible_future<> { if (shard == pg.get_pg_whoami()) return seastar::make_ready_future<>(); auto iter = pg.get_shard_missing().find(shard); if (iter == pg.get_shard_missing().end()) return seastar::make_ready_future<>(); if (iter->second.is_missing(soid)) { logger().debug("push_delete: will remove {} from {}", soid, shard); pg.begin_peer_recover(shard, soid); spg_t target_pg(pg.get_info().pgid.pgid, shard.shard); auto msg = crimson::make_message<MOSDPGRecoveryDelete>( pg.get_pg_whoami(), target_pg, pg.get_osdmap_epoch(), min_epoch); msg->set_priority(pg.get_recovery_op_priority()); msg->objects.push_back(std::make_pair(soid, need)); return interruptor::make_interruptible( shard_services.send_to_osd(shard.osd, std::move(msg), pg.get_osdmap_epoch())).then_interruptible( [this, soid, shard] { return get_recovering(soid).wait_for_pushes(shard); }); } return seastar::make_ready_future<>(); }); } RecoveryBackend::interruptible_future<> ReplicatedRecoveryBackend::handle_recovery_delete( Ref<MOSDPGRecoveryDelete> m) { logger().debug("{}: {}", __func__, *m); auto& p = m->objects.front(); //TODO: only one delete per message for now. 
return local_recover_delete(p.first, p.second, pg.get_osdmap_epoch()) .then_interruptible( [this, m] { auto reply = crimson::make_message<MOSDPGRecoveryDeleteReply>(); reply->from = pg.get_pg_whoami(); reply->set_priority(m->get_priority()); reply->pgid = spg_t(pg.get_info().pgid.pgid, m->from.shard); reply->map_epoch = m->map_epoch; reply->min_epoch = m->min_epoch; reply->objects = m->objects; return shard_services.send_to_osd(m->from.osd, std::move(reply), pg.get_osdmap_epoch()); }); } RecoveryBackend::interruptible_future<> ReplicatedRecoveryBackend::on_local_recover_persist( const hobject_t& soid, const ObjectRecoveryInfo& _recovery_info, bool is_delete, epoch_t epoch_frozen) { logger().debug("{}", __func__); ceph::os::Transaction t; pg.get_recovery_handler()->on_local_recover(soid, _recovery_info, is_delete, t); logger().debug("ReplicatedRecoveryBackend::on_local_recover_persist: do_transaction..."); return interruptor::make_interruptible( shard_services.get_store().do_transaction(coll, std::move(t))) .then_interruptible( [this, epoch_frozen, last_complete = pg.get_info().last_complete] { pg.get_recovery_handler()->_committed_pushed_object(epoch_frozen, last_complete); return seastar::make_ready_future<>(); }); } RecoveryBackend::interruptible_future<> ReplicatedRecoveryBackend::local_recover_delete( const hobject_t& soid, eversion_t need, epoch_t epoch_to_freeze) { logger().debug("{}: {}, {}", __func__, soid, need); return backend->load_metadata(soid).safe_then_interruptible([this] (auto lomt) -> interruptible_future<> { if (lomt->os.exists) { return seastar::do_with(ceph::os::Transaction(), [this, lomt = std::move(lomt)](auto& txn) { return backend->remove(lomt->os, txn).then_interruptible( [this, &txn]() mutable { logger().debug("ReplicatedRecoveryBackend::local_recover_delete: do_transaction..."); return shard_services.get_store().do_transaction(coll, std::move(txn)); }); }); } return seastar::make_ready_future<>(); }).safe_then_interruptible([this, soid, epoch_to_freeze, need] { ObjectRecoveryInfo recovery_info; recovery_info.soid = soid; recovery_info.version = need; return on_local_recover_persist(soid, recovery_info, true, epoch_to_freeze); }, PGBackend::load_metadata_ertr::all_same_way( [this, soid, epoch_to_freeze, need] (auto e) { ObjectRecoveryInfo recovery_info; recovery_info.soid = soid; recovery_info.version = need; return on_local_recover_persist(soid, recovery_info, true, epoch_to_freeze); }) ); } RecoveryBackend::interruptible_future<> ReplicatedRecoveryBackend::recover_delete( const hobject_t &soid, eversion_t need) { logger().debug("{}: {}, {}", __func__, soid, need); epoch_t cur_epoch = pg.get_osdmap_epoch(); return seastar::do_with(object_stat_sum_t(), [this, soid, need, cur_epoch](auto& stat_diff) { return local_recover_delete(soid, need, cur_epoch).then_interruptible( [this, &stat_diff, cur_epoch, soid, need]() -> interruptible_future<> { if (!pg.has_reset_since(cur_epoch)) { bool object_missing = false; for (const auto& shard : pg.get_acting_recovery_backfill()) { if (shard == pg.get_pg_whoami()) continue; if (pg.get_shard_missing(shard)->is_missing(soid)) { logger().debug("recover_delete: soid {} needs to deleted from replca {}", soid, shard); object_missing = true; break; } } if (!object_missing) { stat_diff.num_objects_recovered = 1; return seastar::make_ready_future<>(); } else { return push_delete(soid, need); } } return seastar::make_ready_future<>(); }).then_interruptible([this, soid, &stat_diff] { pg.get_recovery_handler()->on_global_recover(soid, 
stat_diff, true); return seastar::make_ready_future<>(); }); }); } RecoveryBackend::interruptible_future<PushOp> ReplicatedRecoveryBackend::prep_push( const hobject_t& soid, eversion_t need, pg_shard_t pg_shard) { logger().debug("{}: {}, {}", __func__, soid, need); auto& recovery_waiter = get_recovering(soid); auto& obc = recovery_waiter.obc; interval_set<uint64_t> data_subset; if (obc->obs.oi.size) { data_subset.insert(0, obc->obs.oi.size); } const auto& missing = pg.get_shard_missing().find(pg_shard)->second; const auto it = missing.get_items().find(soid); assert(it != missing.get_items().end()); data_subset.intersection_of(it->second.clean_regions.get_dirty_regions()); logger().debug("prep_push: {} data_subset {} to {}", soid, data_subset, pg_shard); auto& push_info = recovery_waiter.pushing[pg_shard]; pg.begin_peer_recover(pg_shard, soid); const auto pmissing_iter = pg.get_shard_missing().find(pg_shard); const auto missing_iter = pmissing_iter->second.get_items().find(soid); assert(missing_iter != pmissing_iter->second.get_items().end()); push_info.obc = obc; push_info.recovery_info.size = obc->obs.oi.size; push_info.recovery_info.copy_subset = data_subset; push_info.recovery_info.soid = soid; push_info.recovery_info.oi = obc->obs.oi; push_info.recovery_info.version = obc->obs.oi.version; push_info.recovery_info.object_exist = missing_iter->second.clean_regions.object_is_exist(); push_info.recovery_progress.omap_complete = !missing_iter->second.clean_regions.omap_is_dirty(); return build_push_op(push_info.recovery_info, push_info.recovery_progress, &push_info.stat).then_interruptible( [this, soid, pg_shard](auto push_op) { auto& recovery_waiter = get_recovering(soid); auto& push_info = recovery_waiter.pushing[pg_shard]; push_info.recovery_progress = push_op.after_progress; return push_op; }); } void ReplicatedRecoveryBackend::prepare_pull(PullOp& pull_op, pull_info_t& pull_info, const hobject_t& soid, eversion_t need) { logger().debug("{}: {}, {}", __func__, soid, need); pg_missing_tracker_t local_missing = pg.get_local_missing(); const auto missing_iter = local_missing.get_items().find(soid); auto m = pg.get_missing_loc_shards(); pg_shard_t fromshard = *(m[soid].begin()); //TODO: skipped snap objects case for now pull_op.recovery_info.copy_subset.insert(0, (uint64_t) -1); pull_op.recovery_info.copy_subset.intersection_of( missing_iter->second.clean_regions.get_dirty_regions()); pull_op.recovery_info.size = ((uint64_t) -1); pull_op.recovery_info.object_exist = missing_iter->second.clean_regions.object_is_exist(); pull_op.recovery_info.soid = soid; pull_op.soid = soid; pull_op.recovery_progress.data_complete = false; pull_op.recovery_progress.omap_complete = !missing_iter->second.clean_regions.omap_is_dirty(); pull_op.recovery_progress.data_recovered_to = 0; pull_op.recovery_progress.first = true; pull_info.from = fromshard; pull_info.soid = soid; pull_info.recovery_info = pull_op.recovery_info; pull_info.recovery_progress = pull_op.recovery_progress; } RecoveryBackend::interruptible_future<PushOp> ReplicatedRecoveryBackend::build_push_op( const ObjectRecoveryInfo& recovery_info, const ObjectRecoveryProgress& progress, object_stat_sum_t* stat) { logger().debug("{} {} @{}", __func__, recovery_info.soid, recovery_info.version); return seastar::do_with(ObjectRecoveryProgress(progress), uint64_t(crimson::common::local_conf() ->osd_recovery_max_chunk), recovery_info.version, PushOp(), [this, &recovery_info, &progress, stat] (auto& new_progress, auto& available, auto& v, auto& push_op) { 
return read_metadata_for_push_op(recovery_info.soid, progress, new_progress, v, &push_op ).then_interruptible([&](eversion_t local_ver) mutable { // If requestor didn't know the version, use ours if (v == eversion_t()) { v = local_ver; } else if (v != local_ver) { logger().error("build_push_op: {} push {} v{} failed because local copy is {}", pg.get_pgid(), recovery_info.soid, recovery_info.version, local_ver); // TODO: bail out } return read_omap_for_push_op(recovery_info.soid, progress, new_progress, available, &push_op); }).then_interruptible([this, &recovery_info, &progress, &available, &push_op]() mutable { logger().debug("build_push_op: available: {}, copy_subset: {}", available, recovery_info.copy_subset); return read_object_for_push_op(recovery_info.soid, recovery_info.copy_subset, progress.data_recovered_to, available, &push_op); }).then_interruptible([&recovery_info, &v, &progress, &new_progress, stat, &push_op] (uint64_t recovered_to) mutable { new_progress.data_recovered_to = recovered_to; if (new_progress.is_complete(recovery_info)) { new_progress.data_complete = true; if (stat) stat->num_objects_recovered++; } else if (progress.first && progress.omap_complete) { // If omap is not changed, we need recovery omap // when recovery cannot be completed once new_progress.omap_complete = false; } if (stat) { stat->num_keys_recovered += push_op.omap_entries.size(); stat->num_bytes_recovered += push_op.data.length(); } push_op.version = v; push_op.soid = recovery_info.soid; push_op.recovery_info = recovery_info; push_op.after_progress = new_progress; push_op.before_progress = progress; logger().debug("build_push_op: push_op version:" " {}, push_op data length: {}", push_op.version, push_op.data.length()); return seastar::make_ready_future<PushOp>(std::move(push_op)); }); }); } RecoveryBackend::interruptible_future<eversion_t> ReplicatedRecoveryBackend::read_metadata_for_push_op( const hobject_t& oid, const ObjectRecoveryProgress& progress, ObjectRecoveryProgress& new_progress, eversion_t ver, PushOp* push_op) { logger().debug("{}, {}", __func__, oid); if (!progress.first) { return seastar::make_ready_future<eversion_t>(ver); } return interruptor::make_interruptible(interruptor::when_all_succeed( backend->omap_get_header(coll, ghobject_t(oid)).handle_error_interruptible<false>( crimson::os::FuturizedStore::Shard::read_errorator::all_same_way( [oid] (const std::error_code& e) { logger().debug("read_metadata_for_push_op, error {} when getting omap header: {}", e, oid); return seastar::make_ready_future<bufferlist>(); })), interruptor::make_interruptible(store->get_attrs(coll, ghobject_t(oid))) .handle_error_interruptible<false>( crimson::os::FuturizedStore::Shard::get_attrs_ertr::all_same_way( [oid] (const std::error_code& e) { logger().debug("read_metadata_for_push_op, error {} when getting attrs: {}", e, oid); return seastar::make_ready_future<crimson::os::FuturizedStore::Shard::attrs_t>(); })) )).then_unpack_interruptible([&new_progress, push_op](auto bl, auto attrs) { if (bl.length() == 0) { logger().warn("read_metadata_for_push_op: fail to read omap header"); } else if (attrs.empty()) { logger().error("read_metadata_for_push_op: fail to read attrs"); return eversion_t{}; } push_op->omap_header.claim_append(std::move(bl)); for (auto&& [key, val] : attrs) { push_op->attrset.emplace(std::move(key), std::move(val)); } logger().debug("read_metadata_for_push_op: {}", push_op->attrset[OI_ATTR]); object_info_t oi; oi.decode_no_oid(push_op->attrset[OI_ATTR]); new_progress.first = false; 
return oi.version; }); } RecoveryBackend::interruptible_future<uint64_t> ReplicatedRecoveryBackend::read_object_for_push_op( const hobject_t& oid, const interval_set<uint64_t>& copy_subset, uint64_t offset, uint64_t max_len, PushOp* push_op) { if (max_len == 0 || copy_subset.empty()) { push_op->data_included.clear(); return seastar::make_ready_future<uint64_t>(offset); } // 1. get the extents in the interested range return interruptor::make_interruptible(backend->fiemap(coll, ghobject_t{oid}, 0, copy_subset.range_end())).safe_then_interruptible( [=, this](auto&& fiemap_included) mutable { interval_set<uint64_t> extents; try { extents.intersection_of(copy_subset, std::move(fiemap_included)); } catch (std::exception &) { // if fiemap() fails, we will read nothing, as the intersection of // copy_subset and an empty interval_set would be empty anyway extents.clear(); } // 2. we can read up to "max_len" bytes from "offset", so truncate the // extents down to this quota. no need to return the number of consumed // bytes, as this is the last consumer of this quota push_op->data_included.span_of(extents, offset, max_len); // 3. read the truncated extents // TODO: check if the returned extents are pruned return interruptor::make_interruptible(store->readv(coll, ghobject_t{oid}, push_op->data_included, 0)); }).safe_then_interruptible([push_op, range_end=copy_subset.range_end()](auto &&bl) { push_op->data.claim_append(std::move(bl)); uint64_t recovered_to = 0; if (push_op->data_included.empty()) { // zero filled section, skip to end! recovered_to = range_end; } else { // note down the progress, we will start from there next time recovered_to = push_op->data_included.range_end(); } return seastar::make_ready_future<uint64_t>(recovered_to); }, PGBackend::read_errorator::all_same_way([](auto e) { logger().debug("build_push_op: read exception"); return seastar::make_exception_future<uint64_t>(e); })); } static std::optional<std::string> nullopt_if_empty(const std::string& s) { return s.empty() ? std::nullopt : std::make_optional(s); } static bool is_too_many_entries_per_chunk(const PushOp* push_op) { const uint64_t entries_per_chunk = crimson::common::local_conf()->osd_recovery_max_omap_entries_per_chunk; if (!entries_per_chunk) { // the limit is disabled return false; } return push_op->omap_entries.size() >= entries_per_chunk; } RecoveryBackend::interruptible_future<> ReplicatedRecoveryBackend::read_omap_for_push_op( const hobject_t& oid, const ObjectRecoveryProgress& progress, ObjectRecoveryProgress& new_progress, uint64_t& max_len, PushOp* push_op) { if (progress.omap_complete) { return seastar::make_ready_future<>(); } return seastar::repeat([&new_progress, &max_len, push_op, &oid, this] { return shard_services.get_store().omap_get_values( coll, ghobject_t{oid}, nullopt_if_empty(new_progress.omap_recovered_to) ).safe_then([&new_progress, &max_len, push_op](const auto& ret) { const auto& [done, kvs] = ret; bool stop = done; // assuming "values.empty() only if done" holds here! 
for (const auto& [key, value] : kvs) { if (is_too_many_entries_per_chunk(push_op)) { stop = true; break; } if (const uint64_t entry_size = key.size() + value.length(); entry_size > max_len) { stop = true; break; } else { max_len -= std::min(max_len, entry_size); } push_op->omap_entries.emplace(key, value); } if (!push_op->omap_entries.empty()) { // we iterate in order new_progress.omap_recovered_to = std::rbegin(push_op->omap_entries)->first; } if (done) { new_progress.omap_complete = true; } return seastar::make_ready_future<seastar::stop_iteration>( stop ? seastar::stop_iteration::yes : seastar::stop_iteration::no ); }, crimson::os::FuturizedStore::Shard::read_errorator::assert_all{}); }); } std::vector<pg_shard_t> ReplicatedRecoveryBackend::get_shards_to_push(const hobject_t& soid) const { std::vector<pg_shard_t> shards; assert(pg.get_acting_recovery_backfill().size() > 0); for (const auto& peer : pg.get_acting_recovery_backfill()) { if (peer == pg.get_pg_whoami()) continue; auto shard_missing = pg.get_shard_missing().find(peer); assert(shard_missing != pg.get_shard_missing().end()); if (shard_missing->second.is_missing(soid)) { shards.push_back(shard_missing->first); } } return shards; } RecoveryBackend::interruptible_future<> ReplicatedRecoveryBackend::handle_pull(Ref<MOSDPGPull> m) { logger().debug("{}: {}", __func__, *m); if (pg.can_discard_replica_op(*m)) { logger().debug("{}: discarding {}", __func__, *m); return seastar::now(); } return seastar::do_with(m->take_pulls(), [this, from=m->from](auto& pulls) { return interruptor::parallel_for_each(pulls, [this, from](auto& pull_op) { const hobject_t& soid = pull_op.soid; logger().debug("handle_pull: {}", soid); return backend->stat(coll, ghobject_t(soid)).then_interruptible( [this, &pull_op](auto st) { ObjectRecoveryInfo &recovery_info = pull_op.recovery_info; ObjectRecoveryProgress &progress = pull_op.recovery_progress; if (progress.first && recovery_info.size == ((uint64_t) -1)) { // Adjust size and copy_subset recovery_info.size = st.st_size; if (st.st_size) { interval_set<uint64_t> object_range; object_range.insert(0, st.st_size); recovery_info.copy_subset.intersection_of(object_range); } else { recovery_info.copy_subset.clear(); } assert(recovery_info.clone_subset.empty()); } return build_push_op(recovery_info, progress, 0); }).then_interruptible([this, from](auto push_op) { auto msg = crimson::make_message<MOSDPGPush>(); msg->from = pg.get_pg_whoami(); msg->pgid = pg.get_pgid(); msg->map_epoch = pg.get_osdmap_epoch(); msg->min_epoch = pg.get_last_peering_reset(); msg->set_priority(pg.get_recovery_op_priority()); msg->pushes.push_back(std::move(push_op)); return shard_services.send_to_osd(from.osd, std::move(msg), pg.get_osdmap_epoch()); }); }); }); } RecoveryBackend::interruptible_future<bool> ReplicatedRecoveryBackend::_handle_pull_response( pg_shard_t from, PushOp& push_op, PullOp* response, ceph::os::Transaction* t) { logger().debug("handle_pull_response {} {} data.size() is {} data_included: {}", push_op.recovery_info, push_op.after_progress, push_op.data.length(), push_op.data_included); const hobject_t &hoid = push_op.soid; auto& recovery_waiter = get_recovering(hoid); auto& pull_info = *recovery_waiter.pull_info; if (pull_info.recovery_info.size == (uint64_t(-1))) { pull_info.recovery_info.size = push_op.recovery_info.size; pull_info.recovery_info.copy_subset.intersection_of( push_op.recovery_info.copy_subset); } // If primary doesn't have object info and didn't know version if (pull_info.recovery_info.version == 
eversion_t()) pull_info.recovery_info.version = push_op.version; auto prepare_waiter = interruptor::make_interruptible( seastar::make_ready_future<>()); if (pull_info.recovery_progress.first) { prepare_waiter = pg.obc_loader.with_obc<RWState::RWNONE>( pull_info.recovery_info.soid, [&pull_info, &recovery_waiter, &push_op](auto obc) { pull_info.obc = obc; recovery_waiter.obc = obc; obc->obs.oi.decode_no_oid(push_op.attrset.at(OI_ATTR), push_op.soid); pull_info.recovery_info.oi = obc->obs.oi; return crimson::osd::PG::load_obc_ertr::now(); }).handle_error_interruptible(crimson::ct_error::assert_all{}); }; return prepare_waiter.then_interruptible( [this, &pull_info, &push_op, t, response]() mutable { const bool first = pull_info.recovery_progress.first; pull_info.recovery_progress = push_op.after_progress; logger().debug("new recovery_info {}, new progress {}", pull_info.recovery_info, pull_info.recovery_progress); interval_set<uint64_t> data_zeros; { uint64_t offset = push_op.before_progress.data_recovered_to; uint64_t length = (push_op.after_progress.data_recovered_to - push_op.before_progress.data_recovered_to); if (length) { data_zeros.insert(offset, length); } } auto [usable_intervals, data] = trim_pushed_data(pull_info.recovery_info.copy_subset, push_op.data_included, push_op.data); bool complete = pull_info.is_complete(); bool clear_omap = !push_op.before_progress.omap_complete; return submit_push_data(pull_info.recovery_info, first, complete, clear_omap, std::move(data_zeros), std::move(usable_intervals), std::move(data), std::move(push_op.omap_header), push_op.attrset, std::move(push_op.omap_entries), t) .then_interruptible( [this, response, &pull_info, &push_op, complete, t, bytes_recovered=data.length()] { pull_info.stat.num_keys_recovered += push_op.omap_entries.size(); pull_info.stat.num_bytes_recovered += bytes_recovered; if (complete) { pull_info.stat.num_objects_recovered++; pg.get_recovery_handler()->on_local_recover( push_op.soid, get_recovering(push_op.soid).pull_info->recovery_info, false, *t); return true; } else { response->soid = push_op.soid; response->recovery_info = pull_info.recovery_info; response->recovery_progress = pull_info.recovery_progress; return false; } }); }); } RecoveryBackend::interruptible_future<> ReplicatedRecoveryBackend::handle_pull_response( Ref<MOSDPGPush> m) { if (pg.can_discard_replica_op(*m)) { logger().debug("{}: discarding {}", __func__, *m); return seastar::now(); } const PushOp& push_op = m->pushes[0]; //TODO: only one push per message for now. if (push_op.version == eversion_t()) { // replica doesn't have it! 
pg.get_recovery_handler()->on_failed_recover({ m->from }, push_op.soid, get_recovering(push_op.soid).pull_info->recovery_info.version); return seastar::make_exception_future<>( std::runtime_error(fmt::format( "Error on pushing side {} when pulling obj {}", m->from, push_op.soid))); } logger().debug("{}: {}", __func__, *m); return seastar::do_with(PullOp(), [this, m](auto& response) { return seastar::do_with(ceph::os::Transaction(), m.get(), [this, &response](auto& t, auto& m) { pg_shard_t from = m->from; PushOp& push_op = m->pushes[0]; // only one push per message for now return _handle_pull_response(from, push_op, &response, &t ).then_interruptible( [this, &t](bool complete) { epoch_t epoch_frozen = pg.get_osdmap_epoch(); logger().debug("ReplicatedRecoveryBackend::handle_pull_response: do_transaction..."); return shard_services.get_store().do_transaction(coll, std::move(t)) .then([this, epoch_frozen, complete, last_complete = pg.get_info().last_complete] { pg.get_recovery_handler()->_committed_pushed_object(epoch_frozen, last_complete); return seastar::make_ready_future<bool>(complete); }); }); }).then_interruptible([this, m, &response](bool complete) { if (complete) { auto& push_op = m->pushes[0]; get_recovering(push_op.soid).set_pulled(); return seastar::make_ready_future<>(); } else { auto reply = crimson::make_message<MOSDPGPull>(); reply->from = pg.get_pg_whoami(); reply->set_priority(m->get_priority()); reply->pgid = pg.get_info().pgid; reply->map_epoch = m->map_epoch; reply->min_epoch = m->min_epoch; reply->set_pulls({std::move(response)}); return shard_services.send_to_osd(m->from.osd, std::move(reply), pg.get_osdmap_epoch()); } }); }); } RecoveryBackend::interruptible_future<> ReplicatedRecoveryBackend::_handle_push( pg_shard_t from, PushOp &push_op, PushReplyOp *response, ceph::os::Transaction *t) { logger().debug("{}", __func__); bool first = push_op.before_progress.first; interval_set<uint64_t> data_zeros; { uint64_t offset = push_op.before_progress.data_recovered_to; uint64_t length = (push_op.after_progress.data_recovered_to - push_op.before_progress.data_recovered_to); if (length) { data_zeros.insert(offset, length); } } bool complete = (push_op.after_progress.data_complete && push_op.after_progress.omap_complete); bool clear_omap = !push_op.before_progress.omap_complete; response->soid = push_op.recovery_info.soid; return submit_push_data(push_op.recovery_info, first, complete, clear_omap, std::move(data_zeros), std::move(push_op.data_included), std::move(push_op.data), std::move(push_op.omap_header), push_op.attrset, std::move(push_op.omap_entries), t) .then_interruptible( [this, complete, &push_op, t] { if (complete) { pg.get_recovery_handler()->on_local_recover( push_op.recovery_info.soid, push_op.recovery_info, false, *t); } }); } RecoveryBackend::interruptible_future<> ReplicatedRecoveryBackend::handle_push( Ref<MOSDPGPush> m) { if (pg.can_discard_replica_op(*m)) { logger().debug("{}: discarding {}", __func__, *m); return seastar::now(); } if (pg.is_primary()) { return handle_pull_response(m); } logger().debug("{}: {}", __func__, *m); return seastar::do_with(PushReplyOp(), [this, m](auto& response) { PushOp& push_op = m->pushes[0]; // TODO: only one push per message for now return seastar::do_with(ceph::os::Transaction(), [this, m, &push_op, &response](auto& t) { return _handle_push(m->from, push_op, &response, &t).then_interruptible( [this, &t] { epoch_t epoch_frozen = pg.get_osdmap_epoch(); logger().debug("ReplicatedRecoveryBackend::handle_push: do_transaction..."); 
return interruptor::make_interruptible( shard_services.get_store().do_transaction(coll, std::move(t))).then_interruptible( [this, epoch_frozen, last_complete = pg.get_info().last_complete] { //TODO: this should be grouped with pg.on_local_recover somehow. pg.get_recovery_handler()->_committed_pushed_object(epoch_frozen, last_complete); }); }); }).then_interruptible([this, m, &response]() mutable { auto reply = crimson::make_message<MOSDPGPushReply>(); reply->from = pg.get_pg_whoami(); reply->set_priority(m->get_priority()); reply->pgid = pg.get_info().pgid; reply->map_epoch = m->map_epoch; reply->min_epoch = m->min_epoch; std::vector<PushReplyOp> replies = { std::move(response) }; reply->replies.swap(replies); return shard_services.send_to_osd(m->from.osd, std::move(reply), pg.get_osdmap_epoch()); }); }); } RecoveryBackend::interruptible_future<std::optional<PushOp>> ReplicatedRecoveryBackend::_handle_push_reply( pg_shard_t peer, const PushReplyOp &op) { const hobject_t& soid = op.soid; logger().debug("{}, soid {}, from {}", __func__, soid, peer); auto recovering_iter = recovering.find(soid); if (recovering_iter == recovering.end() || !recovering_iter->second->pushing.count(peer)) { logger().debug("huh, i wasn't pushing {} to osd.{}", soid, peer); return seastar::make_ready_future<std::optional<PushOp>>(); } else { auto& push_info = recovering_iter->second->pushing[peer]; bool error = push_info.recovery_progress.error; if (!push_info.recovery_progress.data_complete && !error) { return build_push_op(push_info.recovery_info, push_info.recovery_progress, &push_info.stat ).then_interruptible([&push_info] (auto push_op) { push_info.recovery_progress = push_op.after_progress; return seastar::make_ready_future<std::optional<PushOp>>( std::move(push_op)); }).handle_exception_interruptible( [recovering_iter, &push_info, peer] (auto e) { push_info.recovery_progress.error = true; recovering_iter->second->set_push_failed(peer, e); return seastar::make_ready_future<std::optional<PushOp>>(); }); } if (!error) { pg.get_recovery_handler()->on_peer_recover(peer, soid, push_info.recovery_info); } recovering_iter->second->set_pushed(peer); return seastar::make_ready_future<std::optional<PushOp>>(); } } RecoveryBackend::interruptible_future<> ReplicatedRecoveryBackend::handle_push_reply( Ref<MOSDPGPushReply> m) { logger().debug("{}: {}", __func__, *m); auto from = m->from; auto& push_reply = m->replies[0]; //TODO: only one reply per message return _handle_push_reply(from, push_reply).then_interruptible( [this, from](std::optional<PushOp> push_op) { if (push_op) { auto msg = crimson::make_message<MOSDPGPush>(); msg->from = pg.get_pg_whoami(); msg->pgid = pg.get_pgid(); msg->map_epoch = pg.get_osdmap_epoch(); msg->min_epoch = pg.get_last_peering_reset(); msg->set_priority(pg.get_recovery_op_priority()); msg->pushes.push_back(std::move(*push_op)); return shard_services.send_to_osd(from.osd, std::move(msg), pg.get_osdmap_epoch()); } else { return seastar::make_ready_future<>(); } }); } std::pair<interval_set<uint64_t>, bufferlist> ReplicatedRecoveryBackend::trim_pushed_data( const interval_set<uint64_t> &copy_subset, const interval_set<uint64_t> &intervals_received, ceph::bufferlist data_received) { logger().debug("{}", __func__); // what i have is only a subset of what i want if (intervals_received.subset_of(copy_subset)) { return {intervals_received, data_received}; } // only collect the extents included by copy_subset and intervals_received interval_set<uint64_t> intervals_usable; bufferlist data_usable; 
intervals_usable.intersection_of(copy_subset, intervals_received); uint64_t have_off = 0; for (auto [have_start, have_len] : intervals_received) { interval_set<uint64_t> want; want.insert(have_start, have_len); want.intersection_of(copy_subset); for (auto [want_start, want_len] : want) { bufferlist sub; uint64_t data_off = have_off + (want_start - have_start); sub.substr_of(data_received, data_off, want_len); data_usable.claim_append(sub); } have_off += have_len; } return {intervals_usable, data_usable}; } RecoveryBackend::interruptible_future<hobject_t> ReplicatedRecoveryBackend::prep_push_target( const ObjectRecoveryInfo& recovery_info, bool first, bool complete, bool clear_omap, ObjectStore::Transaction* t, const map<string, bufferlist, less<>>& attrs, bufferlist&& omap_header) { if (!first) { return seastar::make_ready_future<hobject_t>( get_temp_recovery_object(recovery_info.soid, recovery_info.version)); } ghobject_t target_oid; if (complete) { // overwrite the original object target_oid = ghobject_t(recovery_info.soid); } else { target_oid = ghobject_t(get_temp_recovery_object(recovery_info.soid, recovery_info.version)); logger().debug("{}: Adding oid {} in the temp collection", __func__, target_oid); add_temp_obj(target_oid.hobj); } // create a new object if (!complete || !recovery_info.object_exist) { t->remove(coll->get_cid(), target_oid); t->touch(coll->get_cid(), target_oid); object_info_t oi; oi.decode_no_oid(attrs.at(OI_ATTR)); t->set_alloc_hint(coll->get_cid(), target_oid, oi.expected_object_size, oi.expected_write_size, oi.alloc_hint_flags); } if (complete) { // remove xattr and update later if overwrite on original object t->rmattrs(coll->get_cid(), target_oid); // if need update omap, clear the previous content first if (clear_omap) { t->omap_clear(coll->get_cid(), target_oid); } } t->truncate(coll->get_cid(), target_oid, recovery_info.size); if (omap_header.length()) { t->omap_setheader(coll->get_cid(), target_oid, omap_header); } if (complete || !recovery_info.object_exist) { return seastar::make_ready_future<hobject_t>(target_oid.hobj); } // clone overlap content in local object if using a new object return interruptor::make_interruptible(store->stat(coll, ghobject_t(recovery_info.soid))) .then_interruptible( [this, &recovery_info, t, target_oid] (auto st) { // TODO: pg num bytes counting uint64_t local_size = std::min(recovery_info.size, (uint64_t)st.st_size); interval_set<uint64_t> local_intervals_included, local_intervals_excluded; if (local_size) { local_intervals_included.insert(0, local_size); local_intervals_excluded.intersection_of(local_intervals_included, recovery_info.copy_subset); local_intervals_included.subtract(local_intervals_excluded); } for (auto [off, len] : local_intervals_included) { logger().debug(" clone_range {} {}~{}", recovery_info.soid, off, len); t->clone_range(coll->get_cid(), ghobject_t(recovery_info.soid), target_oid, off, len, off); } return seastar::make_ready_future<hobject_t>(target_oid.hobj); }); } RecoveryBackend::interruptible_future<> ReplicatedRecoveryBackend::submit_push_data( const ObjectRecoveryInfo &recovery_info, bool first, bool complete, bool clear_omap, interval_set<uint64_t>&& data_zeros, interval_set<uint64_t>&& intervals_included, bufferlist&& data_included, bufferlist&& omap_header, const map<string, bufferlist, less<>> &attrs, map<string, bufferlist>&& omap_entries, ObjectStore::Transaction *t) { logger().debug("{}", __func__); return prep_push_target(recovery_info, first, complete, clear_omap, t, attrs, 
std::move(omap_header)).then_interruptible( [this, &recovery_info, t, first, complete, data_zeros=std::move(data_zeros), intervals_included=std::move(intervals_included), data_included=std::move(data_included), omap_entries=std::move(omap_entries), &attrs](auto target_oid) mutable { uint32_t fadvise_flags = CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL; // Punch zeros for data, if fiemap indicates nothing but it is marked dirty if (!data_zeros.empty()) { data_zeros.intersection_of(recovery_info.copy_subset); assert(intervals_included.subset_of(data_zeros)); data_zeros.subtract(intervals_included); logger().debug("submit_push_data recovering object {} copy_subset: {} " "intervals_included: {} data_zeros: {}", recovery_info.soid, recovery_info.copy_subset, intervals_included, data_zeros); for (auto [start, len] : data_zeros) { t->zero(coll->get_cid(), ghobject_t(target_oid), start, len); } } uint64_t off = 0; for (auto [start, len] : intervals_included) { bufferlist bit; bit.substr_of(data_included, off, len); t->write(coll->get_cid(), ghobject_t(target_oid), start, len, bit, fadvise_flags); off += len; } if (!omap_entries.empty()) t->omap_setkeys(coll->get_cid(), ghobject_t(target_oid), omap_entries); if (!attrs.empty()) t->setattrs(coll->get_cid(), ghobject_t(target_oid), attrs); if (complete) { if (!first) { logger().debug("submit_push_data: Removing oid {} from the temp collection", target_oid); clear_temp_obj(target_oid); t->remove(coll->get_cid(), ghobject_t(recovery_info.soid)); t->collection_move_rename(coll->get_cid(), ghobject_t(target_oid), coll->get_cid(), ghobject_t(recovery_info.soid)); } submit_push_complete(recovery_info, t); } logger().debug("submit_push_data: done"); return seastar::make_ready_future<>(); }); } void ReplicatedRecoveryBackend::submit_push_complete( const ObjectRecoveryInfo &recovery_info, ObjectStore::Transaction *t) { for (const auto& [oid, extents] : recovery_info.clone_subset) { for (const auto& [off, len] : extents) { logger().debug(" clone_range {} {}~{}", oid, off, len); t->clone_range(coll->get_cid(), ghobject_t(oid), ghobject_t(recovery_info.soid), off, len, off); } } } RecoveryBackend::interruptible_future<> ReplicatedRecoveryBackend::handle_recovery_delete_reply( Ref<MOSDPGRecoveryDeleteReply> m) { auto& p = m->objects.front(); hobject_t soid = p.first; ObjectRecoveryInfo recovery_info; recovery_info.version = p.second; pg.get_recovery_handler()->on_peer_recover(m->from, soid, recovery_info); get_recovering(soid).set_pushed(m->from); return seastar::now(); } RecoveryBackend::interruptible_future<> ReplicatedRecoveryBackend::handle_recovery_op( Ref<MOSDFastDispatchOp> m, crimson::net::ConnectionRef conn) { switch (m->get_header().type) { case MSG_OSD_PG_PULL: return handle_pull(boost::static_pointer_cast<MOSDPGPull>(m)); case MSG_OSD_PG_PUSH: return handle_push(boost::static_pointer_cast<MOSDPGPush>(m)); case MSG_OSD_PG_PUSH_REPLY: return handle_push_reply( boost::static_pointer_cast<MOSDPGPushReply>(m)); case MSG_OSD_PG_RECOVERY_DELETE: return handle_recovery_delete( boost::static_pointer_cast<MOSDPGRecoveryDelete>(m)); case MSG_OSD_PG_RECOVERY_DELETE_REPLY: return handle_recovery_delete_reply( boost::static_pointer_cast<MOSDPGRecoveryDeleteReply>(m)); default: // delegate to parent class for handling backend-agnostic recovery ops. return RecoveryBackend::handle_recovery_op(std::move(m), conn); } }
44,916
36.968724
100
cc
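read_omap_for_push_op() above copies omap entries into a push op until either the configured per-chunk entry count is reached or the next key/value pair would overflow the remaining byte budget, remembering the last key copied so the following chunk can resume after it. The function below reproduces just that chunking rule over a plain std::map; ChunkLimits and the other names are invented for the example and only loosely mirror the osd_recovery_max_omap_entries_per_chunk / osd_recovery_max_chunk options.

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

struct ChunkLimits {
  std::uint64_t max_entries;  // 0 disables the entry-count limit
  std::uint64_t max_bytes;    // remaining byte budget for this chunk
};

// Copy omap entries into 'chunk' until a limit is hit. Returns true when the
// whole omap has been consumed; 'resume' tracks the last key copied, playing
// the role of omap_recovered_to in the code above.
bool fill_omap_chunk(const std::map<std::string, std::string>& omap,
                     std::string& resume,
                     ChunkLimits limits,
                     std::map<std::string, std::string>& chunk) {
  auto it = resume.empty() ? omap.begin() : omap.upper_bound(resume);
  for (; it != omap.end(); ++it) {
    const bool too_many =
        limits.max_entries != 0 && chunk.size() >= limits.max_entries;
    const std::uint64_t entry_size = it->first.size() + it->second.size();
    if (too_many || entry_size > limits.max_bytes) {
      break;  // chunk is full; the next push op resumes after 'resume'
    }
    limits.max_bytes -= entry_size;
    chunk.emplace(it->first, it->second);
    resume = it->first;
  }
  return it == omap.end();
}

int main() {
  const std::map<std::string, std::string> omap = {
      {"a", "xx"}, {"b", "yyyy"}, {"c", "zzzzzzzz"}};
  std::map<std::string, std::string> chunk;
  std::string resume;
  const bool done = fill_omap_chunk(omap, resume, {2, 64}, chunk);
  std::cout << "copied " << chunk.size() << " entries, done=" << std::boolalpha
            << done << ", resume after '" << resume << "'\n";
  return 0;
}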
null
ceph-main/src/crimson/osd/replicated_recovery_backend.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include "crimson/common/interruptible_future.h" #include "crimson/osd/pg_interval_interrupt_condition.h" #include "crimson/osd/recovery_backend.h" #include "messages/MOSDPGPull.h" #include "messages/MOSDPGPush.h" #include "messages/MOSDPGPushReply.h" #include "messages/MOSDPGRecoveryDelete.h" #include "messages/MOSDPGRecoveryDeleteReply.h" #include "os/ObjectStore.h" class ReplicatedRecoveryBackend : public RecoveryBackend { public: ReplicatedRecoveryBackend(crimson::osd::PG& pg, crimson::osd::ShardServices& shard_services, crimson::os::CollectionRef coll, PGBackend* backend) : RecoveryBackend(pg, shard_services, coll, backend) {} interruptible_future<> handle_recovery_op( Ref<MOSDFastDispatchOp> m, crimson::net::ConnectionRef conn) final; interruptible_future<> recover_object( const hobject_t& soid, eversion_t need) final; interruptible_future<> recover_delete( const hobject_t& soid, eversion_t need) final; interruptible_future<> push_delete( const hobject_t& soid, eversion_t need) final; protected: interruptible_future<> handle_pull( Ref<MOSDPGPull> m); interruptible_future<> handle_pull_response( Ref<MOSDPGPush> m); interruptible_future<> handle_push( Ref<MOSDPGPush> m); interruptible_future<> handle_push_reply( Ref<MOSDPGPushReply> m); interruptible_future<> handle_recovery_delete( Ref<MOSDPGRecoveryDelete> m); interruptible_future<> handle_recovery_delete_reply( Ref<MOSDPGRecoveryDeleteReply> m); interruptible_future<PushOp> prep_push( const hobject_t& soid, eversion_t need, pg_shard_t pg_shard); void prepare_pull( PullOp& pull_op, pull_info_t& pull_info, const hobject_t& soid, eversion_t need); std::vector<pg_shard_t> get_shards_to_push( const hobject_t& soid) const; interruptible_future<PushOp> build_push_op( const ObjectRecoveryInfo& recovery_info, const ObjectRecoveryProgress& progress, object_stat_sum_t* stat); /// @returns true if this push op is the last push op for /// recovery @c pop.soid interruptible_future<bool> _handle_pull_response( pg_shard_t from, PushOp& push_op, PullOp* response, ceph::os::Transaction* t); std::pair<interval_set<uint64_t>, ceph::bufferlist> trim_pushed_data( const interval_set<uint64_t> &copy_subset, const interval_set<uint64_t> &intervals_received, ceph::bufferlist data_received); interruptible_future<> submit_push_data( const ObjectRecoveryInfo &recovery_info, bool first, bool complete, bool clear_omap, interval_set<uint64_t>&& data_zeros, interval_set<uint64_t>&& intervals_included, ceph::bufferlist&& data_included, ceph::bufferlist&& omap_header, const std::map<std::string, bufferlist, std::less<>> &attrs, std::map<std::string, bufferlist>&& omap_entries, ceph::os::Transaction *t); void submit_push_complete( const ObjectRecoveryInfo &recovery_info, ObjectStore::Transaction *t); interruptible_future<> _handle_push( pg_shard_t from, PushOp& push_op, PushReplyOp *response, ceph::os::Transaction *t); interruptible_future<std::optional<PushOp>> _handle_push_reply( pg_shard_t peer, const PushReplyOp &op); interruptible_future<> on_local_recover_persist( const hobject_t& soid, const ObjectRecoveryInfo& _recovery_info, bool is_delete, epoch_t epoch_to_freeze); interruptible_future<> local_recover_delete( const hobject_t& soid, eversion_t need, epoch_t epoch_frozen); seastar::future<> on_stop() final { return seastar::now(); } private: /// pull missing object from peer interruptible_future<> maybe_pull_missing_obj( const hobject_t& soid, 
eversion_t need); /// load object context for recovery if it is not ready yet using load_obc_ertr = crimson::errorator< crimson::ct_error::object_corrupted>; using load_obc_iertr = ::crimson::interruptible::interruptible_errorator< ::crimson::osd::IOInterruptCondition, load_obc_ertr>; interruptible_future<> maybe_push_shards( const hobject_t& soid, eversion_t need); /// read the data attached to given object. the size of them is supposed to /// be relatively small. /// /// @return @c oi.version interruptible_future<eversion_t> read_metadata_for_push_op( const hobject_t& oid, const ObjectRecoveryProgress& progress, ObjectRecoveryProgress& new_progress, eversion_t ver, PushOp* push_op); /// read the remaining extents of object to be recovered and fill push_op /// with them /// /// @param oid object being recovered /// @param copy_subset extents we want /// @param offset the offset in object from where we should read /// @return the new offset interruptible_future<uint64_t> read_object_for_push_op( const hobject_t& oid, const interval_set<uint64_t>& copy_subset, uint64_t offset, uint64_t max_len, PushOp* push_op); interruptible_future<> read_omap_for_push_op( const hobject_t& oid, const ObjectRecoveryProgress& progress, ObjectRecoveryProgress& new_progress, uint64_t& max_len, PushOp* push_op); interruptible_future<hobject_t> prep_push_target( const ObjectRecoveryInfo &recovery_info, bool first, bool complete, bool clear_omap, ObjectStore::Transaction* t, const std::map<std::string, bufferlist, std::less<>> &attrs, bufferlist&& omap_header); using interruptor = crimson::interruptible::interruptor< crimson::osd::IOInterruptCondition>; };
5,714
32.617647
77
h
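build_push_op(), declared above, advances an ObjectRecoveryProgress one chunk at a time: the first op carries metadata and (ideally) the omap, later ops move data_recovered_to forward until it covers the object and data_complete can be set. The loop below is a toy model of that progression with fake sizes and no I/O; the field names mirror the real ones, everything else is made up for illustration.

#include <algorithm>
#include <cstdint>
#include <iostream>

struct Progress {
  bool first = true;
  bool data_complete = false;
  bool omap_complete = false;
  std::uint64_t data_recovered_to = 0;
};

struct RecoveryInfo {
  std::uint64_t size = 0;
};

// Advance one "push op" worth of recovery, bounded by max_chunk bytes.
Progress advance(const RecoveryInfo& info, Progress p,
                 std::uint64_t max_chunk) {
  if (p.first) {
    p.first = false;         // metadata (attrs, object info) rides the first op
    p.omap_complete = true;  // pretend the whole omap fit into this first op
  }
  p.data_recovered_to = std::min(info.size, p.data_recovered_to + max_chunk);
  if (p.data_recovered_to >= info.size && p.omap_complete) {
    p.data_complete = true;  // recovery of this object can be declared done
  }
  return p;
}

int main() {
  const RecoveryInfo info{10ull << 20};  // a 10 MiB object
  Progress progress;
  int push_ops = 0;
  while (!progress.data_complete) {
    progress = advance(info, progress, 4ull << 20);  // 4 MiB per push op
    ++push_ops;
  }
  std::cout << "object fully pushed after " << push_ops << " push ops\n";
  return 0;
}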
null
ceph-main/src/crimson/osd/shard_services.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include <boost/smart_ptr/make_local_shared.hpp> #include "crimson/osd/shard_services.h" #include "messages/MOSDAlive.h" #include "messages/MOSDMap.h" #include "messages/MOSDPGCreated.h" #include "messages/MOSDPGTemp.h" #include "osd/osd_perf_counters.h" #include "osd/PeeringState.h" #include "crimson/common/config_proxy.h" #include "crimson/mgr/client.h" #include "crimson/mon/MonClient.h" #include "crimson/net/Messenger.h" #include "crimson/net/Connection.h" #include "crimson/os/cyanstore/cyan_store.h" #include "crimson/osd/osdmap_service.h" #include "crimson/osd/osd_operations/pg_advance_map.h" #include "crimson/osd/pg.h" #include "crimson/osd/pg_meta.h" namespace { seastar::logger& logger() { return crimson::get_logger(ceph_subsys_osd); } } using std::vector; namespace crimson::osd { PerShardState::PerShardState( int whoami, ceph::mono_time startup_time, PerfCounters *perf, PerfCounters *recoverystate_perf, crimson::os::FuturizedStore &store, OSDState &osd_state) : whoami(whoami), store(store.get_sharded_store()), osd_state(osd_state), osdmap_gate("PerShardState::osdmap_gate"), perf(perf), recoverystate_perf(recoverystate_perf), throttler(crimson::common::local_conf()), next_tid( static_cast<ceph_tid_t>(seastar::this_shard_id()) << (std::numeric_limits<ceph_tid_t>::digits - 8)), startup_time(startup_time) {} seastar::future<> PerShardState::dump_ops_in_flight(Formatter *f) const { registry.for_each_op([f](const auto &op) { op.dump(f); }); return seastar::now(); } seastar::future<> PerShardState::stop_pgs() { assert_core(); return seastar::parallel_for_each( pg_map.get_pgs(), [](auto& p) { return p.second->stop(); }); } std::map<pg_t, pg_stat_t> PerShardState::get_pg_stats() const { assert_core(); std::map<pg_t, pg_stat_t> ret; for (auto [pgid, pg] : pg_map.get_pgs()) { if (pg->is_primary()) { auto stats = pg->get_stats(); // todo: update reported_epoch,reported_seq,last_fresh stats.reported_epoch = osdmap->get_epoch(); ret.emplace(pgid.pgid, std::move(stats)); } } return ret; } seastar::future<> PerShardState::broadcast_map_to_pgs( ShardServices &shard_services, epoch_t epoch) { assert_core(); auto &pgs = pg_map.get_pgs(); return seastar::parallel_for_each( pgs.begin(), pgs.end(), [=, &shard_services](auto& pg) { return shard_services.start_operation<PGAdvanceMap>( shard_services, pg.second, epoch, PeeringCtx{}, false).second; }); } Ref<PG> PerShardState::get_pg(spg_t pgid) { assert_core(); return pg_map.get_pg(pgid); } HeartbeatStampsRef PerShardState::get_hb_stamps(int peer) { assert_core(); auto [stamps, added] = heartbeat_stamps.try_emplace(peer); if (added) { stamps->second = ceph::make_ref<HeartbeatStamps>(peer); } return stamps->second; } OSDSingletonState::OSDSingletonState( int whoami, crimson::net::Messenger &cluster_msgr, crimson::net::Messenger &public_msgr, crimson::mon::Client &monc, crimson::mgr::Client &mgrc) : whoami(whoami), cluster_msgr(cluster_msgr), public_msgr(public_msgr), monc(monc), mgrc(mgrc), local_reserver( &cct, &finisher, crimson::common::local_conf()->osd_max_backfills, crimson::common::local_conf()->osd_min_recovery_priority), remote_reserver( &cct, &finisher, crimson::common::local_conf()->osd_max_backfills, crimson::common::local_conf()->osd_min_recovery_priority), snap_reserver( &cct, &finisher, crimson::common::local_conf()->osd_max_trimming_pgs) { crimson::common::local_conf().add_observer(this); osdmaps[0] = boost::make_local_shared<OSDMap>(); perf = 
build_osd_logger(&cct); cct.get_perfcounters_collection()->add(perf); recoverystate_perf = build_recoverystate_perf(&cct); cct.get_perfcounters_collection()->add(recoverystate_perf); } seastar::future<> OSDSingletonState::send_to_osd( int peer, MessageURef m, epoch_t from_epoch) { if (osdmap->is_down(peer)) { logger().info("{}: osd.{} is_down", __func__, peer); return seastar::now(); } else if (osdmap->get_info(peer).up_from > from_epoch) { logger().info("{}: osd.{} {} > {}", __func__, peer, osdmap->get_info(peer).up_from, from_epoch); return seastar::now(); } else { auto conn = cluster_msgr.connect( osdmap->get_cluster_addrs(peer).front(), CEPH_ENTITY_TYPE_OSD); return conn->send(std::move(m)); } } seastar::future<> OSDSingletonState::osdmap_subscribe( version_t epoch, bool force_request) { logger().info("{}({})", __func__, epoch); if (monc.sub_want_increment("osdmap", epoch, CEPH_SUBSCRIBE_ONETIME) || force_request) { return monc.renew_subs(); } else { return seastar::now(); } } void OSDSingletonState::queue_want_pg_temp( pg_t pgid, const vector<int>& want, bool forced) { auto p = pg_temp_pending.find(pgid); if (p == pg_temp_pending.end() || p->second.acting != want || forced) { pg_temp_wanted[pgid] = {want, forced}; } } void OSDSingletonState::remove_want_pg_temp(pg_t pgid) { pg_temp_wanted.erase(pgid); pg_temp_pending.erase(pgid); } void OSDSingletonState::requeue_pg_temp() { unsigned old_wanted = pg_temp_wanted.size(); unsigned old_pending = pg_temp_pending.size(); pg_temp_wanted.merge(pg_temp_pending); pg_temp_pending.clear(); logger().debug( "{}: {} + {} -> {}", __func__ , old_wanted, old_pending, pg_temp_wanted.size()); } seastar::future<> OSDSingletonState::send_pg_temp() { if (pg_temp_wanted.empty()) return seastar::now(); logger().debug("{}: {}", __func__, pg_temp_wanted); MURef<MOSDPGTemp> ms[2] = {nullptr, nullptr}; for (auto& [pgid, pg_temp] : pg_temp_wanted) { auto& m = ms[pg_temp.forced]; if (!m) { m = crimson::make_message<MOSDPGTemp>(osdmap->get_epoch()); m->forced = pg_temp.forced; } m->pg_temp.emplace(pgid, pg_temp.acting); } pg_temp_pending.merge(pg_temp_wanted); pg_temp_wanted.clear(); return seastar::parallel_for_each(std::begin(ms), std::end(ms), [this](auto& m) { if (m) { return monc.send_message(std::move(m)); } else { return seastar::now(); } }); } std::ostream& operator<<( std::ostream& out, const OSDSingletonState::pg_temp_t& pg_temp) { out << pg_temp.acting; if (pg_temp.forced) { out << " (forced)"; } return out; } seastar::future<> OSDSingletonState::send_pg_created(pg_t pgid) { logger().debug(__func__); auto o = get_osdmap(); ceph_assert(o->require_osd_release >= ceph_release_t::luminous); pg_created.insert(pgid); return monc.send_message(crimson::make_message<MOSDPGCreated>(pgid)); } seastar::future<> OSDSingletonState::send_pg_created() { logger().debug(__func__); auto o = get_osdmap(); ceph_assert(o->require_osd_release >= ceph_release_t::luminous); return seastar::parallel_for_each(pg_created, [this](auto &pgid) { return monc.send_message(crimson::make_message<MOSDPGCreated>(pgid)); }); } void OSDSingletonState::prune_pg_created() { logger().debug(__func__); auto o = get_osdmap(); auto i = pg_created.begin(); while (i != pg_created.end()) { auto p = o->get_pg_pool(i->pool()); if (!p || !p->has_flag(pg_pool_t::FLAG_CREATING)) { logger().debug("{} pruning {}", __func__, *i); i = pg_created.erase(i); } else { logger().debug(" keeping {}", __func__, *i); ++i; } } } seastar::future<> OSDSingletonState::send_alive(const epoch_t want) { logger().info( "{} 
want={} up_thru_wanted={}", __func__, want, up_thru_wanted); if (want > up_thru_wanted) { up_thru_wanted = want; } else { logger().debug("{} want={} <= up_thru_wanted={}; skipping", __func__, want, up_thru_wanted); return seastar::now(); } if (!osdmap->exists(whoami)) { logger().warn("{} DNE", __func__); return seastar::now(); } if (const epoch_t up_thru = osdmap->get_up_thru(whoami); up_thru_wanted > up_thru) { logger().debug("{} up_thru_wanted={} up_thru={}", __func__, want, up_thru); return monc.send_message( crimson::make_message<MOSDAlive>(osdmap->get_epoch(), want)); } else { logger().debug("{} {} <= {}", __func__, want, osdmap->get_up_thru(whoami)); return seastar::now(); } } const char** OSDSingletonState::get_tracked_conf_keys() const { static const char* KEYS[] = { "osd_max_backfills", "osd_min_recovery_priority", "osd_max_trimming_pgs", nullptr }; return KEYS; } void OSDSingletonState::handle_conf_change( const ConfigProxy& conf, const std::set <std::string> &changed) { if (changed.count("osd_max_backfills")) { local_reserver.set_max(conf->osd_max_backfills); remote_reserver.set_max(conf->osd_max_backfills); } if (changed.count("osd_min_recovery_priority")) { local_reserver.set_min_priority(conf->osd_min_recovery_priority); remote_reserver.set_min_priority(conf->osd_min_recovery_priority); } if (changed.count("osd_max_trimming_pgs")) { snap_reserver.set_max(conf->osd_max_trimming_pgs); } } seastar::future<OSDSingletonState::local_cached_map_t> OSDSingletonState::get_local_map(epoch_t e) { // TODO: use LRU cache for managing osdmap, fallback to disk if we have to if (auto found = osdmaps.find(e); found) { logger().debug("{} osdmap.{} found in cache", __func__, e); return seastar::make_ready_future<local_cached_map_t>(std::move(found)); } else { logger().debug("{} loading osdmap.{} from disk", __func__, e); return load_map(e).then([e, this](std::unique_ptr<OSDMap> osdmap) { return seastar::make_ready_future<local_cached_map_t>( osdmaps.insert(e, std::move(osdmap))); }); } } void OSDSingletonState::store_map_bl( ceph::os::Transaction& t, epoch_t e, bufferlist&& bl) { meta_coll->store_map(t, e, bl); map_bl_cache.insert(e, std::move(bl)); } seastar::future<bufferlist> OSDSingletonState::load_map_bl( epoch_t e) { if (std::optional<bufferlist> found = map_bl_cache.find(e); found) { logger().debug("{} osdmap.{} found in cache", __func__, e); return seastar::make_ready_future<bufferlist>(*found); } else { logger().debug("{} loading osdmap.{} from disk", __func__, e); return meta_coll->load_map(e); } } seastar::future<std::map<epoch_t, bufferlist>> OSDSingletonState::load_map_bls( epoch_t first, epoch_t last) { logger().debug("{} loading maps [{},{}]", __func__, first, last); ceph_assert(first <= last); return seastar::map_reduce(boost::make_counting_iterator<epoch_t>(first), boost::make_counting_iterator<epoch_t>(last + 1), [this](epoch_t e) { return load_map_bl(e).then([e](auto&& bl) { return seastar::make_ready_future<std::pair<epoch_t, bufferlist>>( std::make_pair(e, std::move(bl))); }); }, std::map<epoch_t, bufferlist>{}, [](auto&& bls, auto&& epoch_bl) { bls.emplace(std::move(epoch_bl)); return std::move(bls); }); } seastar::future<std::unique_ptr<OSDMap>> OSDSingletonState::load_map(epoch_t e) { auto o = std::make_unique<OSDMap>(); logger().info("{} osdmap.{}", __func__, e); if (e == 0) { return seastar::make_ready_future<std::unique_ptr<OSDMap>>(std::move(o)); } return load_map_bl(e).then([o=std::move(o)](bufferlist bl) mutable { o->decode(bl); return 
seastar::make_ready_future<std::unique_ptr<OSDMap>>(std::move(o)); }); } seastar::future<> OSDSingletonState::store_maps(ceph::os::Transaction& t, epoch_t start, Ref<MOSDMap> m) { return seastar::do_for_each( boost::make_counting_iterator(start), boost::make_counting_iterator(m->get_last() + 1), [&t, m, this](epoch_t e) { if (auto p = m->maps.find(e); p != m->maps.end()) { auto o = std::make_unique<OSDMap>(); o->decode(p->second); logger().info("store_maps storing osdmap.{}", e); store_map_bl(t, e, std::move(std::move(p->second))); osdmaps.insert(e, std::move(o)); return seastar::now(); } else if (auto p = m->incremental_maps.find(e); p != m->incremental_maps.end()) { logger().info("store_maps found osdmap.{} incremental map, " "loading osdmap.{}", e, e - 1); ceph_assert(std::cmp_greater(e, 0u)); return load_map(e - 1).then([e, bl=p->second, &t, this](auto o) { OSDMap::Incremental inc; auto i = bl.cbegin(); inc.decode(i); o->apply_incremental(inc); bufferlist fbl; o->encode(fbl, inc.encode_features | CEPH_FEATURE_RESERVED); logger().info("store_maps storing osdmap.{}", o->get_epoch()); store_map_bl(t, e, std::move(fbl)); osdmaps.insert(e, std::move(o)); return seastar::now(); }); } else { logger().error("MOSDMap lied about what maps it had?"); return seastar::now(); } }); } seastar::future<Ref<PG>> ShardServices::make_pg( OSDMapService::cached_map_t create_map, spg_t pgid, bool do_create) { using ec_profile_t = std::map<std::string, std::string>; auto get_pool_info_for_pg = [create_map, pgid, this] { if (create_map->have_pg_pool(pgid.pool())) { pg_pool_t pi = *create_map->get_pg_pool(pgid.pool()); std::string name = create_map->get_pool_name(pgid.pool()); ec_profile_t ec_profile; if (pi.is_erasure()) { ec_profile = create_map->get_erasure_code_profile( pi.erasure_code_profile); } return seastar::make_ready_future< std::tuple<pg_pool_t,std::string, ec_profile_t> >(std::make_tuple( std::move(pi), std::move(name), std::move(ec_profile))); } else { // pool was deleted; grab final pg_pool_t off disk. 
return get_pool_info(pgid.pool()); } }; auto get_collection = [pgid, do_create, this] { const coll_t cid{pgid}; if (do_create) { return get_store().create_new_collection(cid); } else { return get_store().open_collection(cid); } }; return seastar::when_all( std::move(get_pool_info_for_pg), std::move(get_collection) ).then([pgid, create_map, this](auto &&ret) { auto [pool, name, ec_profile] = std::move(std::get<0>(ret).get0()); auto coll = std::move(std::get<1>(ret).get0()); return seastar::make_ready_future<Ref<PG>>( new PG{ pgid, pg_shard_t{local_state.whoami, pgid.shard}, std::move(coll), std::move(pool), std::move(name), create_map, *this, ec_profile}); }); } seastar::future<Ref<PG>> ShardServices::handle_pg_create_info( std::unique_ptr<PGCreateInfo> info) { return seastar::do_with( std::move(info), [this](auto &info) -> seastar::future<Ref<PG>> { return get_map(info->epoch).then( [&info, this](cached_map_t startmap) -> seastar::future<std::tuple<Ref<PG>, cached_map_t>> { const spg_t &pgid = info->pgid; if (info->by_mon) { int64_t pool_id = pgid.pgid.pool(); const pg_pool_t *pool = get_map()->get_pg_pool(pool_id); if (!pool) { logger().debug( "{} ignoring pgid {}, pool dne", __func__, pgid); local_state.pg_map.pg_creation_canceled(pgid); return seastar::make_ready_future< std::tuple<Ref<PG>, OSDMapService::cached_map_t> >(std::make_tuple(Ref<PG>(), startmap)); } else if (!pool->is_crimson()) { logger().debug( "{} ignoring pgid {}, pool lacks crimson flag", __func__, pgid); local_state.pg_map.pg_creation_canceled(pgid); return seastar::make_ready_future< std::tuple<Ref<PG>, OSDMapService::cached_map_t> >(std::make_tuple(Ref<PG>(), startmap)); } ceph_assert(get_map()->require_osd_release >= ceph_release_t::octopus); if (!pool->has_flag(pg_pool_t::FLAG_CREATING)) { // this ensures we do not process old creating messages after the // pool's initial pgs have been created (and pg are subsequently // allowed to split or merge). 
logger().debug( "{} dropping {} create, pool does not have CREATING flag set", __func__, pgid); local_state.pg_map.pg_creation_canceled(pgid); return seastar::make_ready_future< std::tuple<Ref<PG>, OSDMapService::cached_map_t> >(std::make_tuple(Ref<PG>(), startmap)); } } return make_pg( startmap, pgid, true ).then([startmap=std::move(startmap)](auto pg) mutable { return seastar::make_ready_future< std::tuple<Ref<PG>, OSDMapService::cached_map_t> >(std::make_tuple(std::move(pg), std::move(startmap))); }); }).then([this, &info](auto &&ret) ->seastar::future<Ref<PG>> { auto [pg, startmap] = std::move(ret); if (!pg) return seastar::make_ready_future<Ref<PG>>(Ref<PG>()); const pg_pool_t* pp = startmap->get_pg_pool(info->pgid.pool()); int up_primary, acting_primary; vector<int> up, acting; startmap->pg_to_up_acting_osds( info->pgid.pgid, &up, &up_primary, &acting, &acting_primary); int role = startmap->calc_pg_role( pg_shard_t(local_state.whoami, info->pgid.shard), acting); PeeringCtx rctx; create_pg_collection( rctx.transaction, info->pgid, info->pgid.get_split_bits(pp->get_pg_num())); init_pg_ondisk( rctx.transaction, info->pgid, pp); pg->init( role, up, up_primary, acting, acting_primary, info->history, info->past_intervals, rctx.transaction); return start_operation<PGAdvanceMap>( *this, pg, get_map()->get_epoch(), std::move(rctx), true ).second.then([pg=pg] { return seastar::make_ready_future<Ref<PG>>(pg); }); }); }); } ShardServices::get_or_create_pg_ret ShardServices::get_or_create_pg( PGMap::PGCreationBlockingEvent::TriggerI&& trigger, spg_t pgid, std::unique_ptr<PGCreateInfo> info) { if (info) { auto [fut, creating] = local_state.pg_map.wait_for_pg( std::move(trigger), pgid); if (!creating) { local_state.pg_map.set_creating(pgid); (void)handle_pg_create_info( std::move(info)); } return std::move(fut); } else { return get_or_create_pg_ret( get_or_create_pg_ertr::ready_future_marker{}, local_state.pg_map.get_pg(pgid)); } } ShardServices::wait_for_pg_ret ShardServices::wait_for_pg( PGMap::PGCreationBlockingEvent::TriggerI&& trigger, spg_t pgid) { return local_state.pg_map.wait_for_pg(std::move(trigger), pgid).first; } seastar::future<Ref<PG>> ShardServices::load_pg(spg_t pgid) { logger().debug("{}: {}", __func__, pgid); return seastar::do_with(PGMeta(get_store(), pgid), [](auto& pg_meta) { return pg_meta.get_epoch(); }).then([this](epoch_t e) { return get_map(e); }).then([pgid, this](auto&& create_map) { return make_pg(std::move(create_map), pgid, false); }).then([this](Ref<PG> pg) { return pg->read_state(&get_store()).then([pg] { return seastar::make_ready_future<Ref<PG>>(std::move(pg)); }); }).handle_exception([pgid](auto ep) { logger().info("pg {} saw exception on load {}", pgid, ep); ceph_abort("Could not load pg" == 0); return seastar::make_exception_future<Ref<PG>>(ep); }); } seastar::future<> ShardServices::dispatch_context_transaction( crimson::os::CollectionRef col, PeeringCtx &ctx) { if (ctx.transaction.empty()) { logger().debug("ShardServices::dispatch_context_transaction: empty transaction"); return seastar::now(); } logger().debug("ShardServices::dispatch_context_transaction: do_transaction ..."); auto ret = get_store().do_transaction( col, std::move(ctx.transaction)); ctx.reset_transaction(); return ret; } seastar::future<> ShardServices::dispatch_context_messages( BufferedRecoveryMessages &&ctx) { auto ret = seastar::parallel_for_each(std::move(ctx.message_map), [this](auto& osd_messages) { auto& [peer, messages] = osd_messages; logger().debug("dispatch_context_messages 
sending messages to {}", peer); return seastar::parallel_for_each( std::move(messages), [=, peer=peer, this](auto& m) { return send_to_osd(peer, std::move(m), local_state.osdmap->get_epoch()); }); }); ctx.message_map.clear(); return ret; } seastar::future<> ShardServices::dispatch_context( crimson::os::CollectionRef col, PeeringCtx &&ctx) { ceph_assert(col || ctx.transaction.empty()); return seastar::when_all_succeed( dispatch_context_messages( BufferedRecoveryMessages{ctx}), col ? dispatch_context_transaction(col, ctx) : seastar::now() ).then_unpack([] { return seastar::now(); }); } seastar::future<> OSDSingletonState::send_incremental_map( crimson::net::Connection &conn, epoch_t first) { logger().info("{}: first osdmap: {} " "superblock's oldest map: {}", __func__, first, superblock.oldest_map); if (first >= superblock.oldest_map) { return load_map_bls( first, superblock.newest_map ).then([this, &conn, first](auto&& bls) { auto m = crimson::make_message<MOSDMap>( monc.get_fsid(), osdmap->get_encoding_features()); m->cluster_osdmap_trim_lower_bound = first; m->newest_map = superblock.newest_map; m->maps = std::move(bls); return conn.send(std::move(m)); }); } else { return load_map_bl(osdmap->get_epoch() ).then([this, &conn](auto&& bl) mutable { auto m = crimson::make_message<MOSDMap>( monc.get_fsid(), osdmap->get_encoding_features()); /* TODO: once we support the tracking of superblock's * cluster_osdmap_trim_lower_bound, the MOSDMap should * be populated with this value instead of the oldest_map. * See: OSD::handle_osd_map for how classic updates the * cluster's trim lower bound. */ m->cluster_osdmap_trim_lower_bound = superblock.oldest_map; m->newest_map = superblock.newest_map; m->maps.emplace(osdmap->get_epoch(), std::move(bl)); return conn.send(std::move(m)); }); } } seastar::future<> OSDSingletonState::send_incremental_map_to_osd( int osd, epoch_t first) { if (osdmap->is_down(osd)) { logger().info("{}: osd.{} is_down", __func__, osd); return seastar::now(); } else { auto conn = cluster_msgr.connect( osdmap->get_cluster_addrs(osd).front(), CEPH_ENTITY_TYPE_OSD); return send_incremental_map(*conn, first); } } };
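// A minimal usage sketch for the context-dispatch helpers above, assuming a
// caller that already holds a ShardServices reference and the PG's open
// collection; the function and variable names are illustrative only and the
// block is kept out of compilation.
#if 0
seastar::future<> flush_peering_ctx(
  crimson::osd::ShardServices &shard_services,
  crimson::os::CollectionRef coll,
  PeeringCtx &&rctx)
{
  // dispatch_context() sends the buffered recovery messages and applies the
  // accumulated transaction to the store, resetting the ctx transaction.
  return shard_services.dispatch_context(coll, std::move(rctx));
}
#endif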
22,449
28.461942
85
cc
null
ceph-main/src/crimson/osd/shard_services.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <memory> #include <boost/intrusive_ptr.hpp> #include <seastar/core/future.hh> #include "include/common_fwd.h" #include "osd_operation.h" #include "msg/MessageRef.h" #include "crimson/common/exception.h" #include "crimson/common/shared_lru.h" #include "crimson/os/futurized_collection.h" #include "osd/PeeringState.h" #include "crimson/osd/osdmap_service.h" #include "crimson/osd/osdmap_gate.h" #include "crimson/osd/osd_meta.h" #include "crimson/osd/object_context.h" #include "crimson/osd/pg_map.h" #include "crimson/osd/state.h" #include "common/AsyncReserver.h" #include "crimson/net/Connection.h" namespace crimson::net { class Messenger; } namespace crimson::mgr { class Client; } namespace crimson::mon { class Client; } namespace crimson::os { class FuturizedStore; } class OSDMap; class PeeringCtx; class BufferedRecoveryMessages; namespace crimson::osd { class PGShardManager; /** * PerShardState * * Per-shard state holding instances local to each shard. */ class PerShardState { friend class ShardServices; friend class PGShardManager; friend class OSD; using cached_map_t = OSDMapService::cached_map_t; using local_cached_map_t = OSDMapService::local_cached_map_t; const core_id_t core = seastar::this_shard_id(); #define assert_core() ceph_assert(seastar::this_shard_id() == core); const int whoami; crimson::os::FuturizedStore::Shard &store; crimson::common::CephContext cct; OSDState &osd_state; OSD_OSDMapGate osdmap_gate; PerfCounters *perf = nullptr; PerfCounters *recoverystate_perf = nullptr; // Op Management OSDOperationRegistry registry; OperationThrottler throttler; seastar::future<> dump_ops_in_flight(Formatter *f) const; epoch_t up_epoch = 0; OSDMapService::cached_map_t osdmap; const auto &get_osdmap() const { assert_core(); return osdmap; } void update_map(OSDMapService::cached_map_t new_osdmap) { assert_core(); osdmap = std::move(new_osdmap); } void set_up_epoch(epoch_t epoch) { assert_core(); up_epoch = epoch; } // prevent creating new osd operations when system is shutting down, // this is necessary because there are chances that a new operation // is created, after the interruption of all ongoing operations, and // creats and waits on a new and may-never-resolve future, in which // case the shutdown may never succeed. bool stopping = false; seastar::future<> stop_registry() { assert_core(); crimson::get_logger(ceph_subsys_osd).info("PerShardState::{}", __func__); stopping = true; return registry.stop(); } // PGMap state PGMap pg_map; seastar::future<> stop_pgs(); std::map<pg_t, pg_stat_t> get_pg_stats() const; seastar::future<> broadcast_map_to_pgs( ShardServices &shard_services, epoch_t epoch); Ref<PG> get_pg(spg_t pgid); template <typename F> void for_each_pg(F &&f) const { assert_core(); for (auto &pg : pg_map.get_pgs()) { std::invoke(f, pg.first, pg.second); } } template <typename T, typename... Args> auto start_operation(Args&&... args) { assert_core(); if (__builtin_expect(stopping, false)) { throw crimson::common::system_shutdown_exception(); } auto op = registry.create_operation<T>(std::forward<Args>(args)...); crimson::get_logger(ceph_subsys_osd).info( "PerShardState::{}, {}", __func__, *op); auto fut = seastar::yield().then([op] { return op->start().finally([op /* by copy */] { // ensure the op's lifetime is appropriate. It is not enough to // guarantee it's alive at the scheduling stages (i.e. `then()` // calling) but also during the actual execution (i.e. 
when passed // lambdas are actually run). }); }); return std::make_pair(std::move(op), std::move(fut)); } template <typename InterruptorT, typename T, typename... Args> auto start_operation_may_interrupt(Args&&... args) { assert_core(); if (__builtin_expect(stopping, false)) { throw crimson::common::system_shutdown_exception(); } auto op = registry.create_operation<T>(std::forward<Args>(args)...); crimson::get_logger(ceph_subsys_osd).info( "PerShardState::{}, {}", __func__, *op); auto fut = InterruptorT::make_interruptible( seastar::yield() ).then_interruptible([op] { return op->start().finally([op /* by copy */] { // ensure the op's lifetime is appropriate. It is not enough to // guarantee it's alive at the scheduling stages (i.e. `then()` // calling) but also during the actual execution (i.e. when passed // lambdas are actually run). }); }); return std::make_pair(std::move(op), std::move(fut)); } // tids for ops i issue, prefixed with core id to ensure uniqueness ceph_tid_t next_tid; ceph_tid_t get_tid() { assert_core(); return next_tid++; } HeartbeatStampsRef get_hb_stamps(int peer); std::map<int, HeartbeatStampsRef> heartbeat_stamps; // Time state const ceph::mono_time startup_time; ceph::signedspan get_mnow() const { assert_core(); return ceph::mono_clock::now() - startup_time; } public: PerShardState( int whoami, ceph::mono_time startup_time, PerfCounters *perf, PerfCounters *recoverystate_perf, crimson::os::FuturizedStore &store, OSDState& osd_state); }; /** * OSDSingletonState * * OSD-wide singleton holding instances that need to be accessible * from all PGs. */ class OSDSingletonState : public md_config_obs_t { friend class ShardServices; friend class PGShardManager; friend class OSD; using cached_map_t = OSDMapService::cached_map_t; using local_cached_map_t = OSDMapService::local_cached_map_t; public: OSDSingletonState( int whoami, crimson::net::Messenger &cluster_msgr, crimson::net::Messenger &public_msgr, crimson::mon::Client &monc, crimson::mgr::Client &mgrc); private: const int whoami; crimson::common::CephContext cct; PerfCounters *perf = nullptr; PerfCounters *recoverystate_perf = nullptr; SharedLRU<epoch_t, OSDMap> osdmaps; SimpleLRU<epoch_t, bufferlist, false> map_bl_cache; cached_map_t osdmap; cached_map_t &get_osdmap() { return osdmap; } void update_map(cached_map_t new_osdmap) { osdmap = std::move(new_osdmap); } crimson::net::Messenger &cluster_msgr; crimson::net::Messenger &public_msgr; seastar::future<> send_to_osd(int peer, MessageURef m, epoch_t from_epoch); crimson::mon::Client &monc; seastar::future<> osdmap_subscribe(version_t epoch, bool force_request); crimson::mgr::Client &mgrc; std::unique_ptr<OSDMeta> meta_coll; template <typename... Args> void init_meta_coll(Args&&... 
args) { meta_coll = std::make_unique<OSDMeta>(std::forward<Args>(args)...); } OSDMeta &get_meta_coll() { assert(meta_coll); return *meta_coll; } OSDSuperblock superblock; void set_superblock(OSDSuperblock _superblock) { superblock = std::move(_superblock); } seastar::future<> send_incremental_map( crimson::net::Connection &conn, epoch_t first); seastar::future<> send_incremental_map_to_osd(int osd, epoch_t first); auto get_pool_info(int64_t poolid) { return get_meta_coll().load_final_pool_info(poolid); } // global pg temp state struct pg_temp_t { std::vector<int> acting; bool forced = false; }; std::map<pg_t, pg_temp_t> pg_temp_wanted; std::map<pg_t, pg_temp_t> pg_temp_pending; friend std::ostream& operator<<(std::ostream&, const pg_temp_t&); void queue_want_pg_temp(pg_t pgid, const std::vector<int>& want, bool forced = false); void remove_want_pg_temp(pg_t pgid); void requeue_pg_temp(); seastar::future<> send_pg_temp(); std::set<pg_t> pg_created; seastar::future<> send_pg_created(pg_t pgid); seastar::future<> send_pg_created(); void prune_pg_created(); struct DirectFinisher { void queue(Context *c) { c->complete(0); } } finisher; AsyncReserver<spg_t, DirectFinisher> local_reserver; AsyncReserver<spg_t, DirectFinisher> remote_reserver; AsyncReserver<spg_t, DirectFinisher> snap_reserver; epoch_t up_thru_wanted = 0; seastar::future<> send_alive(epoch_t want); const char** get_tracked_conf_keys() const final; void handle_conf_change( const ConfigProxy& conf, const std::set <std::string> &changed) final; seastar::future<local_cached_map_t> get_local_map(epoch_t e); seastar::future<std::unique_ptr<OSDMap>> load_map(epoch_t e); seastar::future<bufferlist> load_map_bl(epoch_t e); seastar::future<std::map<epoch_t, bufferlist>> load_map_bls(epoch_t first, epoch_t last); void store_map_bl(ceph::os::Transaction& t, epoch_t e, bufferlist&& bl); seastar::future<> store_maps(ceph::os::Transaction& t, epoch_t start, Ref<MOSDMap> m); }; /** * Represents services available to each PG */ class ShardServices : public OSDMapService { friend class PGShardManager; friend class OSD; using cached_map_t = OSDMapService::cached_map_t; using local_cached_map_t = OSDMapService::local_cached_map_t; PerShardState local_state; seastar::sharded<OSDSingletonState> &osd_singleton_state; PGShardMapping& pg_to_shard_mapping; template <typename F, typename... Args> auto with_singleton(F &&f, Args&&... args) { return osd_singleton_state.invoke_on( PRIMARY_CORE, std::forward<F>(f), std::forward<Args>(args)... ); } #define FORWARD_CONST(FROM_METHOD, TO_METHOD, TARGET) \ template <typename... Args> \ auto FROM_METHOD(Args&&... args) const { \ return TARGET.TO_METHOD(std::forward<Args>(args)...); \ } #define FORWARD(FROM_METHOD, TO_METHOD, TARGET) \ template <typename... Args> \ auto FROM_METHOD(Args&&... args) { \ return TARGET.TO_METHOD(std::forward<Args>(args)...); \ } #define FORWARD_TO_LOCAL(METHOD) FORWARD(METHOD, METHOD, local_state) #define FORWARD_TO_LOCAL_CONST(METHOD) FORWARD_CONST( \ METHOD, METHOD, local_state) \ #define FORWARD_TO_OSD_SINGLETON_TARGET(METHOD, TARGET) \ template <typename... Args> \ auto METHOD(Args&&... args) { \ return with_singleton( \ [](auto &local_state, auto&&... args) { \ return local_state.TARGET( \ std::forward<decltype(args)>(args)...); \ }, std::forward<Args>(args)...); \ } #define FORWARD_TO_OSD_SINGLETON(METHOD) \ FORWARD_TO_OSD_SINGLETON_TARGET(METHOD, METHOD) public: template <typename... 
PSSArgs> ShardServices( seastar::sharded<OSDSingletonState> &osd_singleton_state, PGShardMapping& pg_to_shard_mapping, PSSArgs&&... args) : local_state(std::forward<PSSArgs>(args)...), osd_singleton_state(osd_singleton_state), pg_to_shard_mapping(pg_to_shard_mapping) {} FORWARD_TO_OSD_SINGLETON(send_to_osd) crimson::os::FuturizedStore::Shard &get_store() { return local_state.store; } auto remove_pg(spg_t pgid) { local_state.pg_map.remove_pg(pgid); return pg_to_shard_mapping.remove_pg(pgid); } crimson::common::CephContext *get_cct() { return &(local_state.cct); } template <typename T, typename... Args> auto start_operation(Args&&... args) { return local_state.start_operation<T>(std::forward<Args>(args)...); } template <typename InterruptorT, typename T, typename... Args> auto start_operation_may_interrupt(Args&&... args) { return local_state.start_operation_may_interrupt< InterruptorT, T>(std::forward<Args>(args)...); } auto &get_registry() { return local_state.registry; } // Loggers PerfCounters &get_recoverystate_perf_logger() { return *local_state.recoverystate_perf; } PerfCounters &get_perf_logger() { return *local_state.perf; } // Diagnostics FORWARD_TO_LOCAL_CONST(dump_ops_in_flight); // Local PG Management seastar::future<Ref<PG>> make_pg( cached_map_t create_map, spg_t pgid, bool do_create); seastar::future<Ref<PG>> handle_pg_create_info( std::unique_ptr<PGCreateInfo> info); using get_or_create_pg_ertr = PGMap::wait_for_pg_ertr; using get_or_create_pg_ret = get_or_create_pg_ertr::future<Ref<PG>>; get_or_create_pg_ret get_or_create_pg( PGMap::PGCreationBlockingEvent::TriggerI&&, spg_t pgid, std::unique_ptr<PGCreateInfo> info); using wait_for_pg_ertr = PGMap::wait_for_pg_ertr; using wait_for_pg_ret = wait_for_pg_ertr::future<Ref<PG>>; wait_for_pg_ret wait_for_pg( PGMap::PGCreationBlockingEvent::TriggerI&&, spg_t pgid); seastar::future<Ref<PG>> load_pg(spg_t pgid); /// Dispatch and reset ctx transaction seastar::future<> dispatch_context_transaction( crimson::os::CollectionRef col, PeeringCtx &ctx); /// Dispatch and reset ctx messages seastar::future<> dispatch_context_messages( BufferedRecoveryMessages &&ctx); /// Dispatch ctx and dispose of context seastar::future<> dispatch_context( crimson::os::CollectionRef col, PeeringCtx &&ctx); /// Dispatch ctx and dispose of ctx, transaction must be empty seastar::future<> dispatch_context( PeeringCtx &&ctx) { return dispatch_context({}, std::move(ctx)); } /// Return per-core tid ceph_tid_t get_tid() { return local_state.get_tid(); } /// Return core-local pg count * number of cores unsigned get_num_local_pgs() const { return local_state.pg_map.get_pg_count(); } // OSDMapService cached_map_t get_map() const final { return local_state.get_osdmap(); } epoch_t get_up_epoch() const final { return local_state.up_epoch; } seastar::future<cached_map_t> get_map(epoch_t e) final { return with_singleton( [](auto &sstate, epoch_t e) { return sstate.get_local_map( e ).then([](auto lmap) { return seastar::foreign_ptr<local_cached_map_t>(lmap); }); }, e).then([](auto fmap) { return make_local_shared_foreign(std::move(fmap)); }); } FORWARD_TO_OSD_SINGLETON(get_pool_info) FORWARD(with_throttle_while, with_throttle_while, local_state.throttler) FORWARD_TO_OSD_SINGLETON(send_incremental_map) FORWARD_TO_OSD_SINGLETON(send_incremental_map_to_osd) FORWARD_TO_OSD_SINGLETON(osdmap_subscribe) FORWARD_TO_OSD_SINGLETON(queue_want_pg_temp) FORWARD_TO_OSD_SINGLETON(remove_want_pg_temp) FORWARD_TO_OSD_SINGLETON(requeue_pg_temp) FORWARD_TO_OSD_SINGLETON(send_pg_created) 
FORWARD_TO_OSD_SINGLETON(send_alive) FORWARD_TO_OSD_SINGLETON(send_pg_temp) FORWARD_TO_LOCAL_CONST(get_mnow) FORWARD_TO_LOCAL(get_hb_stamps) FORWARD(pg_created, pg_created, local_state.pg_map) FORWARD_TO_OSD_SINGLETON_TARGET( local_update_priority, local_reserver.update_priority) FORWARD_TO_OSD_SINGLETON_TARGET( local_cancel_reservation, local_reserver.cancel_reservation) FORWARD_TO_OSD_SINGLETON_TARGET( local_dump_reservations, local_reserver.dump) FORWARD_TO_OSD_SINGLETON_TARGET( remote_cancel_reservation, remote_reserver.cancel_reservation) FORWARD_TO_OSD_SINGLETON_TARGET( remote_dump_reservations, remote_reserver.dump) FORWARD_TO_OSD_SINGLETON_TARGET( snap_cancel_reservation, snap_reserver.cancel_reservation) FORWARD_TO_OSD_SINGLETON_TARGET( snap_dump_reservations, snap_reserver.dump) Context *invoke_context_on_core(core_id_t core, Context *c) { if (!c) return nullptr; return new LambdaContext([core, c](int code) { std::ignore = seastar::smp::submit_to( core, [c, code] { c->complete(code); }); }); } seastar::future<> local_request_reservation( spg_t item, Context *on_reserved, unsigned prio, Context *on_preempt) { return with_singleton( [item, prio](OSDSingletonState &singleton, Context *wrapped_on_reserved, Context *wrapped_on_preempt) { return singleton.local_reserver.request_reservation( item, wrapped_on_reserved, prio, wrapped_on_preempt); }, invoke_context_on_core(seastar::this_shard_id(), on_reserved), invoke_context_on_core(seastar::this_shard_id(), on_preempt)); } seastar::future<> remote_request_reservation( spg_t item, Context *on_reserved, unsigned prio, Context *on_preempt) { return with_singleton( [item, prio](OSDSingletonState &singleton, Context *wrapped_on_reserved, Context *wrapped_on_preempt) { return singleton.remote_reserver.request_reservation( item, wrapped_on_reserved, prio, wrapped_on_preempt); }, invoke_context_on_core(seastar::this_shard_id(), on_reserved), invoke_context_on_core(seastar::this_shard_id(), on_preempt)); } seastar::future<> snap_request_reservation( spg_t item, Context *on_reserved, unsigned prio) { return with_singleton( [item, prio](OSDSingletonState &singleton, Context *wrapped_on_reserved) { return singleton.snap_reserver.request_reservation( item, wrapped_on_reserved, prio); }, invoke_context_on_core(seastar::this_shard_id(), on_reserved)); } #undef FORWARD_CONST #undef FORWARD #undef FORWARD_TO_OSD_SINGLETON #undef FORWARD_TO_LOCAL #undef FORWARD_TO_LOCAL_CONST }; } #if FMT_VERSION >= 90000 template <> struct fmt::formatter<crimson::osd::OSDSingletonState::pg_temp_t> : fmt::ostream_formatter {}; #endif
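// A minimal sketch of how the start_operation<> interface above is typically
// consumed; `SomeOp` stands in for any registered Operation type and is an
// assumption of this sketch, which is kept out of compilation.
#if 0
template <typename SomeOp, typename... Args>
auto run_tracked(crimson::osd::ShardServices &svc, Args&&... args)
{
  // start_operation<> registers the op and yields {op, future-of-start()};
  // the registry and the captured ref keep the op alive, so callers usually
  // just chain on the returned future.
  auto [op, fut] = svc.start_operation<SomeOp>(std::forward<Args>(args)...);
  return std::move(fut);
}
#endif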
17,439
28.509306
106
h
null
ceph-main/src/crimson/osd/state.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <string_view> #include <ostream> #include <seastar/core/shared_future.hh> class OSDMap; namespace crimson::osd { // seastar::sharded puts start_single on core 0 constexpr core_id_t PRIMARY_CORE = 0; /** * OSDState * * Maintains state representing the OSD's progress from booting through * shutdown. * * Shards other than PRIMARY_CORE may use their local instance to check * on ACTIVE and STOPPING. All other methods are restricted to * PRIMARY_CORE (such methods start with an assert to this effect). */ class OSDState : public seastar::peering_sharded_service<OSDState> { enum class State { INITIALIZING, PREBOOT, BOOTING, ACTIVE, PRESTOP, STOPPING, WAITING_FOR_HEALTHY, }; State state = State::INITIALIZING; mutable seastar::shared_promise<> wait_for_active; /// Sets local instance state to active, called from set_active void _set_active() { state = State::ACTIVE; wait_for_active.set_value(); wait_for_active = {}; } /// Sets local instance state to stopping, called from set_stopping void _set_stopping() { state = State::STOPPING; wait_for_active.set_exception(crimson::common::system_shutdown_exception{}); wait_for_active = {}; } public: bool is_initializing() const { ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return state == State::INITIALIZING; } bool is_preboot() const { ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return state == State::PREBOOT; } bool is_booting() const { ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return state == State::BOOTING; } bool is_active() const { return state == State::ACTIVE; } seastar::future<> when_active() const { return is_active() ? seastar::now() : wait_for_active.get_shared_future(); }; bool is_prestop() const { ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return state == State::PRESTOP; } bool is_stopping() const { return state == State::STOPPING; } bool is_waiting_for_healthy() const { ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return state == State::WAITING_FOR_HEALTHY; } void set_preboot() { ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); state = State::PREBOOT; } void set_booting() { ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); state = State::BOOTING; } /// Sets all shards to active seastar::future<> set_active() { ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return container().invoke_on_all([](auto& osd_state) { osd_state._set_active(); }); } void set_prestop() { ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); state = State::PRESTOP; } /// Sets all shards to stopping seastar::future<> set_stopping() { ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return container().invoke_on_all([](auto& osd_state) { osd_state._set_stopping(); }); } std::string_view to_string() const { switch (state) { case State::INITIALIZING: return "initializing"; case State::PREBOOT: return "preboot"; case State::BOOTING: return "booting"; case State::ACTIVE: return "active"; case State::PRESTOP: return "prestop"; case State::STOPPING: return "stopping"; case State::WAITING_FOR_HEALTHY: return "waiting_for_healthy"; default: return "???"; } } }; inline std::ostream& operator<<(std::ostream& os, const OSDState& s) { return os << s.to_string(); } }
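// A minimal sketch of the shard discipline described in the class comment:
// ACTIVE/STOPPING may be observed from any shard via the local replica,
// while transitions are driven from PRIMARY_CORE. Names below are
// illustrative and the block is kept out of compilation.
#if 0
seastar::future<> wait_until_serving(crimson::osd::OSDState &local_osd_state)
{
  // Safe on any shard: resolves once set_active() has propagated to all
  // shards, or fails with system_shutdown_exception after set_stopping().
  return local_osd_state.when_active();
}
#endif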
3,658
26.931298
80
h
null
ceph-main/src/crimson/osd/stop_signal.h
/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2020 Cloudius Systems, Ltd. */ #pragma once #include <seastar/core/abort_source.hh> #include <seastar/core/reactor.hh> #include <seastar/core/condition-variable.hh> /// Seastar apps lib namespace namespace seastar_apps_lib { /// \brief Futurized SIGINT/SIGTERM signals handler class /// /// Seastar-style helper class that allows easy waiting for SIGINT/SIGTERM signals /// from your app. /// /// Example: /// \code /// #include <seastar/apps/lib/stop_signal.hh> /// ... /// int main() { /// ... /// seastar::thread th([] { /// seastar_apps_lib::stop_signal stop_signal; /// <some code> /// stop_signal.wait().get(); // this will wait till we receive SIGINT or SIGTERM signal /// }); /// \endcode class stop_signal { seastar::condition_variable _cond; seastar::abort_source _abort_source; private: void on_signal() { if (stopping()) { return; } _abort_source.request_abort(); _cond.broadcast(); } public: stop_signal() { seastar::engine().handle_signal(SIGINT, [this] { on_signal(); }); seastar::engine().handle_signal(SIGTERM, [this] { on_signal(); }); } ~stop_signal() { // There's no way to unregister a handler yet, so register a no-op handler instead. seastar::engine().handle_signal(SIGINT, [] {}); seastar::engine().handle_signal(SIGTERM, [] {}); } seastar::future<> wait() { return _cond.wait([this] { return _abort_source.abort_requested(); }); } bool stopping() const { return _abort_source.abort_requested(); } auto& abort_source() { return _abort_source; } }; }
2,412
27.72619
92
h
null
ceph-main/src/crimson/osd/watch.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include <algorithm> #include <boost/range/adaptor/transformed.hpp> #include <boost/range/algorithm_ext/insert.hpp> #include "crimson/osd/watch.h" #include "crimson/osd/osd_operations/internal_client_request.h" #include "messages/MWatchNotify.h" namespace { seastar::logger& logger() { return crimson::get_logger(ceph_subsys_osd); } } namespace crimson::osd { // a watcher can remove itself if it has not seen a notification after a period of time. // in the case, we need to drop it also from the persisted `ObjectState` instance. // this operation resembles a bit the `_UNWATCH` subop. class WatchTimeoutRequest final : public InternalClientRequest { public: WatchTimeoutRequest(WatchRef watch, Ref<PG> pg) : InternalClientRequest(std::move(pg)), watch(std::move(watch)) { } const hobject_t& get_target_oid() const final; PG::do_osd_ops_params_t get_do_osd_ops_params() const final; std::vector<OSDOp> create_osd_ops() final; private: WatchRef watch; }; const hobject_t& WatchTimeoutRequest::get_target_oid() const { assert(watch->obc); return watch->obc->get_oid(); } PG::do_osd_ops_params_t WatchTimeoutRequest::get_do_osd_ops_params() const { osd_reqid_t reqid; reqid.name = watch->entity_name; PG::do_osd_ops_params_t params{ watch->conn, reqid, ceph_clock_now(), get_pg().get_osdmap_epoch(), entity_inst_t{ watch->entity_name, watch->winfo.addr }, 0 }; logger().debug("{}: params.reqid={}", __func__, params.reqid); return params; } std::vector<OSDOp> WatchTimeoutRequest::create_osd_ops() { logger().debug("{}", __func__); assert(watch); OSDOp osd_op; osd_op.op.op = CEPH_OSD_OP_WATCH; osd_op.op.flags = 0; osd_op.op.watch.op = CEPH_OSD_WATCH_OP_UNWATCH; osd_op.op.watch.cookie = watch->winfo.cookie; return std::vector{std::move(osd_op)}; } Watch::~Watch() { logger().debug("{} gid={} cookie={}", __func__, get_watcher_gid(), get_cookie()); } seastar::future<> Watch::connect(crimson::net::ConnectionRef conn, bool) { if (this->conn == conn) { logger().debug("conn={} already connected", conn); return seastar::now(); } timeout_timer.cancel(); timeout_timer.arm(std::chrono::seconds{winfo.timeout_seconds}); this->conn = std::move(conn); return seastar::now(); } void Watch::disconnect() { ceph_assert(!conn); timeout_timer.cancel(); timeout_timer.arm(std::chrono::seconds{winfo.timeout_seconds}); } seastar::future<> Watch::send_notify_msg(NotifyRef notify) { logger().info("{} for notify(id={})", __func__, notify->ninfo.notify_id); return conn->send(crimson::make_message<MWatchNotify>( winfo.cookie, notify->user_version, notify->ninfo.notify_id, CEPH_WATCH_EVENT_NOTIFY, notify->ninfo.bl, notify->client_gid)); } seastar::future<> Watch::start_notify(NotifyRef notify) { logger().debug("{} gid={} cookie={} starting notify(id={})", __func__, get_watcher_gid(), get_cookie(), notify->ninfo.notify_id); auto [ it, emplaced ] = in_progress_notifies.emplace(std::move(notify)); ceph_assert(emplaced); ceph_assert(is_alive()); return is_connected() ? send_notify_msg(*it) : seastar::now(); } seastar::future<> Watch::notify_ack( const uint64_t notify_id, const ceph::bufferlist& reply_bl) { logger().debug("{} gid={} cookie={} notify_id={}", __func__, get_watcher_gid(), get_cookie(), notify_id); const auto it = in_progress_notifies.find(notify_id); if (it == std::end(in_progress_notifies)) { logger().error("{} notify_id={} not found on the in-progess list." 
" Supressing but this should not happen.", __func__, notify_id); return seastar::now(); } auto notify = *it; logger().debug("Watch::notify_ack gid={} cookie={} found notify(id={})", get_watcher_gid(), get_cookie(), notify->get_id()); // let's ensure we're extending the life-time till end of this method static_assert(std::is_same_v<decltype(notify), NotifyRef>); in_progress_notifies.erase(it); return notify->complete_watcher(shared_from_this(), reply_bl); } seastar::future<> Watch::send_disconnect_msg() { if (!is_connected()) { return seastar::now(); } ceph::bufferlist empty; return conn->send(crimson::make_message<MWatchNotify>( winfo.cookie, 0, 0, CEPH_WATCH_EVENT_DISCONNECT, empty)); } void Watch::discard_state() { logger().debug("{} gid={} cookie={}", __func__, get_watcher_gid(), get_cookie()); ceph_assert(obc); in_progress_notifies.clear(); timeout_timer.cancel(); } void Watch::got_ping(utime_t) { if (is_connected()) { // using cancel() + arm() as rearm() has no overload for time delta. timeout_timer.cancel(); timeout_timer.arm(std::chrono::seconds{winfo.timeout_seconds}); } } seastar::future<> Watch::remove() { logger().debug("{} gid={} cookie={}", __func__, get_watcher_gid(), get_cookie()); // in contrast to ceph-osd crimson sends CEPH_WATCH_EVENT_DISCONNECT directly // from the timeout handler and _after_ CEPH_WATCH_EVENT_NOTIFY_COMPLETE. // this simplifies the Watch::remove() interface as callers aren't obliged // anymore to decide whether EVENT_DISCONNECT needs to be send or not -- it // becomes an implementation detail of Watch. return seastar::do_for_each(in_progress_notifies, [this_shared=shared_from_this()] (auto notify) { logger().debug("Watch::remove gid={} cookie={} notify(id={})", this_shared->get_watcher_gid(), this_shared->get_cookie(), notify->ninfo.notify_id); return notify->remove_watcher(this_shared); }).then([this] { discard_state(); return seastar::now(); }); } void Watch::cancel_notify(const uint64_t notify_id) { logger().debug("{} gid={} cookie={} notify(id={})", __func__, get_watcher_gid(), get_cookie(), notify_id); const auto it = in_progress_notifies.find(notify_id); assert(it != std::end(in_progress_notifies)); in_progress_notifies.erase(it); } void Watch::do_watch_timeout() { assert(pg); auto [op, fut] = pg->get_shard_services().start_operation<WatchTimeoutRequest>( shared_from_this(), pg); std::ignore = std::move(fut).then([op=std::move(op), this] { return send_disconnect_msg(); }); } bool notify_reply_t::operator<(const notify_reply_t& rhs) const { // comparing std::pairs to emphasize our legacy. ceph-osd stores // notify_replies as std::multimap<std::pair<gid, cookie>, bl>. // unfortunately, what seems to be an implementation detail, got // exposed as part of our public API (the `reply_buffer` parameter // of the `rados_notify` family). 
const auto lhsp = std::make_pair(watcher_gid, watcher_cookie); const auto rhsp = std::make_pair(rhs.watcher_gid, rhs.watcher_cookie); return lhsp < rhsp; } std::ostream &operator<<(std::ostream &out, const notify_reply_t &rhs) { out << "notify_reply_t{watcher_gid=" << rhs.watcher_gid << ", watcher_cookie=" << rhs.watcher_cookie << "}"; return out; } Notify::Notify(crimson::net::ConnectionRef conn, const notify_info_t& ninfo, const uint64_t client_gid, const uint64_t user_version) : ninfo(ninfo), conn(std::move(conn)), client_gid(client_gid), user_version(user_version) {} Notify::~Notify() { logger().debug("{} for notify(id={})", __func__, ninfo.notify_id); } seastar::future<> Notify::remove_watcher(WatchRef watch) { logger().debug("{} for notify(id={})", __func__, ninfo.notify_id); if (discarded || complete) { logger().debug("{} for notify(id={}) discarded/complete already" " discarded: {} complete: {}", __func__, ninfo.notify_id, discarded ,complete); return seastar::now(); } [[maybe_unused]] const auto num_removed = watchers.erase(watch); assert(num_removed > 0); if (watchers.empty()) { complete = true; [[maybe_unused]] bool was_armed = timeout_timer.cancel(); assert(was_armed); return send_completion(); } else { return seastar::now(); } } seastar::future<> Notify::complete_watcher( WatchRef watch, const ceph::bufferlist& reply_bl) { logger().debug("{} for notify(id={})", __func__, ninfo.notify_id); if (discarded || complete) { logger().debug("{} for notify(id={}) discarded/complete already" " discarded: {} complete: {}", __func__, ninfo.notify_id, discarded ,complete); return seastar::now(); } notify_replies.emplace(notify_reply_t{ watch->get_watcher_gid(), watch->get_cookie(), reply_bl}); return remove_watcher(std::move(watch)); } seastar::future<> Notify::send_completion( std::set<WatchRef> timedout_watchers) { logger().info("{} -- {} in progress watchers, timedout watchers {}", __func__, watchers.size(), timedout_watchers.size()); logger().debug("{} sending notify replies: {}", __func__, notify_replies); ceph::bufferlist empty; auto reply = crimson::make_message<MWatchNotify>( ninfo.cookie, user_version, ninfo.notify_id, CEPH_WATCH_EVENT_NOTIFY_COMPLETE, empty, client_gid); ceph::bufferlist reply_bl; { std::vector<std::pair<uint64_t,uint64_t>> missed; missed.reserve(std::size(timedout_watchers)); boost::insert( missed, std::begin(missed), timedout_watchers | boost::adaptors::transformed([] (auto w) { return std::make_pair(w->get_watcher_gid(), w->get_cookie()); })); ceph::encode(notify_replies, reply_bl); ceph::encode(missed, reply_bl); } reply->set_data(std::move(reply_bl)); if (!timedout_watchers.empty()) { reply->return_code = -ETIMEDOUT; } return conn->send(std::move(reply)); } void Notify::do_notify_timeout() { logger().debug("{} complete={}", __func__, complete); if (complete) { return; } // it might be that `this` is kept alive only because of the reference // a watcher stores and which is being removed by `cancel_notify()`. // to avoid use-after-free we bump up the ref counter with `guard_ptr`. 
[[maybe_unused]] auto guard_ptr = shared_from_this(); for (auto& watcher : watchers) { logger().debug("canceling watcher cookie={} gid={} use_count={}", watcher->get_cookie(), watcher->get_watcher_gid(), watcher->use_count()); watcher->cancel_notify(ninfo.notify_id); } std::ignore = send_completion(std::move(watchers)); watchers.clear(); } } // namespace crimson::osd #if FMT_VERSION >= 90000 template <> struct fmt::formatter<crimson::osd::WatchTimeoutRequest> : fmt::ostream_formatter {}; #endif
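// A minimal sketch of how the completion payload assembled in
// send_completion() above could be decoded on the receiving side, assuming
// the symmetric decode overloads from include/encoding.h; names are
// illustrative and the block is kept out of compilation.
#if 0
void decode_notify_completion(const ceph::bufferlist &bl)
{
  auto p = bl.cbegin();
  // Encoded in this order by send_completion(): the acked replies, then the
  // (watcher_gid, cookie) pairs of watchers that timed out.
  std::multiset<crimson::osd::notify_reply_t> acked;
  std::vector<std::pair<uint64_t, uint64_t>> missed;
  ceph::decode(acked, p);
  ceph::decode(missed, p);
}
#endif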
10,816
29.470423
97
cc
null
ceph-main/src/crimson/osd/watch.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <iterator> #include <map> #include <set> #include <seastar/core/shared_ptr.hh> #include "crimson/net/Connection.h" #include "crimson/osd/object_context.h" #include "crimson/osd/pg.h" #include "include/denc.h" namespace crimson::osd { class Notify; using NotifyRef = seastar::shared_ptr<Notify>; // NOTE: really need to have this public. Otherwise `shared_from_this()` // will abort. According to cppreference.com: // // "The constructors of std::shared_ptr detect the presence // of an unambiguous and accessible (ie. public inheritance // is mandatory) (since C++17) enable_shared_from_this base". // // I expect the `seastar::shared_ptr` shares this behaviour. class Watch : public seastar::enable_shared_from_this<Watch> { // this is a private tag for the public constructor that turns it into // de facto private one. The motivation behind the hack is make_shared // used by create(). struct private_ctag_t{}; std::set<NotifyRef, std::less<>> in_progress_notifies; crimson::net::ConnectionRef conn; crimson::osd::ObjectContextRef obc; watch_info_t winfo; entity_name_t entity_name; Ref<PG> pg; seastar::timer<seastar::lowres_clock> timeout_timer; seastar::future<> start_notify(NotifyRef); seastar::future<> send_notify_msg(NotifyRef); seastar::future<> send_disconnect_msg(); friend Notify; friend class WatchTimeoutRequest; public: Watch(private_ctag_t, crimson::osd::ObjectContextRef obc, const watch_info_t& winfo, const entity_name_t& entity_name, Ref<PG> pg) : obc(std::move(obc)), winfo(winfo), entity_name(entity_name), pg(std::move(pg)), timeout_timer([this] { return do_watch_timeout(); }) { assert(this->pg); } ~Watch(); seastar::future<> connect(crimson::net::ConnectionRef, bool); void disconnect(); bool is_alive() const { return true; } bool is_connected() const { return static_cast<bool>(conn); } void got_ping(utime_t); void discard_state(); seastar::future<> remove(); /// Call when notify_ack received on notify_id seastar::future<> notify_ack( uint64_t notify_id, ///< [in] id of acked notify const ceph::bufferlist& reply_bl); ///< [in] notify reply buffer template <class... Args> static seastar::shared_ptr<Watch> create(Args&&... 
args) { return seastar::make_shared<Watch>(private_ctag_t{}, std::forward<Args>(args)...); }; uint64_t get_watcher_gid() const { return entity_name.num(); } auto get_pg() const { return pg; } auto& get_entity() const { return entity_name; } auto& get_cookie() const { return winfo.cookie; } auto& get_peer_addr() const { return winfo.addr; } void cancel_notify(const uint64_t notify_id); void do_watch_timeout(); }; using WatchRef = seastar::shared_ptr<Watch>; struct notify_reply_t { uint64_t watcher_gid; uint64_t watcher_cookie; ceph::bufferlist bl; bool operator<(const notify_reply_t& rhs) const; DENC(notify_reply_t, v, p) { // there is no versioning / preamble denc(v.watcher_gid, p); denc(v.watcher_cookie, p); denc(v.bl, p); } }; std::ostream &operator<<(std::ostream &out, const notify_reply_t &rhs); class Notify : public seastar::enable_shared_from_this<Notify> { std::set<WatchRef> watchers; const notify_info_t ninfo; crimson::net::ConnectionRef conn; const uint64_t client_gid; const uint64_t user_version; bool complete{false}; bool discarded{false}; seastar::timer<seastar::lowres_clock> timeout_timer{ [this] { do_notify_timeout(); } }; ~Notify(); /// (gid,cookie) -> reply_bl for everyone who acked the notify std::multiset<notify_reply_t> notify_replies; uint64_t get_id() const { return ninfo.notify_id; } /// Sends notify completion if watchers.empty() or timeout seastar::future<> send_completion( std::set<WatchRef> timedout_watchers = {}); /// Called on Notify timeout void do_notify_timeout(); Notify(crimson::net::ConnectionRef conn, const notify_info_t& ninfo, const uint64_t client_gid, const uint64_t user_version); template <class WatchIteratorT> Notify(WatchIteratorT begin, WatchIteratorT end, crimson::net::ConnectionRef conn, const notify_info_t& ninfo, const uint64_t client_gid, const uint64_t user_version); // this is a private tag for the public constructor that turns it into // de facto private one. The motivation behind the hack is make_shared // used by create_n_propagate factory. struct private_ctag_t{}; using ptr_t = seastar::shared_ptr<Notify>; friend bool operator<(const ptr_t& lhs, const ptr_t& rhs) { assert(lhs); assert(rhs); return lhs->get_id() < rhs->get_id(); } friend bool operator<(const ptr_t& ptr, const uint64_t id) { assert(ptr); return ptr->get_id() < id; } friend bool operator<(const uint64_t id, const ptr_t& ptr) { assert(ptr); return id < ptr->get_id(); } friend Watch; public: template <class... Args> Notify(private_ctag_t, Args&&... args) : Notify(std::forward<Args>(args)...) { } template <class WatchIteratorT, class... Args> static seastar::future<> create_n_propagate( WatchIteratorT begin, WatchIteratorT end, Args&&... args); seastar::future<> remove_watcher(WatchRef watch); seastar::future<> complete_watcher(WatchRef watch, const ceph::bufferlist& reply_bl); }; template <class WatchIteratorT> Notify::Notify(WatchIteratorT begin, WatchIteratorT end, crimson::net::ConnectionRef conn, const notify_info_t& ninfo, const uint64_t client_gid, const uint64_t user_version) : watchers(begin, end), ninfo(ninfo), conn(std::move(conn)), client_gid(client_gid), user_version(user_version) { assert(!std::empty(watchers)); if (ninfo.timeout) { timeout_timer.arm(std::chrono::seconds{ninfo.timeout}); } } template <class WatchIteratorT, class... Args> seastar::future<> Notify::create_n_propagate( WatchIteratorT begin, WatchIteratorT end, Args&&... 
args) { static_assert( std::is_same_v<typename std::iterator_traits<WatchIteratorT>::value_type, crimson::osd::WatchRef>); if (begin == end) { auto notify = seastar::make_shared<Notify>( private_ctag_t{}, std::forward<Args>(args)...); return notify->send_completion(); } else { auto notify = seastar::make_shared<Notify>( private_ctag_t{}, begin, end, std::forward<Args>(args)...); return seastar::do_for_each(begin, end, [=] (auto& watchref) { return watchref->start_notify(notify); }); } } } // namespace crimson::osd WRITE_CLASS_DENC(crimson::osd::notify_reply_t) #if FMT_VERSION >= 90000 template <> struct fmt::formatter<crimson::osd::notify_reply_t> : fmt::ostream_formatter {}; #endif
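// A minimal, self-contained sketch of the private-tag constructor trick used
// by Watch and Notify above: enable_shared_from_this requires public
// inheritance and make_shared requires a reachable constructor, so a private
// tag type keeps direct construction confined to the create() factory. The
// class name is illustrative and the block is kept out of compilation.
#if 0
class TagGuarded : public seastar::enable_shared_from_this<TagGuarded> {
  struct private_ctag_t {};
public:
  explicit TagGuarded(private_ctag_t) {}
  static seastar::shared_ptr<TagGuarded> create() {
    // Only code that can name private_ctag_t can construct an instance,
    // yet make_shared still sees a public constructor.
    return seastar::make_shared<TagGuarded>(private_ctag_t{});
  }
};
#endif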
7,146
26.809339
92
h
null
ceph-main/src/crimson/osd/osd_operations/background_recovery.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include <seastar/core/future.hh> #include <seastar/core/sleep.hh> #include "messages/MOSDOp.h" #include "crimson/osd/pg.h" #include "crimson/osd/shard_services.h" #include "common/Formatter.h" #include "crimson/osd/osd_operation_external_tracking.h" #include "crimson/osd/osd_operations/background_recovery.h" namespace { seastar::logger& logger() { return crimson::get_logger(ceph_subsys_osd); } } namespace crimson { template <> struct EventBackendRegistry<osd::UrgentRecovery> { static std::tuple<> get_backends() { return {}; } }; template <> struct EventBackendRegistry<osd::PglogBasedRecovery> { static std::tuple<> get_backends() { return {}; } }; } namespace crimson::osd { template <class T> BackgroundRecoveryT<T>::BackgroundRecoveryT( Ref<PG> pg, ShardServices &ss, epoch_t epoch_started, crimson::osd::scheduler::scheduler_class_t scheduler_class, float delay) : pg(pg), epoch_started(epoch_started), delay(delay), ss(ss), scheduler_class(scheduler_class) {} template <class T> void BackgroundRecoveryT<T>::print(std::ostream &lhs) const { lhs << "BackgroundRecovery(" << pg->get_pgid() << ")"; } template <class T> void BackgroundRecoveryT<T>::dump_detail(Formatter *f) const { f->dump_stream("pgid") << pg->get_pgid(); f->open_object_section("recovery_detail"); { // TODO pg->dump_recovery_state(f); } f->close_section(); } template <class T> seastar::future<> BackgroundRecoveryT<T>::start() { logger().debug("{}: start", *this); typename T::IRef ref = static_cast<T*>(this); auto maybe_delay = seastar::now(); if (delay) { maybe_delay = seastar::sleep( std::chrono::milliseconds(std::lround(delay * 1000))); } return maybe_delay.then([ref, this] { return this->template with_blocking_event<OperationThrottler::BlockingEvent>( [ref, this] (auto&& trigger) { return ss.with_throttle_while( std::move(trigger), this, get_scheduler_params(), [this] { return T::interruptor::with_interruption([this] { return do_recovery(); }, [](std::exception_ptr) { return seastar::make_ready_future<bool>(false); }, pg); }).handle_exception_type([ref, this](const std::system_error& err) { if (err.code() == std::make_error_code(std::errc::interrupted)) { logger().debug("{} recovery interruped: {}", *pg, err.what()); return seastar::now(); } return seastar::make_exception_future<>(err); }); }); }); } UrgentRecovery::UrgentRecovery( const hobject_t& soid, const eversion_t& need, Ref<PG> pg, ShardServices& ss, epoch_t epoch_started) : BackgroundRecoveryT{pg, ss, epoch_started, crimson::osd::scheduler::scheduler_class_t::immediate}, soid{soid}, need(need) { } UrgentRecovery::interruptible_future<bool> UrgentRecovery::do_recovery() { logger().debug("{}: {}", __func__, *this); if (!pg->has_reset_since(epoch_started)) { return with_blocking_event<RecoveryBackend::RecoveryBlockingEvent, interruptor>([this] (auto&& trigger) { return pg->get_recovery_handler()->recover_missing(trigger, soid, need); }).then_interruptible([] { return seastar::make_ready_future<bool>(false); }); } return seastar::make_ready_future<bool>(false); } void UrgentRecovery::print(std::ostream &lhs) const { lhs << "UrgentRecovery(" << pg->get_pgid() << ", " << soid << ", v" << need << ", epoch_started: " << epoch_started << ")"; } void UrgentRecovery::dump_detail(Formatter *f) const { f->dump_stream("pgid") << pg->get_pgid(); f->open_object_section("recovery_detail"); { f->dump_stream("oid") << soid; f->dump_stream("version") << need; } f->close_section(); } 
PglogBasedRecovery::PglogBasedRecovery( Ref<PG> pg, ShardServices &ss, const epoch_t epoch_started, float delay) : BackgroundRecoveryT( std::move(pg), ss, epoch_started, crimson::osd::scheduler::scheduler_class_t::background_recovery, delay) {} PglogBasedRecovery::interruptible_future<bool> PglogBasedRecovery::do_recovery() { if (pg->has_reset_since(epoch_started)) { return seastar::make_ready_future<bool>(false); } return with_blocking_event<RecoveryBackend::RecoveryBlockingEvent, interruptor>([this] (auto&& trigger) { return pg->get_recovery_handler()->start_recovery_ops( trigger, crimson::common::local_conf()->osd_recovery_max_single_start); }); } PGPeeringPipeline &BackfillRecovery::peering_pp(PG &pg) { return pg.peering_request_pg_pipeline; } BackfillRecovery::interruptible_future<bool> BackfillRecovery::do_recovery() { logger().debug("{}", __func__); if (pg->has_reset_since(epoch_started)) { logger().debug("{}: pg got reset since epoch_started={}", __func__, epoch_started); return seastar::make_ready_future<bool>(false); } // TODO: limits return enter_stage<interruptor>( // process_event() of our boost::statechart machine is non-reentrant. // with the backfill_pipeline we protect it from a second entry from // the implementation of BackfillListener. // additionally, this stage serves to synchronize with PeeringEvent. peering_pp(*pg).process ).then_interruptible([this] { pg->get_recovery_handler()->dispatch_backfill_event(std::move(evt)); return seastar::make_ready_future<bool>(false); }); } template class BackgroundRecoveryT<UrgentRecovery>; template class BackgroundRecoveryT<PglogBasedRecovery>; template class BackgroundRecoveryT<BackfillRecovery>; } // namespace crimson::osd
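// A minimal sketch of the delay handling in BackgroundRecoveryT<T>::start()
// above: a fractional-second delay is rounded to whole milliseconds before
// the operation enters the throttler. The function name is illustrative and
// the block is kept out of compilation.
#if 0
seastar::future<> maybe_delay_recovery(float delay_seconds)
{
  if (delay_seconds == 0) {
    return seastar::now();
  }
  return seastar::sleep(
    std::chrono::milliseconds(std::lround(delay_seconds * 1000)));
}
#endif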
5,825
27.009615
81
cc
null
ceph-main/src/crimson/osd/osd_operations/background_recovery.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <boost/statechart/event_base.hpp> #include "crimson/net/Connection.h" #include "crimson/osd/osd_operation.h" #include "crimson/osd/recovery_backend.h" #include "crimson/common/type_helpers.h" #include "crimson/osd/osd_operations/peering_event.h" #include "crimson/osd/pg.h" namespace crimson::osd { class PG; class ShardServices; template <class T> class BackgroundRecoveryT : public PhasedOperationT<T> { public: static constexpr OperationTypeCode type = OperationTypeCode::background_recovery; BackgroundRecoveryT( Ref<PG> pg, ShardServices &ss, epoch_t epoch_started, crimson::osd::scheduler::scheduler_class_t scheduler_class, float delay = 0); virtual void print(std::ostream &) const; seastar::future<> start(); protected: Ref<PG> pg; const epoch_t epoch_started; float delay = 0; private: virtual void dump_detail(Formatter *f) const; crimson::osd::scheduler::params_t get_scheduler_params() const { return { 1, // cost 0, // owner scheduler_class }; } using do_recovery_ret_t = typename PhasedOperationT<T>::template interruptible_future<bool>; virtual do_recovery_ret_t do_recovery() = 0; ShardServices &ss; const crimson::osd::scheduler::scheduler_class_t scheduler_class; }; /// represent a recovery initiated for serving a client request /// /// unlike @c PglogBasedRecovery and @c BackfillRecovery, /// @c UrgentRecovery is not throttled by the scheduler. and it /// utilizes @c RecoveryBackend directly to recover the unreadable /// object. class UrgentRecovery final : public BackgroundRecoveryT<UrgentRecovery> { public: UrgentRecovery( const hobject_t& soid, const eversion_t& need, Ref<PG> pg, ShardServices& ss, epoch_t epoch_started); void print(std::ostream&) const final; std::tuple< OperationThrottler::BlockingEvent, RecoveryBackend::RecoveryBlockingEvent > tracking_events; private: void dump_detail(Formatter* f) const final; interruptible_future<bool> do_recovery() override; const hobject_t soid; const eversion_t need; }; class PglogBasedRecovery final : public BackgroundRecoveryT<PglogBasedRecovery> { public: PglogBasedRecovery( Ref<PG> pg, ShardServices &ss, epoch_t epoch_started, float delay = 0); std::tuple< OperationThrottler::BlockingEvent, RecoveryBackend::RecoveryBlockingEvent > tracking_events; private: interruptible_future<bool> do_recovery() override; }; class BackfillRecovery final : public BackgroundRecoveryT<BackfillRecovery> { public: template <class EventT> BackfillRecovery( Ref<PG> pg, ShardServices &ss, epoch_t epoch_started, const EventT& evt); PipelineHandle& get_handle() { return handle; } std::tuple< OperationThrottler::BlockingEvent, PGPeeringPipeline::Process::BlockingEvent > tracking_events; private: boost::intrusive_ptr<const boost::statechart::event_base> evt; PipelineHandle handle; static PGPeeringPipeline &peering_pp(PG &pg); interruptible_future<bool> do_recovery() override; }; template <class EventT> BackfillRecovery::BackfillRecovery( Ref<PG> pg, ShardServices &ss, const epoch_t epoch_started, const EventT& evt) : BackgroundRecoveryT( std::move(pg), ss, epoch_started, crimson::osd::scheduler::scheduler_class_t::background_best_effort), evt(evt.intrusive_from_this()) {} } #if FMT_VERSION >= 90000 template <> struct fmt::formatter<crimson::osd::BackfillRecovery> : fmt::ostream_formatter {}; template <> struct fmt::formatter<crimson::osd::PglogBasedRecovery> : fmt::ostream_formatter {}; template <> struct 
fmt::formatter<crimson::osd::UrgentRecovery> : fmt::ostream_formatter {}; template <class T> struct fmt::formatter<crimson::osd::BackgroundRecoveryT<T>> : fmt::ostream_formatter {}; #endif
3,950
26.248276
107
h
null
ceph-main/src/crimson/osd/osd_operations/client_request.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*- // vim: ts=8 sw=2 smarttab expandtab #include "messages/MOSDOp.h" #include "messages/MOSDOpReply.h" #include "crimson/common/exception.h" #include "crimson/osd/pg.h" #include "crimson/osd/osd.h" #include "common/Formatter.h" #include "crimson/osd/osd_operation_external_tracking.h" #include "crimson/osd/osd_operations/client_request.h" #include "crimson/osd/osd_connection_priv.h" #include "osd/object_state_fmt.h" namespace { seastar::logger& logger() { return crimson::get_logger(ceph_subsys_osd); } } namespace crimson::osd { void ClientRequest::Orderer::requeue( ShardServices &shard_services, Ref<PG> pg) { for (auto &req: list) { logger().debug("{}: {} requeueing {}", __func__, *pg, req); req.reset_instance_handle(); std::ignore = req.with_pg_int(shard_services, pg); } } void ClientRequest::Orderer::clear_and_cancel() { for (auto i = list.begin(); i != list.end(); ) { logger().debug( "ClientRequest::Orderer::clear_and_cancel: {}", *i); i->complete_request(); remove_request(*(i++)); } } void ClientRequest::complete_request() { track_event<CompletionEvent>(); on_complete.set_value(); } ClientRequest::ClientRequest( ShardServices &shard_services, crimson::net::ConnectionRef conn, Ref<MOSDOp> &&m) : put_historic_shard_services(&shard_services), conn(std::move(conn)), m(std::move(m)), instance_handle(new instance_handle_t) {} ClientRequest::~ClientRequest() { logger().debug("{}: destroying", *this); } void ClientRequest::print(std::ostream &lhs) const { lhs << "m=[" << *m << "]"; } void ClientRequest::dump_detail(Formatter *f) const { logger().debug("{}: dumping", *this); std::apply([f] (auto... event) { (..., event.dump(f)); }, tracking_events); } ConnectionPipeline &ClientRequest::get_connection_pipeline() { return get_osd_priv(conn.get()).client_request_conn_pipeline; } ClientRequest::PGPipeline &ClientRequest::client_pp(PG &pg) { return pg.request_pg_pipeline; } bool ClientRequest::is_pg_op() const { return std::any_of( begin(m->ops), end(m->ops), [](auto& op) { return ceph_osd_op_type_pg(op.op.op); }); } seastar::future<> ClientRequest::with_pg_int( ShardServices &shard_services, Ref<PG> pgref) { epoch_t same_interval_since = pgref->get_interval_start_epoch(); logger().debug("{} same_interval_since: {}", *this, same_interval_since); if (m->finish_decode()) { m->clear_payload(); } const auto this_instance_id = instance_id++; OperationRef opref{this}; auto instance_handle = get_instance_handle(); auto &ihref = *instance_handle; return interruptor::with_interruption( [this, pgref, this_instance_id, &ihref, &shard_services]() mutable { PG &pg = *pgref; if (pg.can_discard_op(*m)) { return shard_services.send_incremental_map( std::ref(*conn), m->get_map_epoch() ).then([this, this_instance_id, pgref] { logger().debug("{}.{}: discarding", *this, this_instance_id); pgref->client_request_orderer.remove_request(*this); complete_request(); return interruptor::now(); }); } return ihref.enter_stage<interruptor>(client_pp(pg).await_map, *this ).then_interruptible([this, this_instance_id, &pg, &ihref] { logger().debug("{}.{}: after await_map stage", *this, this_instance_id); return ihref.enter_blocker( *this, pg.osdmap_gate, &decltype(pg.osdmap_gate)::wait_for_map, m->get_min_epoch(), nullptr); }).then_interruptible([this, this_instance_id, &pg, &ihref](auto map) { logger().debug("{}.{}: after wait_for_map", *this, this_instance_id); return ihref.enter_stage<interruptor>(client_pp(pg).wait_for_active, *this); 
}).then_interruptible([this, this_instance_id, &pg, &ihref]() { logger().debug( "{}.{}: after wait_for_active stage", *this, this_instance_id); return ihref.enter_blocker( *this, pg.wait_for_active_blocker, &decltype(pg.wait_for_active_blocker)::wait); }).then_interruptible([this, pgref, this_instance_id, &ihref]() mutable -> interruptible_future<> { logger().debug( "{}.{}: after wait_for_active", *this, this_instance_id); if (is_pg_op()) { return process_pg_op(pgref); } else { return process_op(ihref, pgref); } }).then_interruptible([this, this_instance_id, pgref] { logger().debug("{}.{}: after process*", *this, this_instance_id); pgref->client_request_orderer.remove_request(*this); complete_request(); }); }, [this, this_instance_id, pgref](std::exception_ptr eptr) { // TODO: better debug output logger().debug("{}.{}: interrupted {}", *this, this_instance_id, eptr); }, pgref).finally( [opref=std::move(opref), pgref=std::move(pgref), instance_handle=std::move(instance_handle), &ihref] { ihref.handle.exit(); }); } seastar::future<> ClientRequest::with_pg( ShardServices &shard_services, Ref<PG> pgref) { put_historic_shard_services = &shard_services; pgref->client_request_orderer.add_request(*this); auto ret = on_complete.get_future(); std::ignore = with_pg_int( shard_services, std::move(pgref) ); return ret; } ClientRequest::interruptible_future<> ClientRequest::process_pg_op( Ref<PG> &pg) { return pg->do_pg_ops( m ).then_interruptible([this, pg=std::move(pg)](MURef<MOSDOpReply> reply) { return conn->send(std::move(reply)); }); } auto ClientRequest::reply_op_error(const Ref<PG>& pg, int err) { logger().debug("{}: replying with error {}", *this, err); auto reply = crimson::make_message<MOSDOpReply>( m.get(), err, pg->get_osdmap_epoch(), m->get_flags() & (CEPH_OSD_FLAG_ACK|CEPH_OSD_FLAG_ONDISK), !m->has_flag(CEPH_OSD_FLAG_RETURNVEC)); reply->set_reply_versions(eversion_t(), 0); reply->set_op_returns(std::vector<pg_log_op_return_item_t>{}); return conn->send(std::move(reply)); } ClientRequest::interruptible_future<> ClientRequest::process_op(instance_handle_t &ihref, Ref<PG> &pg) { return ihref.enter_stage<interruptor>( client_pp(*pg).recover_missing, *this ).then_interruptible( [this, pg]() mutable { if (pg->is_primary()) { return do_recover_missing(pg, m->get_hobj()); } else { logger().debug("process_op: Skipping do_recover_missing" "on non primary pg"); return interruptor::now(); } }).then_interruptible([this, pg, &ihref]() mutable { return pg->already_complete(m->get_reqid()).then_interruptible( [this, pg, &ihref](auto completed) mutable -> PG::load_obc_iertr::future<> { if (completed) { auto reply = crimson::make_message<MOSDOpReply>( m.get(), completed->err, pg->get_osdmap_epoch(), CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK, false); reply->set_reply_versions(completed->version, completed->user_version); return conn->send(std::move(reply)); } else { return ihref.enter_stage<interruptor>(client_pp(*pg).get_obc, *this ).then_interruptible( [this, pg, &ihref]() mutable -> PG::load_obc_iertr::future<> { logger().debug("{}: in get_obc stage", *this); op_info.set_from_op(&*m, *pg->get_osdmap()); return pg->with_locked_obc( m->get_hobj(), op_info, [this, pg, &ihref](auto obc) mutable { logger().debug("{}: got obc {}", *this, obc->obs); return ihref.enter_stage<interruptor>( client_pp(*pg).process, *this ).then_interruptible([this, pg, obc, &ihref]() mutable { return do_process(ihref, pg, obc); }); }); }); } }); }).handle_error_interruptible( PG::load_obc_ertr::all_same_way([this, 
pg=std::move(pg)](const auto &code) { logger().error("ClientRequest saw error code {}", code); assert(code.value() > 0); return reply_op_error(pg, -code.value()); })); } ClientRequest::interruptible_future<> ClientRequest::do_process( instance_handle_t &ihref, Ref<PG>& pg, crimson::osd::ObjectContextRef obc) { if (m->has_flag(CEPH_OSD_FLAG_PARALLELEXEC)) { return reply_op_error(pg, -EINVAL); } const pg_pool_t pool = pg->get_pgpool().info; if (pool.has_flag(pg_pool_t::FLAG_EIO)) { // drop op on the floor; the client will handle returning EIO if (m->has_flag(CEPH_OSD_FLAG_SUPPORTSPOOLEIO)) { logger().debug("discarding op due to pool EIO flag"); return seastar::now(); } else { logger().debug("replying EIO due to pool EIO flag"); return reply_op_error(pg, -EIO); } } if (m->get_oid().name.size() > crimson::common::local_conf()->osd_max_object_name_len) { return reply_op_error(pg, -ENAMETOOLONG); } else if (m->get_hobj().get_key().size() > crimson::common::local_conf()->osd_max_object_name_len) { return reply_op_error(pg, -ENAMETOOLONG); } else if (m->get_hobj().nspace.size() > crimson::common::local_conf()->osd_max_object_namespace_len) { return reply_op_error(pg, -ENAMETOOLONG); } else if (m->get_hobj().oid.name.empty()) { return reply_op_error(pg, -EINVAL); } else if (pg->get_osdmap()->is_blocklisted(conn->get_peer_addr())) { logger().info("{} is blocklisted", conn->get_peer_addr()); return reply_op_error(pg, -EBLOCKLISTED); } if (!obc->obs.exists && !op_info.may_write()) { return reply_op_error(pg, -ENOENT); } SnapContext snapc = get_snapc(pg,obc); if ((m->has_flag(CEPH_OSD_FLAG_ORDERSNAP)) && snapc.seq < obc->ssc->snapset.seq) { logger().debug("{} ORDERSNAP flag set and snapc seq {}", " < snapset seq {} on {}", __func__, snapc.seq, obc->ssc->snapset.seq, obc->obs.oi.soid); return reply_op_error(pg, -EOLDSNAPC); } if (!pg->is_primary()) { // primary can handle both normal ops and balanced reads if (is_misdirected(*pg)) { logger().trace("do_process: dropping misdirected op"); return seastar::now(); } else if (const hobject_t& hoid = m->get_hobj(); !pg->get_peering_state().can_serve_replica_read(hoid)) { logger().debug("{}: unstable write on replica, " "bouncing to primary", __func__); return reply_op_error(pg, -EAGAIN); } else { logger().debug("{}: serving replica read on oid {}", __func__, m->get_hobj()); } } return pg->do_osd_ops(m, conn, obc, op_info, snapc).safe_then_unpack_interruptible( [this, pg, &ihref](auto submitted, auto all_completed) mutable { return submitted.then_interruptible([this, pg, &ihref] { return ihref.enter_stage<interruptor>(client_pp(*pg).wait_repop, *this); }).then_interruptible( [this, pg, all_completed=std::move(all_completed), &ihref]() mutable { return all_completed.safe_then_interruptible( [this, pg, &ihref](MURef<MOSDOpReply> reply) { return ihref.enter_stage<interruptor>(client_pp(*pg).send_reply, *this ).then_interruptible( [this, reply=std::move(reply)]() mutable { logger().debug("{}: sending response", *this); return conn->send(std::move(reply)); }); }, crimson::ct_error::eagain::handle([this, pg, &ihref]() mutable { return process_op(ihref, pg); })); }); }, crimson::ct_error::eagain::handle([this, pg, &ihref]() mutable { return process_op(ihref, pg); })); } bool ClientRequest::is_misdirected(const PG& pg) const { // otherwise take a closer look if (const int flags = m->get_flags(); flags & CEPH_OSD_FLAG_BALANCE_READS || flags & CEPH_OSD_FLAG_LOCALIZE_READS) { if (!op_info.may_read()) { // no read found, so it can't be balanced read return true; } if 
(op_info.may_write() || op_info.may_cache()) { // write op, but i am not primary return true; } // balanced reads; any replica will do return false; } // neither balanced nor localize reads return true; } void ClientRequest::put_historic() const { ceph_assert_always(put_historic_shard_services); put_historic_shard_services->get_registry().put_historic(*this); } const SnapContext ClientRequest::get_snapc( Ref<PG>& pg, crimson::osd::ObjectContextRef obc) const { SnapContext snapc; if (op_info.may_write() || op_info.may_cache()) { // snap if (pg->get_pgpool().info.is_pool_snaps_mode()) { // use pool's snapc snapc = pg->get_pgpool().snapc; logger().debug("{} using pool's snapc snaps={}", __func__, snapc.snaps); } else { // client specified snapc snapc.seq = m->get_snap_seq(); snapc.snaps = m->get_snaps(); logger().debug("{} client specified snapc seq={} snaps={}", __func__, snapc.seq, snapc.snaps); } } return snapc; } }
12,887
32.131105
85
cc
null
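A minimal sketch of the ORDERSNAP rule applied in ClientRequest::do_process() in the client_request.cc row above: a write carrying CEPH_OSD_FLAG_ORDERSNAP whose snap context sequence is older than the object's SnapSet sequence is rejected with -EOLDSNAPC. The types and the errno constant below are simplified stand-ins for illustration only, not the Ceph definitions.

// Hedged sketch of the ORDERSNAP comparison; OLD_SNAPC_ERR is a made-up
// placeholder value standing in for Ceph's EOLDSNAPC.
#include <cstdint>
#include <iostream>

constexpr int OLD_SNAPC_ERR = 85;  // hypothetical placeholder constant

int check_ordersnap(bool ordersnap_flag,
                    std::uint64_t client_snapc_seq,
                    std::uint64_t object_snapset_seq) {
  if (ordersnap_flag && client_snapc_seq < object_snapset_seq) {
    return -OLD_SNAPC_ERR;  // client's snap context is stale; it must refresh first
  }
  return 0;                 // proceed with the write
}

int main() {
  std::cout << check_ordersnap(true, 4, 7) << '\n';   // rejected
  std::cout << check_ordersnap(true, 7, 7) << '\n';   // accepted
  std::cout << check_ordersnap(false, 4, 7) << '\n';  // flag unset, accepted
}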
ceph-main/src/crimson/osd/osd_operations/client_request.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <seastar/core/future.hh> #include <boost/intrusive/list.hpp> #include <boost/intrusive_ptr.hpp> #include "osd/osd_op_util.h" #include "crimson/net/Connection.h" #include "crimson/osd/object_context.h" #include "crimson/osd/osdmap_gate.h" #include "crimson/osd/osd_operation.h" #include "crimson/osd/osd_operations/client_request_common.h" #include "crimson/osd/osd_operations/common/pg_pipeline.h" #include "crimson/osd/pg_activation_blocker.h" #include "crimson/osd/pg_map.h" #include "crimson/common/type_helpers.h" #include "crimson/common/utility.h" #include "messages/MOSDOp.h" namespace crimson::osd { class PG; class OSD; class ShardServices; class ClientRequest final : public PhasedOperationT<ClientRequest>, private CommonClientRequest { // Initially set to primary core, updated to pg core after move, // used by put_historic ShardServices *put_historic_shard_services = nullptr; crimson::net::ConnectionRef conn; // must be after conn due to ConnectionPipeline's life-time Ref<MOSDOp> m; OpInfo op_info; seastar::promise<> on_complete; unsigned instance_id = 0; public: class PGPipeline : public CommonPGPipeline { public: struct AwaitMap : OrderedExclusivePhaseT<AwaitMap> { static constexpr auto type_name = "ClientRequest::PGPipeline::await_map"; } await_map; struct WaitRepop : OrderedConcurrentPhaseT<WaitRepop> { static constexpr auto type_name = "ClientRequest::PGPipeline::wait_repop"; } wait_repop; struct SendReply : OrderedExclusivePhaseT<SendReply> { static constexpr auto type_name = "ClientRequest::PGPipeline::send_reply"; } send_reply; friend class ClientRequest; friend class LttngBackend; friend class HistoricBackend; friend class ReqRequest; friend class LogMissingRequest; friend class LogMissingRequestReply; }; /** * instance_handle_t * * Client request is, at present, the only Operation which can be requeued. * This is, mostly, fine. However, reusing the PipelineHandle or * BlockingEvent structures before proving that the prior instance has stopped * can create hangs or crashes due to violations of the BlockerT and * PipelineHandle invariants. * * To solve this, we create an instance_handle_t which contains the events * for the portion of execution that can be rerun as well as the * PipelineHandle. ClientRequest::with_pg_int grabs a reference to the current * instance_handle_t and releases its PipelineHandle in the finally block. * On requeue, we create a new instance_handle_t with a fresh PipelineHandle * and events tuple and use it and use it for the next invocation of * with_pg_int. */ std::tuple< StartEvent, ConnectionPipeline::AwaitActive::BlockingEvent, ConnectionPipeline::AwaitMap::BlockingEvent, OSD_OSDMapGate::OSDMapBlocker::BlockingEvent, ConnectionPipeline::GetPG::BlockingEvent, PGMap::PGCreationBlockingEvent, CompletionEvent > tracking_events; class instance_handle_t : public boost::intrusive_ref_counter< instance_handle_t, boost::thread_unsafe_counter> { public: // intrusive_ptr because seastar::lw_shared_ptr includes a cpu debug check // that we will fail since the core on which we allocate the request may not // be the core on which we perform with_pg_int. This is harmless, since we // don't leave any references on the source core, so we just bypass it by using // intrusive_ptr instead. 
using ref_t = boost::intrusive_ptr<instance_handle_t>; PipelineHandle handle; std::tuple< PGPipeline::AwaitMap::BlockingEvent, PG_OSDMapGate::OSDMapBlocker::BlockingEvent, PGPipeline::WaitForActive::BlockingEvent, PGActivationBlocker::BlockingEvent, PGPipeline::RecoverMissing::BlockingEvent, PGPipeline::GetOBC::BlockingEvent, PGPipeline::Process::BlockingEvent, PGPipeline::WaitRepop::BlockingEvent, PGPipeline::SendReply::BlockingEvent, CompletionEvent > pg_tracking_events; template <typename BlockingEventT, typename InterruptorT=void, typename F> auto with_blocking_event(F &&f, ClientRequest &op) { auto ret = std::forward<F>(f)( typename BlockingEventT::template Trigger<ClientRequest>{ std::get<BlockingEventT>(pg_tracking_events), op }); if constexpr (std::is_same_v<InterruptorT, void>) { return ret; } else { using ret_t = decltype(ret); return typename InterruptorT::template futurize_t<ret_t>{std::move(ret)}; } } template <typename InterruptorT=void, typename StageT> auto enter_stage(StageT &stage, ClientRequest &op) { return this->template with_blocking_event< typename StageT::BlockingEvent, InterruptorT>( [&stage, this](auto &&trigger) { return handle.template enter<ClientRequest>( stage, std::move(trigger)); }, op); } template < typename InterruptorT=void, typename BlockingObj, typename Method, typename... Args> auto enter_blocker( ClientRequest &op, BlockingObj &obj, Method method, Args&&... args) { return this->template with_blocking_event< typename BlockingObj::Blocker::BlockingEvent, InterruptorT>( [&obj, method, args=std::forward_as_tuple(std::move(args)...)](auto &&trigger) mutable { return apply_method_to_tuple( obj, method, std::tuple_cat( std::forward_as_tuple(std::move(trigger)), std::move(args)) ); }, op); } }; instance_handle_t::ref_t instance_handle; void reset_instance_handle() { instance_handle = new instance_handle_t; } auto get_instance_handle() { return instance_handle; } using ordering_hook_t = boost::intrusive::list_member_hook<>; ordering_hook_t ordering_hook; class Orderer { using list_t = boost::intrusive::list< ClientRequest, boost::intrusive::member_hook< ClientRequest, typename ClientRequest::ordering_hook_t, &ClientRequest::ordering_hook> >; list_t list; public: void add_request(ClientRequest &request) { assert(!request.ordering_hook.is_linked()); intrusive_ptr_add_ref(&request); list.push_back(request); } void remove_request(ClientRequest &request) { assert(request.ordering_hook.is_linked()); list.erase(list_t::s_iterator_to(request)); intrusive_ptr_release(&request); } void requeue(ShardServices &shard_services, Ref<PG> pg); void clear_and_cancel(); }; void complete_request(); static constexpr OperationTypeCode type = OperationTypeCode::client_request; ClientRequest( ShardServices &shard_services, crimson::net::ConnectionRef, Ref<MOSDOp> &&m); ~ClientRequest(); void print(std::ostream &) const final; void dump_detail(Formatter *f) const final; static constexpr bool can_create() { return false; } spg_t get_pgid() const { return m->get_spg(); } PipelineHandle &get_handle() { return instance_handle->handle; } epoch_t get_epoch() const { return m->get_min_epoch(); } ConnectionPipeline &get_connection_pipeline(); seastar::future<crimson::net::ConnectionFRef> prepare_remote_submission() { assert(conn); return conn.get_foreign( ).then([this](auto f_conn) { conn.reset(); return f_conn; }); } void finish_remote_submission(crimson::net::ConnectionFRef _conn) { assert(!conn); conn = make_local_shared_foreign(std::move(_conn)); } seastar::future<> with_pg_int( 
ShardServices &shard_services, Ref<PG> pg); public: seastar::future<> with_pg( ShardServices &shard_services, Ref<PG> pgref); private: template <typename FuncT> interruptible_future<> with_sequencer(FuncT&& func); auto reply_op_error(const Ref<PG>& pg, int err); interruptible_future<> do_process( instance_handle_t &ihref, Ref<PG>& pg, crimson::osd::ObjectContextRef obc); ::crimson::interruptible::interruptible_future< ::crimson::osd::IOInterruptCondition> process_pg_op( Ref<PG> &pg); ::crimson::interruptible::interruptible_future< ::crimson::osd::IOInterruptCondition> process_op( instance_handle_t &ihref, Ref<PG> &pg); bool is_pg_op() const; PGPipeline &client_pp(PG &pg); template <typename Errorator> using interruptible_errorator = ::crimson::interruptible::interruptible_errorator< ::crimson::osd::IOInterruptCondition, Errorator>; bool is_misdirected(const PG& pg) const; const SnapContext get_snapc( Ref<PG>& pg, crimson::osd::ObjectContextRef obc) const; public: friend class LttngBackend; friend class HistoricBackend; auto get_started() const { return get_event<StartEvent>().get_timestamp(); }; auto get_completed() const { return get_event<CompletionEvent>().get_timestamp(); }; void put_historic() const; }; } #if FMT_VERSION >= 90000 template <> struct fmt::formatter<crimson::osd::ClientRequest> : fmt::ostream_formatter {}; #endif
9,103
31.283688
91
h
null
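The instance_handle_t comment in the client_request.h row above describes the requeue invariant: each re-run of with_pg_int must start from a fresh handle so pipeline state from an aborted run cannot leak into the next one. A rough sketch of just that idea follows, assuming hypothetical FakeHandle/FakeRequest stand-ins rather than the crimson types.

// Hedged sketch: a requeued request swaps in a fresh handle before re-running.
#include <iostream>
#include <memory>

struct FakeHandle {
  int entered_stages = 0;            // stands in for PipelineHandle state
};

struct FakeRequest {
  std::shared_ptr<FakeHandle> handle = std::make_shared<FakeHandle>();

  void reset_instance_handle() {     // analogous role to ClientRequest::reset_instance_handle
    handle = std::make_shared<FakeHandle>();
  }

  void run_once() {                  // analogous role to one with_pg_int invocation
    ++handle->entered_stages;
  }
};

int main() {
  FakeRequest req;
  req.run_once();
  std::cout << "stages used by first run: " << req.handle->entered_stages << '\n';   // 1
  req.reset_instance_handle();       // requeue: old handle dropped, new one is clean
  req.run_once();
  std::cout << "stages used after requeue: " << req.handle->entered_stages << '\n';  // 1, not 2
}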
ceph-main/src/crimson/osd/osd_operations/client_request_common.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab

#include "crimson/osd/osd_operations/client_request_common.h"
#include "crimson/osd/pg.h"
#include "crimson/osd/osd_operations/background_recovery.h"

namespace {
  seastar::logger& logger() {
    return crimson::get_logger(ceph_subsys_osd);
  }
}

namespace crimson::osd {

typename InterruptibleOperation::template interruptible_future<>
CommonClientRequest::do_recover_missing(
  Ref<PG>& pg, const hobject_t& soid)
{
  eversion_t ver;
  assert(pg->is_primary());
  logger().debug("{} check for recovery, {}", __func__, soid);
  if (!pg->is_unreadable_object(soid, &ver) &&
      !pg->is_degraded_or_backfilling_object(soid)) {
    return seastar::now();
  }
  logger().debug("{} need to wait for recovery, {}", __func__, soid);
  if (pg->get_recovery_backend()->is_recovering(soid)) {
    return pg->get_recovery_backend()->get_recovering(soid).wait_for_recovered();
  } else {
    auto [op, fut] =
      pg->get_shard_services().start_operation<UrgentRecovery>(
        soid, ver, pg, pg->get_shard_services(), pg->get_osdmap_epoch());
    return std::move(fut);
  }
}

bool CommonClientRequest::should_abort_request(
  const Operation& op,
  std::exception_ptr eptr)
{
  if (*eptr.__cxa_exception_type() ==
      typeid(::crimson::common::actingset_changed)) {
    try {
      std::rethrow_exception(eptr);
    } catch(::crimson::common::actingset_changed& e) {
      if (e.is_primary()) {
        logger().debug("{} {} operation restart, acting set changed", __func__, op);
        return false;
      } else {
        logger().debug("{} {} operation abort, up primary changed", __func__, op);
        return true;
      }
    }
  } else {
    assert(*eptr.__cxa_exception_type() ==
           typeid(crimson::common::system_shutdown_exception));
    crimson::get_logger(ceph_subsys_osd).debug(
      "{} {} operation skipped, system shutdown", __func__, op);
    return true;
  }
}

} // namespace crimson::osd
2,027
30.2
84
cc
null
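should_abort_request() in the client_request_common.cc row above reduces to a small decision rule: restart the operation when the acting set changed but this OSD is still primary, and abort it when the up primary changed or the system is shutting down. A hedged sketch of that rule, with a made-up enum standing in for the real exception types:

// Hedged sketch of the abort/restart decision; InterruptCause is illustrative only.
#include <iostream>

enum class InterruptCause {
  ActingSetChangedStillPrimary,  // actingset_changed with is_primary() == true
  ActingSetChangedNewPrimary,    // actingset_changed with is_primary() == false
  SystemShutdown                 // system_shutdown_exception
};

bool should_abort(InterruptCause cause) {
  switch (cause) {
  case InterruptCause::ActingSetChangedStillPrimary:
    return false;  // same primary: restart (requeue) the operation
  case InterruptCause::ActingSetChangedNewPrimary:
  case InterruptCause::SystemShutdown:
    return true;   // drop the operation
  }
  return true;     // unreachable; keeps the compiler satisfied
}

int main() {
  std::cout << std::boolalpha
            << should_abort(InterruptCause::ActingSetChangedStillPrimary) << '\n'  // false
            << should_abort(InterruptCause::ActingSetChangedNewPrimary) << '\n'    // true
            << should_abort(InterruptCause::SystemShutdown) << '\n';               // true
}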
ceph-main/src/crimson/osd/osd_operations/client_request_common.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#pragma once

#include "crimson/common/operation.h"
#include "crimson/common/type_helpers.h"
#include "crimson/osd/osd_operation.h"

namespace crimson::osd {

struct CommonClientRequest {
  static InterruptibleOperation::template interruptible_future<>
  do_recover_missing(Ref<PG>& pg, const hobject_t& soid);

  static bool should_abort_request(
    const crimson::Operation& op, std::exception_ptr eptr);
};

} // namespace crimson::osd
539
24.714286
70
h
null
ceph-main/src/crimson/osd/osd_operations/internal_client_request.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*- // vim: ts=8 sw=2 smarttab expandtab #include <seastar/core/future.hh> #include "crimson/osd/osd_operations/internal_client_request.h" namespace { seastar::logger& logger() { return crimson::get_logger(ceph_subsys_osd); } } namespace crimson { template <> struct EventBackendRegistry<osd::InternalClientRequest> { static std::tuple<> get_backends() { return {}; } }; } namespace crimson::osd { InternalClientRequest::InternalClientRequest(Ref<PG> pg) : pg(std::move(pg)) { assert(bool(this->pg)); assert(this->pg->is_primary()); } InternalClientRequest::~InternalClientRequest() { logger().debug("{}: destroying", *this); } void InternalClientRequest::print(std::ostream &) const { } void InternalClientRequest::dump_detail(Formatter *f) const { } CommonPGPipeline& InternalClientRequest::client_pp() { return pg->request_pg_pipeline; } seastar::future<> InternalClientRequest::start() { track_event<StartEvent>(); return crimson::common::handle_system_shutdown([this] { return seastar::repeat([this] { logger().debug("{}: in repeat", *this); return interruptor::with_interruption([this]() mutable { return enter_stage<interruptor>( client_pp().wait_for_active ).then_interruptible([this] { return with_blocking_event<PGActivationBlocker::BlockingEvent, interruptor>([this] (auto&& trigger) { return pg->wait_for_active_blocker.wait(std::move(trigger)); }); }).then_interruptible([this] { return enter_stage<interruptor>( client_pp().recover_missing); }).then_interruptible([this] { return do_recover_missing(pg, get_target_oid()); }).then_interruptible([this] { return enter_stage<interruptor>( client_pp().get_obc); }).then_interruptible([this] () -> PG::load_obc_iertr::future<> { logger().debug("{}: getting obc lock", *this); return seastar::do_with(create_osd_ops(), [this](auto& osd_ops) mutable { logger().debug("InternalClientRequest: got {} OSDOps to execute", std::size(osd_ops)); [[maybe_unused]] const int ret = op_info.set_from_op( std::as_const(osd_ops), pg->get_pgid().pgid, *pg->get_osdmap()); assert(ret == 0); return pg->with_locked_obc(get_target_oid(), op_info, [&osd_ops, this](auto obc) { return enter_stage<interruptor>(client_pp().process ).then_interruptible( [obc=std::move(obc), &osd_ops, this] { return pg->do_osd_ops( std::move(obc), osd_ops, std::as_const(op_info), get_do_osd_ops_params(), [] { return PG::do_osd_ops_iertr::now(); }, [] (const std::error_code& e) { return PG::do_osd_ops_iertr::now(); } ).safe_then_unpack_interruptible( [](auto submitted, auto all_completed) { return all_completed.handle_error_interruptible( crimson::ct_error::eagain::handle([] { return seastar::now(); })); }, crimson::ct_error::eagain::handle([] { return interruptor::now(); }) ); }); }); }); }).handle_error_interruptible(PG::load_obc_ertr::all_same_way([] { return seastar::now(); })).then_interruptible([] { return seastar::stop_iteration::yes; }); }, [this](std::exception_ptr eptr) { if (should_abort_request(*this, std::move(eptr))) { return seastar::stop_iteration::yes; } else { return seastar::stop_iteration::no; } }, pg); }).then([this] { track_event<CompletionEvent>(); }); }); } } // namespace crimson::osd
4,159
30.755725
78
cc
null
ceph-main/src/crimson/osd/osd_operations/internal_client_request.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include "crimson/common/type_helpers.h" #include "crimson/osd/osd_operation.h" #include "crimson/osd/osd_operations/client_request_common.h" #include "crimson/osd/osd_operations/common/pg_pipeline.h" #include "crimson/osd/pg.h" #include "crimson/osd/pg_activation_blocker.h" namespace crimson::osd { class InternalClientRequest : public PhasedOperationT<InternalClientRequest>, private CommonClientRequest { public: explicit InternalClientRequest(Ref<PG> pg); ~InternalClientRequest(); // imposed by `ShardService::start_operation<T>(...)`. seastar::future<> start(); protected: virtual const hobject_t& get_target_oid() const = 0; virtual PG::do_osd_ops_params_t get_do_osd_ops_params() const = 0; virtual std::vector<OSDOp> create_osd_ops() = 0; const PG& get_pg() const { return *pg; } private: friend OperationT<InternalClientRequest>; static constexpr OperationTypeCode type = OperationTypeCode::internal_client_request; void print(std::ostream &) const final; void dump_detail(Formatter *f) const final; CommonPGPipeline& client_pp(); seastar::future<> do_process(); Ref<PG> pg; OpInfo op_info; PipelineHandle handle; public: PipelineHandle& get_handle() { return handle; } std::tuple< StartEvent, CommonPGPipeline::WaitForActive::BlockingEvent, PGActivationBlocker::BlockingEvent, CommonPGPipeline::RecoverMissing::BlockingEvent, CommonPGPipeline::GetOBC::BlockingEvent, CommonPGPipeline::Process::BlockingEvent, CompletionEvent > tracking_events; }; } // namespace crimson::osd #if FMT_VERSION >= 90000 template <> struct fmt::formatter<crimson::osd::InternalClientRequest> : fmt::ostream_formatter {}; #endif
1,855
25.898551
99
h
null
ceph-main/src/crimson/osd/osd_operations/logmissing_request.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "logmissing_request.h" #include "common/Formatter.h" #include "crimson/osd/osd.h" #include "crimson/osd/osd_connection_priv.h" #include "crimson/osd/osd_operation_external_tracking.h" #include "crimson/osd/pg.h" namespace { seastar::logger& logger() { return crimson::get_logger(ceph_subsys_osd); } } namespace crimson::osd { LogMissingRequest::LogMissingRequest(crimson::net::ConnectionRef&& conn, Ref<MOSDPGUpdateLogMissing> &&req) : conn{std::move(conn)}, req{std::move(req)} {} void LogMissingRequest::print(std::ostream& os) const { os << "LogMissingRequest(" << "from=" << req->from << " req=" << *req << ")"; } void LogMissingRequest::dump_detail(Formatter *f) const { f->open_object_section("LogMissingRequest"); f->dump_stream("req_tid") << req->get_tid(); f->dump_stream("pgid") << req->get_spg(); f->dump_unsigned("map_epoch", req->get_map_epoch()); f->dump_unsigned("min_epoch", req->get_min_epoch()); f->dump_stream("entries") << req->entries; f->dump_stream("from") << req->from; f->close_section(); } ConnectionPipeline &LogMissingRequest::get_connection_pipeline() { return get_osd_priv(conn.get()).replicated_request_conn_pipeline; } ClientRequest::PGPipeline &LogMissingRequest::client_pp(PG &pg) { return pg.request_pg_pipeline; } seastar::future<> LogMissingRequest::with_pg( ShardServices &shard_services, Ref<PG> pg) { logger().debug("{}: LogMissingRequest::with_pg", *this); IRef ref = this; return interruptor::with_interruption([this, pg] { return pg->do_update_log_missing(req, conn); }, [ref](std::exception_ptr) { return seastar::now(); }, pg); } }
1,771
24.681159
72
cc
null
ceph-main/src/crimson/osd/osd_operations/logmissing_request.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include "crimson/net/Connection.h" #include "crimson/osd/osdmap_gate.h" #include "crimson/osd/osd_operation.h" #include "crimson/osd/osd_operations/client_request.h" #include "crimson/osd/pg_map.h" #include "crimson/common/type_helpers.h" #include "messages/MOSDPGUpdateLogMissing.h" namespace ceph { class Formatter; } namespace crimson::osd { class ShardServices; class OSD; class PG; class LogMissingRequest final : public PhasedOperationT<LogMissingRequest> { public: static constexpr OperationTypeCode type = OperationTypeCode::logmissing_request; LogMissingRequest(crimson::net::ConnectionRef&&, Ref<MOSDPGUpdateLogMissing>&&); void print(std::ostream &) const final; void dump_detail(ceph::Formatter* f) const final; static constexpr bool can_create() { return false; } spg_t get_pgid() const { return req->get_spg(); } PipelineHandle &get_handle() { return handle; } epoch_t get_epoch() const { return req->get_min_epoch(); } ConnectionPipeline &get_connection_pipeline(); seastar::future<crimson::net::ConnectionFRef> prepare_remote_submission() { assert(conn); return conn.get_foreign( ).then([this](auto f_conn) { conn.reset(); return f_conn; }); } void finish_remote_submission(crimson::net::ConnectionFRef _conn) { assert(!conn); conn = make_local_shared_foreign(std::move(_conn)); } seastar::future<> with_pg( ShardServices &shard_services, Ref<PG> pg); std::tuple< StartEvent, ConnectionPipeline::AwaitActive::BlockingEvent, ConnectionPipeline::AwaitMap::BlockingEvent, ConnectionPipeline::GetPG::BlockingEvent, PGMap::PGCreationBlockingEvent, OSD_OSDMapGate::OSDMapBlocker::BlockingEvent > tracking_events; private: ClientRequest::PGPipeline &client_pp(PG &pg); crimson::net::ConnectionRef conn; // must be after `conn` to ensure the ConnectionPipeline's is alive PipelineHandle handle; Ref<MOSDPGUpdateLogMissing> req; }; } #if FMT_VERSION >= 90000 template <> struct fmt::formatter<crimson::osd::LogMissingRequest> : fmt::ostream_formatter {}; #endif
2,211
26.65
95
h
null
ceph-main/src/crimson/osd/osd_operations/logmissing_request_reply.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "logmissing_request_reply.h" #include "common/Formatter.h" #include "crimson/osd/osd.h" #include "crimson/osd/osd_connection_priv.h" #include "crimson/osd/osd_operation_external_tracking.h" #include "crimson/osd/pg.h" namespace { seastar::logger& logger() { return crimson::get_logger(ceph_subsys_osd); } } namespace crimson::osd { LogMissingRequestReply::LogMissingRequestReply( crimson::net::ConnectionRef&& conn, Ref<MOSDPGUpdateLogMissingReply> &&req) : conn{std::move(conn)}, req{std::move(req)} {} void LogMissingRequestReply::print(std::ostream& os) const { os << "LogMissingRequestReply(" << "from=" << req->from << " req=" << *req << ")"; } void LogMissingRequestReply::dump_detail(Formatter *f) const { f->open_object_section("LogMissingRequestReply"); f->dump_stream("rep_tid") << req->get_tid(); f->dump_stream("pgid") << req->get_spg(); f->dump_unsigned("map_epoch", req->get_map_epoch()); f->dump_unsigned("min_epoch", req->get_min_epoch()); f->dump_stream("from") << req->from; f->close_section(); } ConnectionPipeline &LogMissingRequestReply::get_connection_pipeline() { return get_osd_priv(conn.get()).replicated_request_conn_pipeline; } ClientRequest::PGPipeline &LogMissingRequestReply::client_pp(PG &pg) { return pg.request_pg_pipeline; } seastar::future<> LogMissingRequestReply::with_pg( ShardServices &shard_services, Ref<PG> pg) { logger().debug("{}: LogMissingRequestReply::with_pg", *this); IRef ref = this; return interruptor::with_interruption([this, pg] { return pg->do_update_log_missing_reply(std::move(req)); }, [ref](std::exception_ptr) { return seastar::now(); }, pg); } }
1,794
25.014493
70
cc
null
ceph-main/src/crimson/osd/osd_operations/logmissing_request_reply.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include "crimson/net/Connection.h" #include "crimson/osd/osdmap_gate.h" #include "crimson/osd/osd_operation.h" #include "crimson/osd/osd_operations/client_request.h" #include "crimson/osd/pg_map.h" #include "crimson/common/type_helpers.h" #include "messages/MOSDPGUpdateLogMissingReply.h" namespace ceph { class Formatter; } namespace crimson::osd { class ShardServices; class OSD; class PG; class LogMissingRequestReply final : public PhasedOperationT<LogMissingRequestReply> { public: static constexpr OperationTypeCode type = OperationTypeCode::logmissing_request_reply; LogMissingRequestReply(crimson::net::ConnectionRef&&, Ref<MOSDPGUpdateLogMissingReply>&&); void print(std::ostream &) const final; void dump_detail(ceph::Formatter* f) const final; static constexpr bool can_create() { return false; } spg_t get_pgid() const { return req->get_spg(); } PipelineHandle &get_handle() { return handle; } epoch_t get_epoch() const { return req->get_min_epoch(); } ConnectionPipeline &get_connection_pipeline(); seastar::future<crimson::net::ConnectionFRef> prepare_remote_submission() { assert(conn); return conn.get_foreign( ).then([this](auto f_conn) { conn.reset(); return f_conn; }); } void finish_remote_submission(crimson::net::ConnectionFRef _conn) { assert(!conn); conn = make_local_shared_foreign(std::move(_conn)); } seastar::future<> with_pg( ShardServices &shard_services, Ref<PG> pg); std::tuple< StartEvent, ConnectionPipeline::AwaitActive::BlockingEvent, ConnectionPipeline::AwaitMap::BlockingEvent, ConnectionPipeline::GetPG::BlockingEvent, PGMap::PGCreationBlockingEvent, OSD_OSDMapGate::OSDMapBlocker::BlockingEvent > tracking_events; private: ClientRequest::PGPipeline &client_pp(PG &pg); crimson::net::ConnectionRef conn; // must be after `conn` to ensure the ConnectionPipeline's is alive PipelineHandle handle; Ref<MOSDPGUpdateLogMissingReply> req; }; } #if FMT_VERSION >= 90000 template <> struct fmt::formatter<crimson::osd::LogMissingRequestReply> : fmt::ostream_formatter {}; #endif
2,252
27.1625
100
h
null
ceph-main/src/crimson/osd/osd_operations/osdop_params.h
#pragma once

#include "messages/MOSDOp.h"
#include "osd/osd_types.h"
#include "crimson/common/type_helpers.h"

// The fields in this struct are parameters that may be needed at multiple
// levels of processing. They are grouped into this struct to avoid passing
// each of them as a separate method parameter.
struct osd_op_params_t {
  osd_reqid_t req_id;
  utime_t mtime;
  eversion_t at_version;
  eversion_t pg_trim_to;
  eversion_t min_last_complete_ondisk;
  eversion_t last_complete;
  version_t user_at_version = 0;
  bool user_modify = false;
  ObjectCleanRegions clean_regions;

  osd_op_params_t() = default;
};
627
26.304348
74
h
null
ceph-main/src/crimson/osd/osd_operations/peering_event.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include <seastar/core/future.hh> #include <seastar/core/sleep.hh> #include "messages/MOSDPGLog.h" #include "common/Formatter.h" #include "crimson/osd/pg.h" #include "crimson/osd/osd.h" #include "crimson/osd/osd_operation_external_tracking.h" #include "crimson/osd/osd_operations/peering_event.h" #include "crimson/osd/osd_connection_priv.h" namespace { seastar::logger& logger() { return crimson::get_logger(ceph_subsys_osd); } } namespace crimson::osd { template <class T> void PeeringEvent<T>::print(std::ostream &lhs) const { lhs << "PeeringEvent(" << "from=" << from << " pgid=" << pgid << " sent=" << evt.get_epoch_sent() << " requested=" << evt.get_epoch_requested() << " evt=" << evt.get_desc() << ")"; } template <class T> void PeeringEvent<T>::dump_detail(Formatter *f) const { f->open_object_section("PeeringEvent"); f->dump_stream("from") << from; f->dump_stream("pgid") << pgid; f->dump_int("sent", evt.get_epoch_sent()); f->dump_int("requested", evt.get_epoch_requested()); f->dump_string("evt", evt.get_desc()); f->open_array_section("events"); { std::apply([f](auto&... events) { (..., events.dump(f)); }, static_cast<const T*>(this)->tracking_events); } f->close_section(); f->close_section(); } template <class T> PGPeeringPipeline &PeeringEvent<T>::peering_pp(PG &pg) { return pg.peering_request_pg_pipeline; } template <class T> seastar::future<> PeeringEvent<T>::with_pg( ShardServices &shard_services, Ref<PG> pg) { if (!pg) { logger().warn("{}: pg absent, did not create", *this); on_pg_absent(shard_services); that()->get_handle().exit(); return complete_rctx_no_pg(shard_services); } using interruptor = typename T::interruptor; return interruptor::with_interruption([this, pg, &shard_services] { logger().debug("{}: pg present", *this); return this->template enter_stage<interruptor>(peering_pp(*pg).await_map ).then_interruptible([this, pg] { return this->template with_blocking_event< PG_OSDMapGate::OSDMapBlocker::BlockingEvent >([this, pg](auto &&trigger) { return pg->osdmap_gate.wait_for_map( std::move(trigger), evt.get_epoch_sent()); }); }).then_interruptible([this, pg](auto) { return this->template enter_stage<interruptor>(peering_pp(*pg).process); }).then_interruptible([this, pg, &shard_services] { return pg->do_peering_event(evt, ctx ).then_interruptible([this, pg, &shard_services] { that()->get_handle().exit(); return complete_rctx(shard_services, pg); }); }).then_interruptible([pg, &shard_services]() -> typename T::template interruptible_future<> { if (!pg->get_need_up_thru()) { return seastar::now(); } return shard_services.send_alive(pg->get_same_interval_since()); }).then_interruptible([&shard_services] { return shard_services.send_pg_temp(); }); }, [this](std::exception_ptr ep) { logger().debug("{}: interrupted with {}", *this, ep); }, pg); } template <class T> void PeeringEvent<T>::on_pg_absent(ShardServices &) { logger().debug("{}: pg absent, dropping", *this); } template <class T> typename PeeringEvent<T>::template interruptible_future<> PeeringEvent<T>::complete_rctx(ShardServices &shard_services, Ref<PG> pg) { logger().debug("{}: submitting ctx", *this); return shard_services.dispatch_context( pg->get_collection_ref(), std::move(ctx)); } ConnectionPipeline &RemotePeeringEvent::get_connection_pipeline() { return get_osd_priv(conn.get()).peering_request_conn_pipeline; } void RemotePeeringEvent::on_pg_absent(ShardServices &shard_services) { if (auto& e = get_event().get_event(); 
e.dynamic_type() == MQuery::static_type()) { const auto map_epoch = shard_services.get_map()->get_epoch(); const auto& q = static_cast<const MQuery&>(e); const pg_info_t empty{spg_t{pgid.pgid, q.query.to}}; if (q.query.type == q.query.LOG || q.query.type == q.query.FULLLOG) { auto m = crimson::make_message<MOSDPGLog>(q.query.from, q.query.to, map_epoch, empty, q.query.epoch_sent); ctx.send_osd_message(q.from.osd, std::move(m)); } else { ctx.send_notify(q.from.osd, {q.query.from, q.query.to, q.query.epoch_sent, map_epoch, empty, PastIntervals{}}); } } } RemotePeeringEvent::interruptible_future<> RemotePeeringEvent::complete_rctx( ShardServices &shard_services, Ref<PG> pg) { if (pg) { return PeeringEvent::complete_rctx(shard_services, pg); } else { return shard_services.dispatch_context_messages(std::move(ctx)); } } seastar::future<> RemotePeeringEvent::complete_rctx_no_pg( ShardServices &shard_services) { return shard_services.dispatch_context_messages(std::move(ctx)); } seastar::future<> LocalPeeringEvent::start() { logger().debug("{}: start", *this); IRef ref = this; auto maybe_delay = seastar::now(); if (delay) { maybe_delay = seastar::sleep( std::chrono::milliseconds(std::lround(delay * 1000))); } return maybe_delay.then([this] { return with_pg(pg->get_shard_services(), pg); }).finally([ref=std::move(ref)] { logger().debug("{}: complete", *ref); }); } LocalPeeringEvent::~LocalPeeringEvent() {} template class PeeringEvent<RemotePeeringEvent>; template class PeeringEvent<LocalPeeringEvent>; }
5,487
27.732984
78
cc
null
ceph-main/src/crimson/osd/osd_operations/peering_event.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <iostream> #include <seastar/core/future.hh> #include "crimson/osd/osdmap_gate.h" #include "crimson/osd/osd_operation.h" #include "osd/osd_types.h" #include "osd/PGPeeringEvent.h" #include "osd/PeeringState.h" namespace ceph { class Formatter; } namespace crimson::osd { class OSD; class ShardServices; class PG; class BackfillRecovery; class PGPeeringPipeline { struct AwaitMap : OrderedExclusivePhaseT<AwaitMap> { static constexpr auto type_name = "PeeringEvent::PGPipeline::await_map"; } await_map; struct Process : OrderedExclusivePhaseT<Process> { static constexpr auto type_name = "PeeringEvent::PGPipeline::process"; } process; template <class T> friend class PeeringEvent; friend class LocalPeeringEvent; friend class RemotePeeringEvent; friend class PGAdvanceMap; friend class BackfillRecovery; }; template <class T> class PeeringEvent : public PhasedOperationT<T> { T* that() { return static_cast<T*>(this); } const T* that() const { return static_cast<const T*>(this); } public: static constexpr OperationTypeCode type = OperationTypeCode::peering_event; protected: PGPeeringPipeline &peering_pp(PG &pg); PeeringCtx ctx; pg_shard_t from; spg_t pgid; float delay = 0; PGPeeringEvent evt; const pg_shard_t get_from() const { return from; } const spg_t get_pgid() const { return pgid; } const PGPeeringEvent &get_event() const { return evt; } virtual void on_pg_absent(ShardServices &); virtual typename PeeringEvent::template interruptible_future<> complete_rctx(ShardServices &, Ref<PG>); virtual seastar::future<> complete_rctx_no_pg( ShardServices &shard_services ) { return seastar::now();} public: template <typename... Args> PeeringEvent( const pg_shard_t &from, const spg_t &pgid, Args&&... args) : from(from), pgid(pgid), evt(std::forward<Args>(args)...) {} template <typename... Args> PeeringEvent( const pg_shard_t &from, const spg_t &pgid, float delay, Args&&... args) : from(from), pgid(pgid), delay(delay), evt(std::forward<Args>(args)...) {} void print(std::ostream &) const final; void dump_detail(ceph::Formatter* f) const final; seastar::future<> with_pg( ShardServices &shard_services, Ref<PG> pg); }; class RemotePeeringEvent : public PeeringEvent<RemotePeeringEvent> { protected: crimson::net::ConnectionRef conn; // must be after conn due to ConnectionPipeline's life-time PipelineHandle handle; void on_pg_absent(ShardServices &) final; PeeringEvent::interruptible_future<> complete_rctx( ShardServices &shard_services, Ref<PG> pg) override; seastar::future<> complete_rctx_no_pg( ShardServices &shard_services ) override; public: class OSDPipeline { struct AwaitActive : OrderedExclusivePhaseT<AwaitActive> { static constexpr auto type_name = "PeeringRequest::OSDPipeline::await_active"; } await_active; friend class RemotePeeringEvent; }; template <typename... Args> RemotePeeringEvent(crimson::net::ConnectionRef conn, Args&&... 
args) : PeeringEvent(std::forward<Args>(args)...), conn(conn) {} std::tuple< StartEvent, ConnectionPipeline::AwaitActive::BlockingEvent, ConnectionPipeline::AwaitMap::BlockingEvent, OSD_OSDMapGate::OSDMapBlocker::BlockingEvent, ConnectionPipeline::GetPG::BlockingEvent, PGMap::PGCreationBlockingEvent, PGPeeringPipeline::AwaitMap::BlockingEvent, PG_OSDMapGate::OSDMapBlocker::BlockingEvent, PGPeeringPipeline::Process::BlockingEvent, OSDPipeline::AwaitActive::BlockingEvent, CompletionEvent > tracking_events; static constexpr bool can_create() { return true; } auto get_create_info() { return std::move(evt.create_info); } spg_t get_pgid() const { return pgid; } PipelineHandle &get_handle() { return handle; } epoch_t get_epoch() const { return evt.get_epoch_sent(); } ConnectionPipeline &get_connection_pipeline(); seastar::future<crimson::net::ConnectionFRef> prepare_remote_submission() { assert(conn); return conn.get_foreign( ).then([this](auto f_conn) { conn.reset(); return f_conn; }); } void finish_remote_submission(crimson::net::ConnectionFRef _conn) { assert(!conn); conn = make_local_shared_foreign(std::move(_conn)); } }; class LocalPeeringEvent final : public PeeringEvent<LocalPeeringEvent> { protected: Ref<PG> pg; PipelineHandle handle; public: template <typename... Args> LocalPeeringEvent(Ref<PG> pg, Args&&... args) : PeeringEvent(std::forward<Args>(args)...), pg(pg) {} seastar::future<> start(); virtual ~LocalPeeringEvent(); PipelineHandle &get_handle() { return handle; } std::tuple< StartEvent, PGPeeringPipeline::AwaitMap::BlockingEvent, PG_OSDMapGate::OSDMapBlocker::BlockingEvent, PGPeeringPipeline::Process::BlockingEvent, CompletionEvent > tracking_events; }; } #if FMT_VERSION >= 90000 template <> struct fmt::formatter<crimson::osd::LocalPeeringEvent> : fmt::ostream_formatter {}; template <> struct fmt::formatter<crimson::osd::RemotePeeringEvent> : fmt::ostream_formatter {}; template <class T> struct fmt::formatter<crimson::osd::PeeringEvent<T>> : fmt::ostream_formatter {}; #endif
5,450
25.206731
100
h
null
ceph-main/src/crimson/osd/osd_operations/pg_advance_map.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include <seastar/core/future.hh> #include "include/types.h" #include "common/Formatter.h" #include "crimson/osd/pg.h" #include "crimson/osd/osdmap_service.h" #include "crimson/osd/shard_services.h" #include "crimson/osd/osd_operations/pg_advance_map.h" #include "crimson/osd/osd_operation_external_tracking.h" #include "osd/PeeringState.h" namespace { seastar::logger& logger() { return crimson::get_logger(ceph_subsys_osd); } } namespace crimson::osd { PGAdvanceMap::PGAdvanceMap( ShardServices &shard_services, Ref<PG> pg, epoch_t to, PeeringCtx &&rctx, bool do_init) : shard_services(shard_services), pg(pg), to(to), rctx(std::move(rctx)), do_init(do_init) { logger().debug("{}: created", *this); } PGAdvanceMap::~PGAdvanceMap() {} void PGAdvanceMap::print(std::ostream &lhs) const { lhs << "PGAdvanceMap(" << "pg=" << pg->get_pgid() << " from=" << (from ? *from : -1) << " to=" << to; if (do_init) { lhs << " do_init"; } lhs << ")"; } void PGAdvanceMap::dump_detail(Formatter *f) const { f->open_object_section("PGAdvanceMap"); f->dump_stream("pgid") << pg->get_pgid(); if (from) { f->dump_int("from", *from); } f->dump_int("to", to); f->dump_bool("do_init", do_init); f->close_section(); } PGPeeringPipeline &PGAdvanceMap::peering_pp(PG &pg) { return pg.peering_request_pg_pipeline; } seastar::future<> PGAdvanceMap::start() { using cached_map_t = OSDMapService::cached_map_t; logger().debug("{}: start", *this); IRef ref = this; return enter_stage<>( peering_pp(*pg).process ).then([this] { /* * PGAdvanceMap is scheduled at pg creation and when * broadcasting new osdmaps to pgs. We are not able to serialize * between the two different PGAdvanceMap callers since a new pg * will get advanced to the latest osdmap at it's creation. * As a result, we may need to adjust the PGAdvance operation * 'from' epoch. * See: https://tracker.ceph.com/issues/61744 */ from = pg->get_osdmap_epoch(); auto fut = seastar::now(); if (do_init) { fut = pg->handle_initialize(rctx ).then([this] { return pg->handle_activate_map(rctx); }); } return fut.then([this] { ceph_assert(std::cmp_less_equal(*from, to)); return seastar::do_for_each( boost::make_counting_iterator(*from + 1), boost::make_counting_iterator(to + 1), [this](epoch_t next_epoch) { logger().debug("{}: start: getting map {}", *this, next_epoch); return shard_services.get_map(next_epoch).then( [this] (cached_map_t&& next_map) { logger().debug("{}: advancing map to {}", *this, next_map->get_epoch()); return pg->handle_advance_map(next_map, rctx); }); }).then([this] { return pg->handle_activate_map(rctx).then([this] { logger().debug("{}: map activated", *this); if (do_init) { shard_services.pg_created(pg->get_pgid(), pg); logger().info("PGAdvanceMap::start new pg {}", *pg); } return seastar::when_all_succeed( pg->get_need_up_thru() ? shard_services.send_alive( pg->get_same_interval_since()) : seastar::now(), shard_services.dispatch_context( pg->get_collection_ref(), std::move(rctx))); }); }).then_unpack([this] { logger().debug("{}: sending pg temp", *this); return shard_services.send_pg_temp(); }); }); }).then([this, ref=std::move(ref)] { logger().debug("{}: complete", *this); }); } }
3,601
26.496183
70
cc
null
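The block comment in PGAdvanceMap::start() in the pg_advance_map.cc row above explains why `from` is re-read from the PG when the operation finally runs; the op then applies every osdmap in the range (from, to], one epoch at a time. A hedged sketch of just that iteration follows; the epoch values are invented for illustration, and the real code uses boost counting iterators with asynchronous map fetches rather than a plain loop.

// Hedged sketch of the epoch range walked by PGAdvanceMap.
#include <cstdint>
#include <iostream>

using epoch_t = std::uint32_t;

int main() {
  const epoch_t from = 10;  // epoch the PG is already at when the op runs
  const epoch_t to = 13;    // epoch the op was scheduled to advance to
  for (epoch_t next_epoch = from + 1; next_epoch <= to; ++next_epoch) {
    std::cout << "advance PG to osdmap epoch " << next_epoch << '\n';  // 11, 12, 13
  }
}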
ceph-main/src/crimson/osd/osd_operations/pg_advance_map.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#pragma once

#include <iostream>
#include <seastar/core/future.hh>

#include "crimson/osd/osd_operation.h"
#include "crimson/osd/osd_operations/peering_event.h"
#include "osd/osd_types.h"
#include "crimson/common/type_helpers.h"

namespace ceph {
  class Formatter;
}

namespace crimson::osd {

class ShardServices;
class PG;

class PGAdvanceMap : public PhasedOperationT<PGAdvanceMap> {
public:
  static constexpr OperationTypeCode type = OperationTypeCode::pg_advance_map;

protected:
  ShardServices &shard_services;
  Ref<PG> pg;
  PipelineHandle handle;

  std::optional<epoch_t> from;
  epoch_t to;

  PeeringCtx rctx;
  const bool do_init;

public:
  PGAdvanceMap(
    ShardServices &shard_services, Ref<PG> pg, epoch_t to,
    PeeringCtx &&rctx, bool do_init);
  ~PGAdvanceMap();

  void print(std::ostream &) const final;
  void dump_detail(ceph::Formatter *f) const final;
  seastar::future<> start();
  PipelineHandle &get_handle() { return handle; }

  std::tuple<
    PGPeeringPipeline::Process::BlockingEvent
  > tracking_events;

private:
  PGPeeringPipeline &peering_pp(PG &pg);
};

}

#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::osd::PGAdvanceMap> : fmt::ostream_formatter {};
#endif
1,325
20.387097
90
h
null
ceph-main/src/crimson/osd/osd_operations/recovery_subrequest.cc
#include <fmt/format.h>
#include <fmt/ostream.h>

#include "crimson/osd/osd_operations/recovery_subrequest.h"
#include "crimson/osd/pg.h"
#include "crimson/osd/osd_connection_priv.h"

namespace {
  seastar::logger& logger() {
    return crimson::get_logger(ceph_subsys_osd);
  }
}

namespace crimson {
  template <>
  struct EventBackendRegistry<osd::RecoverySubRequest> {
    static std::tuple<> get_backends() {
      return {};
    }
  };
}

namespace crimson::osd {

seastar::future<> RecoverySubRequest::with_pg(
  ShardServices &shard_services, Ref<PG> pgref)
{
  logger().debug("{}: {}", "RecoverySubRequest::with_pg", *this);

  track_event<StartEvent>();
  IRef opref = this;
  return interruptor::with_interruption([this, pgref] {
    return pgref->get_recovery_backend()->handle_recovery_op(m, conn);
  }, [](std::exception_ptr) {
    return seastar::now();
  }, pgref).finally([this, opref, pgref] {
    track_event<CompletionEvent>();
  });
}

ConnectionPipeline &RecoverySubRequest::get_connection_pipeline()
{
  return get_osd_priv(conn.get()).peering_request_conn_pipeline;
}

}
1,095
22.319149
70
cc
null
ceph-main/src/crimson/osd/osd_operations/recovery_subrequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include "osd/osd_op_util.h" #include "crimson/net/Connection.h" #include "crimson/osd/osd_operation.h" #include "crimson/osd/pg.h" #include "crimson/common/type_helpers.h" #include "messages/MOSDFastDispatchOp.h" namespace crimson::osd { class PG; class RecoverySubRequest final : public PhasedOperationT<RecoverySubRequest> { public: static constexpr OperationTypeCode type = OperationTypeCode::background_recovery_sub; RecoverySubRequest( crimson::net::ConnectionRef conn, Ref<MOSDFastDispatchOp>&& m) : conn(conn), m(m) {} void print(std::ostream& out) const final { out << *m; } void dump_detail(Formatter *f) const final { } static constexpr bool can_create() { return false; } spg_t get_pgid() const { return m->get_spg(); } PipelineHandle &get_handle() { return handle; } epoch_t get_epoch() const { return m->get_min_epoch(); } ConnectionPipeline &get_connection_pipeline(); seastar::future<crimson::net::ConnectionFRef> prepare_remote_submission() { assert(conn); return conn.get_foreign( ).then([this](auto f_conn) { conn.reset(); return f_conn; }); } void finish_remote_submission(crimson::net::ConnectionFRef _conn) { assert(!conn); conn = make_local_shared_foreign(std::move(_conn)); } seastar::future<> with_pg( ShardServices &shard_services, Ref<PG> pg); std::tuple< StartEvent, ConnectionPipeline::AwaitActive::BlockingEvent, ConnectionPipeline::AwaitMap::BlockingEvent, ConnectionPipeline::GetPG::BlockingEvent, PGMap::PGCreationBlockingEvent, OSD_OSDMapGate::OSDMapBlocker::BlockingEvent, CompletionEvent > tracking_events; private: crimson::net::ConnectionRef conn; // must be after `conn` to ensure the ConnectionPipeline's is alive PipelineHandle handle; Ref<MOSDFastDispatchOp> m; }; } #if FMT_VERSION >= 90000 template <> struct fmt::formatter<crimson::osd::RecoverySubRequest> : fmt::ostream_formatter {}; #endif
2,106
24.695122
96
h
null
ceph-main/src/crimson/osd/osd_operations/replicated_request.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "replicated_request.h" #include "common/Formatter.h" #include "crimson/osd/osd.h" #include "crimson/osd/osd_connection_priv.h" #include "crimson/osd/osd_operation_external_tracking.h" #include "crimson/osd/pg.h" namespace { seastar::logger& logger() { return crimson::get_logger(ceph_subsys_osd); } } namespace crimson::osd { RepRequest::RepRequest(crimson::net::ConnectionRef&& conn, Ref<MOSDRepOp> &&req) : conn{std::move(conn)}, req{std::move(req)} {} void RepRequest::print(std::ostream& os) const { os << "RepRequest(" << "from=" << req->from << " req=" << *req << ")"; } void RepRequest::dump_detail(Formatter *f) const { f->open_object_section("RepRequest"); f->dump_stream("reqid") << req->reqid; f->dump_stream("pgid") << req->get_spg(); f->dump_unsigned("map_epoch", req->get_map_epoch()); f->dump_unsigned("min_epoch", req->get_min_epoch()); f->dump_stream("oid") << req->poid; f->dump_stream("from") << req->from; f->close_section(); } ConnectionPipeline &RepRequest::get_connection_pipeline() { return get_osd_priv(conn.get()).replicated_request_conn_pipeline; } ClientRequest::PGPipeline &RepRequest::client_pp(PG &pg) { return pg.request_pg_pipeline; } seastar::future<> RepRequest::with_pg( ShardServices &shard_services, Ref<PG> pg) { logger().debug("{}: RepRequest::with_pg", *this); IRef ref = this; return interruptor::with_interruption([this, pg] { logger().debug("{}: pg present", *this); return this->template enter_stage<interruptor>(client_pp(*pg).await_map ).then_interruptible([this, pg] { return this->template with_blocking_event< PG_OSDMapGate::OSDMapBlocker::BlockingEvent >([this, pg](auto &&trigger) { return pg->osdmap_gate.wait_for_map( std::move(trigger), req->min_epoch); }); }).then_interruptible([this, pg] (auto) { return pg->handle_rep_op(req); }); }, [ref](std::exception_ptr) { return seastar::now(); }, pg); } }
2,121
25.197531
75
cc
null
ceph-main/src/crimson/osd/osd_operations/replicated_request.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include "crimson/net/Connection.h" #include "crimson/osd/osdmap_gate.h" #include "crimson/osd/osd_operation.h" #include "crimson/osd/pg_map.h" #include "crimson/osd/osd_operations/client_request.h" #include "crimson/common/type_helpers.h" #include "messages/MOSDRepOp.h" namespace ceph { class Formatter; } namespace crimson::osd { class ShardServices; class OSD; class PG; class RepRequest final : public PhasedOperationT<RepRequest> { public: static constexpr OperationTypeCode type = OperationTypeCode::replicated_request; RepRequest(crimson::net::ConnectionRef&&, Ref<MOSDRepOp>&&); void print(std::ostream &) const final; void dump_detail(ceph::Formatter* f) const final; static constexpr bool can_create() { return false; } spg_t get_pgid() const { return req->get_spg(); } PipelineHandle &get_handle() { return handle; } epoch_t get_epoch() const { return req->get_min_epoch(); } ConnectionPipeline &get_connection_pipeline(); seastar::future<crimson::net::ConnectionFRef> prepare_remote_submission() { assert(conn); return conn.get_foreign( ).then([this](auto f_conn) { conn.reset(); return f_conn; }); } void finish_remote_submission(crimson::net::ConnectionFRef _conn) { assert(!conn); conn = make_local_shared_foreign(std::move(_conn)); } seastar::future<> with_pg( ShardServices &shard_services, Ref<PG> pg); std::tuple< StartEvent, ConnectionPipeline::AwaitActive::BlockingEvent, ConnectionPipeline::AwaitMap::BlockingEvent, ConnectionPipeline::GetPG::BlockingEvent, ClientRequest::PGPipeline::AwaitMap::BlockingEvent, PG_OSDMapGate::OSDMapBlocker::BlockingEvent, PGMap::PGCreationBlockingEvent, OSD_OSDMapGate::OSDMapBlocker::BlockingEvent > tracking_events; private: ClientRequest::PGPipeline &client_pp(PG &pg); crimson::net::ConnectionRef conn; PipelineHandle handle; Ref<MOSDRepOp> req; }; } #if FMT_VERSION >= 90000 template <> struct fmt::formatter<crimson::osd::RepRequest> : fmt::ostream_formatter {}; #endif
2,179
25.91358
88
h
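RepRequest::tracking_events is a std::tuple holding one entry per pipeline stage the operation may cross. As a rough illustration of how such a heterogeneous tuple can be walked (for dumping, say), here is a self-contained sketch; the event types and their name members are invented for the example and are not the crimson event classes.

#include <iostream>
#include <string_view>
#include <tuple>

struct StartEvent      { static constexpr std::string_view name = "start"; };
struct AwaitMapEvent   { static constexpr std::string_view name = "await_map"; };
struct CompletionEvent { static constexpr std::string_view name = "completion"; };

int main() {
  std::tuple<StartEvent, AwaitMapEvent, CompletionEvent> tracking_events;
  std::apply([](auto&... ev) {
    ((std::cout << ev.name << '\n'), ...);   // visit each tracked event type in order
  }, tracking_events);
}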
null
ceph-main/src/crimson/osd/osd_operations/snaptrim_event.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "crimson/osd/osd_operations/snaptrim_event.h" #include "crimson/osd/ops_executer.h" #include "crimson/osd/pg.h" #include <seastar/core/sleep.hh> namespace { seastar::logger& logger() { return crimson::get_logger(ceph_subsys_osd); } } namespace crimson { template <> struct EventBackendRegistry<osd::SnapTrimEvent> { static std::tuple<> get_backends() { return {}; } }; template <> struct EventBackendRegistry<osd::SnapTrimObjSubEvent> { static std::tuple<> get_backends() { return {}; } }; } namespace crimson::osd { void SnapTrimEvent::SubOpBlocker::dump_detail(Formatter *f) const { f->open_array_section("dependent_operations"); { for (const auto &kv : subops) { f->dump_unsigned("op_id", kv.first); } } f->close_section(); } template <class... Args> void SnapTrimEvent::SubOpBlocker::emplace_back(Args&&... args) { subops.emplace_back(std::forward<Args>(args)...); }; SnapTrimEvent::remove_or_update_iertr::future<> SnapTrimEvent::SubOpBlocker::wait_completion() { return interruptor::do_for_each(subops, [](auto&& kv) { return std::move(kv.second); }); } void SnapTrimEvent::print(std::ostream &lhs) const { lhs << "SnapTrimEvent(" << "pgid=" << pg->get_pgid() << " snapid=" << snapid << " needs_pause=" << needs_pause << ")"; } void SnapTrimEvent::dump_detail(Formatter *f) const { f->open_object_section("SnapTrimEvent"); f->dump_stream("pgid") << pg->get_pgid(); f->close_section(); } SnapTrimEvent::snap_trim_ertr::future<seastar::stop_iteration> SnapTrimEvent::start() { logger().debug("{}: {}", *this, __func__); return with_pg( pg->get_shard_services(), pg ).finally([ref=IRef{this}, this] { logger().debug("{}: complete", *ref); return handle.complete(); }); } CommonPGPipeline& SnapTrimEvent::client_pp() { return pg->request_pg_pipeline; } SnapTrimEvent::snap_trim_ertr::future<seastar::stop_iteration> SnapTrimEvent::with_pg( ShardServices &shard_services, Ref<PG> _pg) { return interruptor::with_interruption([&shard_services, this] { return enter_stage<interruptor>( client_pp().wait_for_active ).then_interruptible([this] { return with_blocking_event<PGActivationBlocker::BlockingEvent, interruptor>([this] (auto&& trigger) { return pg->wait_for_active_blocker.wait(std::move(trigger)); }); }).then_interruptible([this] { return enter_stage<interruptor>( client_pp().recover_missing); }).then_interruptible([] { //return do_recover_missing(pg, get_target_oid()); return seastar::now(); }).then_interruptible([this] { return enter_stage<interruptor>( client_pp().get_obc); }).then_interruptible([this] { return enter_stage<interruptor>( client_pp().process); }).then_interruptible([&shard_services, this] { return interruptor::async([this] { std::vector<hobject_t> to_trim; using crimson::common::local_conf; const auto max = local_conf().get_val<uint64_t>("osd_pg_max_concurrent_snap_trims"); // we need to look for at least 1 snaptrim, otherwise we'll misinterpret // the ENOENT below and erase snapid. 
int r = snap_mapper.get_next_objects_to_trim( snapid, max, &to_trim); if (r == -ENOENT) { to_trim.clear(); // paranoia return to_trim; } else if (r != 0) { logger().error("{}: get_next_objects_to_trim returned {}", *this, cpp_strerror(r)); ceph_abort_msg("get_next_objects_to_trim returned an invalid code"); } else { assert(!to_trim.empty()); } logger().debug("{}: async almost done line {}", *this, __LINE__); return to_trim; }).then_interruptible([&shard_services, this] (const auto& to_trim) { if (to_trim.empty()) { // the legit ENOENT -> done logger().debug("{}: to_trim is empty! Stopping iteration", *this); return snap_trim_iertr::make_ready_future<seastar::stop_iteration>( seastar::stop_iteration::yes); } for (const auto& object : to_trim) { logger().debug("{}: trimming {}", *this, object); auto [op, fut] = shard_services.start_operation_may_interrupt< interruptor, SnapTrimObjSubEvent>( pg, object, snapid); subop_blocker.emplace_back( op->get_id(), std::move(fut) ); } return enter_stage<interruptor>( wait_subop ).then_interruptible([this] { logger().debug("{}: awaiting completion", *this); return subop_blocker.wait_completion(); }).safe_then_interruptible([this] { if (!needs_pause) { return interruptor::now(); } // let's know operators we're waiting return enter_stage<interruptor>( wait_trim_timer ).then_interruptible([this] { using crimson::common::local_conf; const auto time_to_sleep = local_conf().template get_val<double>("osd_snap_trim_sleep"); logger().debug("{}: time_to_sleep {}", *this, time_to_sleep); // TODO: this logic should be more sophisticated and distinguish // between SSDs, HDDs and the hybrid case return seastar::sleep( std::chrono::milliseconds(std::lround(time_to_sleep * 1000))); }); }).safe_then_interruptible([this] { logger().debug("{}: all completed", *this); return snap_trim_iertr::make_ready_future<seastar::stop_iteration>( seastar::stop_iteration::no); }); }); }); }, [this](std::exception_ptr eptr) -> snap_trim_ertr::future<seastar::stop_iteration> { logger().debug("{}: interrupted {}", *this, eptr); return crimson::ct_error::eagain::make(); }, pg); } CommonPGPipeline& SnapTrimObjSubEvent::client_pp() { return pg->request_pg_pipeline; } SnapTrimObjSubEvent::remove_or_update_iertr::future<> SnapTrimObjSubEvent::start() { logger().debug("{}: start", *this); return with_pg( pg->get_shard_services(), pg ).finally([ref=IRef{this}, this] { logger().debug("{}: complete", *ref); return handle.complete(); }); } SnapTrimObjSubEvent::remove_or_update_iertr::future<> SnapTrimObjSubEvent::remove_clone( ObjectContextRef obc, ObjectContextRef head_obc, ceph::os::Transaction& txn, std::vector<pg_log_entry_t>& log_entries ) { const auto p = std::find( head_obc->ssc->snapset.clones.begin(), head_obc->ssc->snapset.clones.end(), coid.snap); if (p == head_obc->ssc->snapset.clones.end()) { logger().error("{}: Snap {} not in clones", *this, coid.snap); return crimson::ct_error::enoent::make(); } assert(p != head_obc->ssc->snapset.clones.end()); snapid_t last = coid.snap; delta_stats.num_bytes -= head_obc->ssc->snapset.get_clone_bytes(last); if (p != head_obc->ssc->snapset.clones.begin()) { // not the oldest... merge overlap into next older clone std::vector<snapid_t>::iterator n = p - 1; hobject_t prev_coid = coid; prev_coid.snap = *n; // does the classical OSD really need is_present_clone(prev_coid)? 
delta_stats.num_bytes -= head_obc->ssc->snapset.get_clone_bytes(*n); head_obc->ssc->snapset.clone_overlap[*n].intersection_of( head_obc->ssc->snapset.clone_overlap[*p]); delta_stats.num_bytes += head_obc->ssc->snapset.get_clone_bytes(*n); } delta_stats.num_objects--; if (obc->obs.oi.is_dirty()) { delta_stats.num_objects_dirty--; } if (obc->obs.oi.is_omap()) { delta_stats.num_objects_omap--; } if (obc->obs.oi.is_whiteout()) { logger().debug("{}: trimming whiteout on {}", *this, coid); delta_stats.num_whiteouts--; } delta_stats.num_object_clones--; obc->obs.exists = false; head_obc->ssc->snapset.clones.erase(p); head_obc->ssc->snapset.clone_overlap.erase(last); head_obc->ssc->snapset.clone_size.erase(last); head_obc->ssc->snapset.clone_snaps.erase(last); log_entries.emplace_back( pg_log_entry_t{ pg_log_entry_t::DELETE, coid, osd_op_p.at_version, obc->obs.oi.version, 0, osd_reqid_t(), obc->obs.oi.mtime, // will be replaced in `apply_to()` 0} ); txn.remove( pg->get_collection_ref()->get_cid(), ghobject_t{coid, ghobject_t::NO_GEN, shard_id_t::NO_SHARD}); obc->obs.oi = object_info_t(coid); return OpsExecuter::snap_map_remove(coid, pg->snap_mapper, pg->osdriver, txn); } void SnapTrimObjSubEvent::remove_head_whiteout( ObjectContextRef obc, ObjectContextRef head_obc, ceph::os::Transaction& txn, std::vector<pg_log_entry_t>& log_entries ) { // NOTE: this arguably constitutes minor interference with the // tiering agent if this is a cache tier since a snap trim event // is effectively evicting a whiteout we might otherwise want to // keep around. const auto head_oid = coid.get_head(); logger().info("{}: {} removing {}", *this, coid, head_oid); log_entries.emplace_back( pg_log_entry_t{ pg_log_entry_t::DELETE, head_oid, osd_op_p.at_version, head_obc->obs.oi.version, 0, osd_reqid_t(), obc->obs.oi.mtime, // will be replaced in `apply_to()` 0} ); logger().info("{}: remove snap head", *this); object_info_t& oi = head_obc->obs.oi; delta_stats.num_objects--; if (oi.is_dirty()) { delta_stats.num_objects_dirty--; } if (oi.is_omap()) { delta_stats.num_objects_omap--; } if (oi.is_whiteout()) { logger().debug("{}: trimming whiteout on {}", *this, oi.soid); delta_stats.num_whiteouts--; } head_obc->obs.exists = false; head_obc->obs.oi = object_info_t(head_oid); txn.remove(pg->get_collection_ref()->get_cid(), ghobject_t{head_oid, ghobject_t::NO_GEN, shard_id_t::NO_SHARD}); } SnapTrimObjSubEvent::interruptible_future<> SnapTrimObjSubEvent::adjust_snaps( ObjectContextRef obc, ObjectContextRef head_obc, const std::set<snapid_t>& new_snaps, ceph::os::Transaction& txn, std::vector<pg_log_entry_t>& log_entries ) { head_obc->ssc->snapset.clone_snaps[coid.snap] = std::vector<snapid_t>(new_snaps.rbegin(), new_snaps.rend()); // we still do a 'modify' event on this object just to trigger a // snapmapper.update ... 
:( obc->obs.oi.prior_version = obc->obs.oi.version; obc->obs.oi.version = osd_op_p.at_version; ceph::bufferlist bl; encode(obc->obs.oi, bl, pg->get_osdmap()->get_features(CEPH_ENTITY_TYPE_OSD, nullptr)); txn.setattr( pg->get_collection_ref()->get_cid(), ghobject_t{coid, ghobject_t::NO_GEN, shard_id_t::NO_SHARD}, OI_ATTR, bl); log_entries.emplace_back( pg_log_entry_t{ pg_log_entry_t::MODIFY, coid, obc->obs.oi.version, obc->obs.oi.prior_version, 0, osd_reqid_t(), obc->obs.oi.mtime, 0} ); return OpsExecuter::snap_map_modify( coid, new_snaps, pg->snap_mapper, pg->osdriver, txn); } void SnapTrimObjSubEvent::update_head( ObjectContextRef obc, ObjectContextRef head_obc, ceph::os::Transaction& txn, std::vector<pg_log_entry_t>& log_entries ) { const auto head_oid = coid.get_head(); logger().info("{}: writing updated snapset on {}, snapset is {}", *this, head_oid, head_obc->ssc->snapset); log_entries.emplace_back( pg_log_entry_t{ pg_log_entry_t::MODIFY, head_oid, osd_op_p.at_version, head_obc->obs.oi.version, 0, osd_reqid_t(), obc->obs.oi.mtime, 0} ); head_obc->obs.oi.prior_version = head_obc->obs.oi.version; head_obc->obs.oi.version = osd_op_p.at_version; std::map<std::string, ceph::bufferlist, std::less<>> attrs; ceph::bufferlist bl; encode(head_obc->ssc->snapset, bl); attrs[SS_ATTR] = std::move(bl); bl.clear(); head_obc->obs.oi.encode_no_oid(bl, pg->get_osdmap()->get_features(CEPH_ENTITY_TYPE_OSD, nullptr)); attrs[OI_ATTR] = std::move(bl); txn.setattrs( pg->get_collection_ref()->get_cid(), ghobject_t{head_oid, ghobject_t::NO_GEN, shard_id_t::NO_SHARD}, attrs); } SnapTrimObjSubEvent::remove_or_update_iertr::future< SnapTrimObjSubEvent::remove_or_update_ret_t> SnapTrimObjSubEvent::remove_or_update( ObjectContextRef obc, ObjectContextRef head_obc) { auto citer = head_obc->ssc->snapset.clone_snaps.find(coid.snap); if (citer == head_obc->ssc->snapset.clone_snaps.end()) { logger().error("{}: No clone_snaps in snapset {} for object {}", *this, head_obc->ssc->snapset, coid); return crimson::ct_error::enoent::make(); } const auto& old_snaps = citer->second; if (old_snaps.empty()) { logger().error("{}: no object info snaps for object {}", *this, coid); return crimson::ct_error::enoent::make(); } if (head_obc->ssc->snapset.seq == 0) { logger().error("{}: no snapset.seq for object {}", *this, coid); return crimson::ct_error::enoent::make(); } const OSDMapRef& osdmap = pg->get_osdmap(); std::set<snapid_t> new_snaps; for (const auto& old_snap : old_snaps) { if (!osdmap->in_removed_snaps_queue(pg->get_info().pgid.pgid.pool(), old_snap) && old_snap != snap_to_trim) { new_snaps.insert(old_snap); } } return seastar::do_with(ceph::os::Transaction{}, [=, this](auto &txn) { std::vector<pg_log_entry_t> log_entries{}; int64_t num_objects_before_trim = delta_stats.num_objects; osd_op_p.at_version = pg->next_version(); auto ret = remove_or_update_iertr::now(); if (new_snaps.empty()) { // remove clone from snapset logger().info("{}: {} snaps {} -> {} ... 
deleting", *this, coid, old_snaps, new_snaps); ret = remove_clone(obc, head_obc, txn, log_entries); } else { // save adjusted snaps for this object logger().info("{}: {} snaps {} -> {}", *this, coid, old_snaps, new_snaps); ret = adjust_snaps(obc, head_obc, new_snaps, txn, log_entries); } return std::move(ret).safe_then_interruptible( [&txn, obc, num_objects_before_trim, log_entries=std::move(log_entries), head_obc=std::move(head_obc), this]() mutable { osd_op_p.at_version = pg->next_version(); // save head snapset logger().debug("{}: {} new snapset {} on {}", *this, coid, head_obc->ssc->snapset, head_obc->obs.oi); if (head_obc->ssc->snapset.clones.empty() && head_obc->obs.oi.is_whiteout()) { remove_head_whiteout(obc, head_obc, txn, log_entries); } else { update_head(obc, head_obc, txn, log_entries); } // Stats reporting - Set number of objects trimmed if (num_objects_before_trim > delta_stats.num_objects) { //int64_t num_objects_trimmed = // num_objects_before_trim - delta_stats.num_objects; //add_objects_trimmed_count(num_objects_trimmed); } }).safe_then_interruptible( [&txn, log_entries=std::move(log_entries)] () mutable { return remove_or_update_iertr::make_ready_future<remove_or_update_ret_t>( std::make_pair(std::move(txn), std::move(log_entries))); }); }); } SnapTrimObjSubEvent::remove_or_update_iertr::future<> SnapTrimObjSubEvent::with_pg( ShardServices &shard_services, Ref<PG> _pg) { return enter_stage<interruptor>( client_pp().wait_for_active ).then_interruptible([this] { return with_blocking_event<PGActivationBlocker::BlockingEvent, interruptor>([this] (auto&& trigger) { return pg->wait_for_active_blocker.wait(std::move(trigger)); }); }).then_interruptible([this] { return enter_stage<interruptor>( client_pp().recover_missing); }).then_interruptible([] { //return do_recover_missing(pg, get_target_oid()); return seastar::now(); }).then_interruptible([this] { return enter_stage<interruptor>( client_pp().get_obc); }).then_interruptible([this] { logger().debug("{}: getting obc for {}", *this, coid); // end of commonality // with_head_and_clone_obc lock both clone's and head's obcs return pg->obc_loader.with_head_and_clone_obc<RWState::RWWRITE>( coid, [this](auto head_obc, auto clone_obc) { logger().debug("{}: got clone_obc={}", *this, clone_obc->get_oid()); return enter_stage<interruptor>( client_pp().process ).then_interruptible( [this,clone_obc=std::move(clone_obc), head_obc=std::move(head_obc)]() mutable { logger().debug("{}: processing clone_obc={}", *this, clone_obc->get_oid()); return remove_or_update( clone_obc, head_obc ).safe_then_unpack_interruptible([clone_obc, this] (auto&& txn, auto&& log_entries) mutable { auto [submitted, all_completed] = pg->submit_transaction( std::move(clone_obc), std::move(txn), std::move(osd_op_p), std::move(log_entries)); return submitted.then_interruptible( [all_completed=std::move(all_completed), this] () mutable { return enter_stage<interruptor>( wait_repop ).then_interruptible([all_completed=std::move(all_completed)] () mutable { return std::move(all_completed); }); }); }); }); }).handle_error_interruptible( remove_or_update_iertr::pass_further{}, crimson::ct_error::assert_all{"unexpected error in SnapTrimObjSubEvent"} ); }); } void SnapTrimObjSubEvent::print(std::ostream &lhs) const { lhs << "SnapTrimObjSubEvent(" << "coid=" << coid << " snapid=" << snap_to_trim << ")"; } void SnapTrimObjSubEvent::dump_detail(Formatter *f) const { f->open_object_section("SnapTrimObjSubEvent"); f->dump_stream("coid") << coid; f->close_section(); } } // 
namespace crimson::osd
18,261
31.963899
124
cc
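The core decision in SnapTrimObjSubEvent::remove_or_update is computing the clone's surviving snaps: the old snaps minus the snap being trimmed and minus anything already queued for removal in the OSDMap, with an empty result meaning the clone itself is deleted. A minimal standalone sketch of that set computation, where in_removed_queue is a hypothetical stand-in for OSDMap::in_removed_snaps_queue():

#include <cstdint>
#include <functional>
#include <iostream>
#include <set>
#include <vector>

using snapid_t = std::uint64_t;

std::set<snapid_t> surviving_snaps(
  const std::vector<snapid_t>& old_snaps,
  snapid_t snap_to_trim,
  const std::function<bool(snapid_t)>& in_removed_queue)
{
  std::set<snapid_t> new_snaps;
  for (auto s : old_snaps) {
    if (s != snap_to_trim && !in_removed_queue(s)) {
      new_snaps.insert(s);
    }
  }
  return new_snaps;
}

int main() {
  auto removed = [](snapid_t s) { return s == 3; };
  auto keep = surviving_snaps({3, 5, 7}, 5, removed);
  if (keep.empty()) {
    std::cout << "remove clone\n";
  } else {
    std::cout << "adjust snaps, keep " << keep.size() << " snap(s)\n";  // keeps {7}
  }
}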
null
ceph-main/src/crimson/osd/osd_operations/snaptrim_event.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <iostream> #include <seastar/core/future.hh> #include "crimson/osd/osdmap_gate.h" #include "crimson/osd/osd_operation.h" #include "crimson/osd/osd_operations/common/pg_pipeline.h" #include "crimson/osd/pg.h" #include "crimson/osd/pg_activation_blocker.h" #include "osd/osd_types.h" #include "osd/PGPeeringEvent.h" #include "osd/PeeringState.h" namespace ceph { class Formatter; } class SnapMapper; namespace crimson::osd { class OSD; class ShardServices; class PG; // trim up to `max` objects for snapshot `snapid class SnapTrimEvent final : public PhasedOperationT<SnapTrimEvent> { public: using remove_or_update_ertr = crimson::errorator<crimson::ct_error::enoent>; using remove_or_update_iertr = crimson::interruptible::interruptible_errorator< IOInterruptCondition, remove_or_update_ertr>; using snap_trim_ertr = remove_or_update_ertr::extend< crimson::ct_error::eagain>; using snap_trim_iertr = remove_or_update_iertr::extend< crimson::ct_error::eagain>; static constexpr OperationTypeCode type = OperationTypeCode::snaptrim_event; SnapTrimEvent(Ref<PG> pg, SnapMapper& snap_mapper, const snapid_t snapid, const bool needs_pause) : pg(std::move(pg)), snap_mapper(snap_mapper), snapid(snapid), needs_pause(needs_pause) {} void print(std::ostream &) const final; void dump_detail(ceph::Formatter* f) const final; snap_trim_ertr::future<seastar::stop_iteration> start(); snap_trim_ertr::future<seastar::stop_iteration> with_pg( ShardServices &shard_services, Ref<PG> pg); private: CommonPGPipeline& client_pp(); // bases on 998cb8c141bb89aafae298a9d5e130fbd78fe5f2 struct SubOpBlocker : crimson::BlockerT<SubOpBlocker> { static constexpr const char* type_name = "CompoundOpBlocker"; using id_done_t = std::pair<crimson::Operation::id_t, remove_or_update_iertr::future<>>; void dump_detail(Formatter *f) const final; template <class... Args> void emplace_back(Args&&... args); remove_or_update_iertr::future<> wait_completion(); private: std::vector<id_done_t> subops; } subop_blocker; // we don't need to synchronize with other instances of SnapTrimEvent; // it's here for the sake of op tracking. struct WaitSubop : OrderedConcurrentPhaseT<WaitSubop> { static constexpr auto type_name = "SnapTrimEvent::wait_subop"; } wait_subop; // an instantiator can instruct us to go over this stage and then // wait for the future to implement throttling. It is implemented // that way to for the sake of tracking ops. struct WaitTrimTimer : OrderedExclusivePhaseT<WaitTrimTimer> { static constexpr auto type_name = "SnapTrimEvent::wait_trim_timer"; } wait_trim_timer; PipelineHandle handle; Ref<PG> pg; SnapMapper& snap_mapper; const snapid_t snapid; const bool needs_pause; public: PipelineHandle& get_handle() { return handle; } std::tuple< StartEvent, CommonPGPipeline::WaitForActive::BlockingEvent, PGActivationBlocker::BlockingEvent, CommonPGPipeline::RecoverMissing::BlockingEvent, CommonPGPipeline::GetOBC::BlockingEvent, CommonPGPipeline::Process::BlockingEvent, WaitSubop::BlockingEvent, WaitTrimTimer::BlockingEvent, CompletionEvent > tracking_events; }; // remove single object. a SnapTrimEvent can create multiple subrequests. // the division of labour is needed because of the restriction that an Op // cannot revisite a pipeline's stage it already saw. 
class SnapTrimObjSubEvent : public PhasedOperationT<SnapTrimObjSubEvent> { public: using remove_or_update_ertr = crimson::errorator<crimson::ct_error::enoent>; using remove_or_update_iertr = crimson::interruptible::interruptible_errorator< IOInterruptCondition, remove_or_update_ertr>; static constexpr OperationTypeCode type = OperationTypeCode::snaptrimobj_subevent; SnapTrimObjSubEvent( Ref<PG> pg, const hobject_t& coid, snapid_t snap_to_trim) : pg(std::move(pg)), coid(coid), snap_to_trim(snap_to_trim) { } void print(std::ostream &) const final; void dump_detail(ceph::Formatter* f) const final; remove_or_update_iertr::future<> start(); remove_or_update_iertr::future<> with_pg( ShardServices &shard_services, Ref<PG> pg); CommonPGPipeline& client_pp(); private: object_stat_sum_t delta_stats; remove_or_update_iertr::future<> remove_clone( ObjectContextRef obc, ObjectContextRef head_obc, ceph::os::Transaction& txn, std::vector<pg_log_entry_t>& log_entries); void remove_head_whiteout( ObjectContextRef obc, ObjectContextRef head_obc, ceph::os::Transaction& txn, std::vector<pg_log_entry_t>& log_entries); interruptible_future<> adjust_snaps( ObjectContextRef obc, ObjectContextRef head_obc, const std::set<snapid_t>& new_snaps, ceph::os::Transaction& txn, std::vector<pg_log_entry_t>& log_entries); void update_head( ObjectContextRef obc, ObjectContextRef head_obc, ceph::os::Transaction& txn, std::vector<pg_log_entry_t>& log_entries); using remove_or_update_ret_t = std::pair<ceph::os::Transaction, std::vector<pg_log_entry_t>>; remove_or_update_iertr::future<remove_or_update_ret_t> remove_or_update(ObjectContextRef obc, ObjectContextRef head_obc); // we don't need to synchronize with other instances started by // SnapTrimEvent; it's here for the sake of op tracking. struct WaitRepop : OrderedConcurrentPhaseT<WaitRepop> { static constexpr auto type_name = "SnapTrimObjSubEvent::wait_repop"; } wait_repop; Ref<PG> pg; PipelineHandle handle; osd_op_params_t osd_op_p; const hobject_t coid; const snapid_t snap_to_trim; public: PipelineHandle& get_handle() { return handle; } std::tuple< StartEvent, CommonPGPipeline::WaitForActive::BlockingEvent, PGActivationBlocker::BlockingEvent, CommonPGPipeline::RecoverMissing::BlockingEvent, CommonPGPipeline::GetOBC::BlockingEvent, CommonPGPipeline::Process::BlockingEvent, WaitRepop::BlockingEvent, CompletionEvent > tracking_events; }; } // namespace crimson::osd #if FMT_VERSION >= 90000 template <> struct fmt::formatter<crimson::osd::SnapTrimEvent> : fmt::ostream_formatter {}; template <> struct fmt::formatter<crimson::osd::SnapTrimObjSubEvent> : fmt::ostream_formatter {}; #endif
6,538
30.287081
97
h
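SnapTrimEvent::SubOpBlocker keeps (operation id, future) pairs for the per-object sub-events and waits on all of them before the parent event continues. A rough analog of that bookkeeping using std::future instead of interruptible seastar futures:

#include <future>
#include <iostream>
#include <utility>
#include <vector>

struct SubOpBlocker {
  std::vector<std::pair<unsigned, std::future<void>>> subops;

  void emplace_back(unsigned id, std::future<void> fut) {
    subops.emplace_back(id, std::move(fut));
  }
  void wait_completion() {
    for (auto& [id, fut] : subops) {
      fut.get();                      // blocks until this subop finishes
      std::cout << "subop " << id << " done\n";
    }
  }
};

int main() {
  SubOpBlocker blocker;
  for (unsigned id = 0; id < 3; ++id) {
    blocker.emplace_back(id, std::async(std::launch::async, [] { /* pretend to trim one clone */ }));
  }
  blocker.wait_completion();
}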
null
ceph-main/src/crimson/osd/osd_operations/common/pg_pipeline.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#pragma once

#include "osd/osd_op_util.h"
#include "crimson/osd/osd_operation.h"

namespace crimson::osd {

class CommonPGPipeline {
protected:
  friend class InternalClientRequest;
  friend class SnapTrimEvent;
  friend class SnapTrimObjSubEvent;

  struct WaitForActive : OrderedExclusivePhaseT<WaitForActive> {
    static constexpr auto type_name = "CommonPGPipeline::wait_for_active";
  } wait_for_active;
  struct RecoverMissing : OrderedExclusivePhaseT<RecoverMissing> {
    static constexpr auto type_name = "CommonPGPipeline::recover_missing";
  } recover_missing;
  struct GetOBC : OrderedExclusivePhaseT<GetOBC> {
    static constexpr auto type_name = "CommonPGPipeline::get_obc";
  } get_obc;
  struct Process : OrderedExclusivePhaseT<Process> {
    static constexpr auto type_name = "CommonPGPipeline::process";
  } process;
};

} // namespace crimson::osd
970
29.34375
75
h
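The four members above are OrderedExclusivePhaseT stages: at most one operation occupies a phase at a time, and operations traverse the phases in submission order. A purely conceptual, thread-based sketch of that "exclusive, hand-over-hand" idea (this is not the seastar implementation behind OrderedExclusivePhaseT; Phase and run_op are invented for the illustration):

#include <array>
#include <cstddef>
#include <iostream>
#include <mutex>
#include <string>

struct Phase {
  std::string name;
  std::mutex gate;   // only one op may be inside the phase at a time
};

void run_op(std::array<Phase, 4>& pipeline, int op_id) {
  std::unique_lock held{pipeline[0].gate};
  std::cout << "op " << op_id << " in " << pipeline[0].name << '\n';
  for (std::size_t i = 1; i < pipeline.size(); ++i) {
    std::unique_lock next{pipeline[i].gate};   // acquire the next stage first...
    held.unlock();                             // ...then leave the previous one
    held = std::move(next);
    std::cout << "op " << op_id << " in " << pipeline[i].name << '\n';
  }
}

int main() {
  std::array<Phase, 4> pipeline{{
    {"wait_for_active"}, {"recover_missing"}, {"get_obc"}, {"process"}}};
  run_op(pipeline, 1);
  run_op(pipeline, 2);
}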
null
ceph-main/src/crimson/osd/scheduler/mclock_scheduler.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2016 Red Hat Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <memory> #include <functional> #include "crimson/osd/scheduler/mclock_scheduler.h" #include "common/dout.h" namespace dmc = crimson::dmclock; using namespace std::placeholders; #define dout_context cct #define dout_subsys ceph_subsys_osd #undef dout_prefix #define dout_prefix *_dout namespace crimson::osd::scheduler { mClockScheduler::mClockScheduler(ConfigProxy &conf) : scheduler( std::bind(&mClockScheduler::ClientRegistry::get_info, &client_registry, _1), dmc::AtLimit::Allow, conf.get_val<double>("osd_mclock_scheduler_anticipation_timeout")) { conf.add_observer(this); client_registry.update_from_config(conf); } void mClockScheduler::ClientRegistry::update_from_config(const ConfigProxy &conf) { default_external_client_info.update( conf.get_val<double>("osd_mclock_scheduler_client_res"), conf.get_val<uint64_t>("osd_mclock_scheduler_client_wgt"), conf.get_val<double>("osd_mclock_scheduler_client_lim")); internal_client_infos[ static_cast<size_t>(scheduler_class_t::background_recovery)].update( conf.get_val<double>("osd_mclock_scheduler_background_recovery_res"), conf.get_val<uint64_t>("osd_mclock_scheduler_background_recovery_wgt"), conf.get_val<double>("osd_mclock_scheduler_background_recovery_lim")); internal_client_infos[ static_cast<size_t>(scheduler_class_t::background_best_effort)].update( conf.get_val<double>("osd_mclock_scheduler_background_best_effort_res"), conf.get_val<uint64_t>("osd_mclock_scheduler_background_best_effort_wgt"), conf.get_val<double>("osd_mclock_scheduler_background_best_effort_lim")); } const dmc::ClientInfo *mClockScheduler::ClientRegistry::get_external_client( const client_profile_id_t &client) const { auto ret = external_client_infos.find(client); if (ret == external_client_infos.end()) return &default_external_client_info; else return &(ret->second); } const dmc::ClientInfo *mClockScheduler::ClientRegistry::get_info( const scheduler_id_t &id) const { switch (id.class_id) { case scheduler_class_t::immediate: ceph_assert(0 == "Cannot schedule immediate"); return (dmc::ClientInfo*)nullptr; case scheduler_class_t::repop: case scheduler_class_t::client: return get_external_client(id.client_profile_id); default: ceph_assert(static_cast<size_t>(id.class_id) < internal_client_infos.size()); return &internal_client_infos[static_cast<size_t>(id.class_id)]; } } void mClockScheduler::dump(ceph::Formatter &f) const { } void mClockScheduler::enqueue(item_t&& item) { auto id = get_scheduler_id(item); auto cost = item.params.cost; if (scheduler_class_t::immediate == item.params.klass) { immediate.push_front(std::move(item)); } else { scheduler.add_request( std::move(item), id, cost); } } void mClockScheduler::enqueue_front(item_t&& item) { immediate.push_back(std::move(item)); // TODO: item may not be immediate, update mclock machinery to permit // putting the item back in the queue } item_t mClockScheduler::dequeue() { if (!immediate.empty()) { auto ret = std::move(immediate.back()); immediate.pop_back(); return ret; } else { mclock_queue_t::PullReq result = scheduler.pull_request(); if (result.is_future()) { ceph_assert( 0 == "Not implemented, user would have to be able to be 
woken up"); return std::move(*(item_t*)nullptr); } else if (result.is_none()) { ceph_assert( 0 == "Impossible, must have checked empty() first"); return std::move(*(item_t*)nullptr); } else { ceph_assert(result.is_retn()); auto &retn = result.get_retn(); return std::move(*retn.request); } } } const char** mClockScheduler::get_tracked_conf_keys() const { static const char* KEYS[] = { "osd_mclock_scheduler_client_res", "osd_mclock_scheduler_client_wgt", "osd_mclock_scheduler_client_lim", "osd_mclock_scheduler_background_recovery_res", "osd_mclock_scheduler_background_recovery_wgt", "osd_mclock_scheduler_background_recovery_lim", "osd_mclock_scheduler_background_best_effort_res", "osd_mclock_scheduler_background_best_effort_wgt", "osd_mclock_scheduler_background_best_effort_lim", NULL }; return KEYS; } void mClockScheduler::handle_conf_change( const ConfigProxy& conf, const std::set<std::string> &changed) { client_registry.update_from_config(conf); } }
4,857
28.26506
81
cc
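ClientRegistry::get_external_client above falls back to the default profile built from the osd_mclock_scheduler_client_* options whenever a client has no explicit (reservation, weight, limit) entry. A simplified, self-contained mirror of that lookup, with a local ClientInfo standing in for crimson::dmclock::ClientInfo:

#include <compare>
#include <cstdint>
#include <iostream>
#include <map>

struct ClientInfo { double res; std::uint64_t wgt; double lim; };

struct ClientProfileId {
  std::uint64_t client_id;
  std::uint64_t profile_id;
  auto operator<=>(const ClientProfileId&) const = default;
};

class ClientRegistry {
  ClientInfo default_external_client_info{1, 1, 1};
  std::map<ClientProfileId, ClientInfo> external_client_infos;
public:
  void set(ClientProfileId id, ClientInfo info) { external_client_infos[id] = info; }
  const ClientInfo* get_external_client(const ClientProfileId& client) const {
    auto ret = external_client_infos.find(client);
    return ret == external_client_infos.end() ? &default_external_client_info
                                              : &ret->second;
  }
};

int main() {
  ClientRegistry registry;
  registry.set({.client_id = 7, .profile_id = 0}, {.res = 2, .wgt = 4, .lim = 8});
  std::cout << registry.get_external_client({7, 0})->wgt << '\n';  // 4: explicit profile
  std::cout << registry.get_external_client({8, 0})->wgt << '\n';  // 1: default fallback
}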
null
ceph-main/src/crimson/osd/scheduler/mclock_scheduler.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2016 Red Hat Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once #include <ostream> #include <map> #include <vector> #include "boost/variant.hpp" #include "dmclock/src/dmclock_server.h" #include "crimson/osd/scheduler/scheduler.h" #include "common/config.h" #include "common/ceph_context.h" namespace crimson::osd::scheduler { using client_id_t = uint64_t; using profile_id_t = uint64_t; struct client_profile_id_t { client_id_t client_id; profile_id_t profile_id; auto operator<=>(const client_profile_id_t&) const = default; }; struct scheduler_id_t { scheduler_class_t class_id; client_profile_id_t client_profile_id; auto operator<=>(const scheduler_id_t&) const = default; }; /** * Scheduler implementation based on mclock. * * TODO: explain configs */ class mClockScheduler : public Scheduler, md_config_obs_t { class ClientRegistry { std::array< crimson::dmclock::ClientInfo, static_cast<size_t>(scheduler_class_t::client) > internal_client_infos = { // Placeholder, gets replaced with configured values crimson::dmclock::ClientInfo(1, 1, 1), crimson::dmclock::ClientInfo(1, 1, 1) }; crimson::dmclock::ClientInfo default_external_client_info = {1, 1, 1}; std::map<client_profile_id_t, crimson::dmclock::ClientInfo> external_client_infos; const crimson::dmclock::ClientInfo *get_external_client( const client_profile_id_t &client) const; public: void update_from_config(const ConfigProxy &conf); const crimson::dmclock::ClientInfo *get_info( const scheduler_id_t &id) const; } client_registry; using mclock_queue_t = crimson::dmclock::PullPriorityQueue< scheduler_id_t, item_t, true, true, 2>; mclock_queue_t scheduler; std::list<item_t> immediate; static scheduler_id_t get_scheduler_id(const item_t &item) { return scheduler_id_t{ item.params.klass, client_profile_id_t{ item.params.owner, 0 } }; } public: mClockScheduler(ConfigProxy &conf); // Enqueue op in the back of the regular queue void enqueue(item_t &&item) final; // Enqueue the op in the front of the regular queue void enqueue_front(item_t &&item) final; // Return an op to be dispatch item_t dequeue() final; // Returns if the queue is empty bool empty() const final { return immediate.empty() && scheduler.empty(); } // Formatted output of the queue void dump(ceph::Formatter &f) const final; void print(std::ostream &ostream) const final { ostream << "mClockScheduler"; } const char** get_tracked_conf_keys() const final; void handle_conf_change(const ConfigProxy& conf, const std::set<std::string> &changed) final; }; }
3,071
23.380952
74
h
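Each request is keyed by a scheduler_id_t built from its scheduler class and its owner (get_scheduler_id above always uses profile_id 0), and the defaulted operator<=> is what makes the id usable as an ordered key. A standalone sketch with the relevant types re-declared locally so it compiles on its own:

#include <compare>
#include <cstdint>
#include <iostream>
#include <map>

enum class scheduler_class_t : std::uint8_t {
  background_best_effort = 0, background_recovery, client, repop, immediate,
};

struct client_profile_id_t {
  std::uint64_t client_id;
  std::uint64_t profile_id;
  auto operator<=>(const client_profile_id_t&) const = default;
};

struct scheduler_id_t {
  scheduler_class_t class_id;
  client_profile_id_t client_profile_id;
  auto operator<=>(const scheduler_id_t&) const = default;
};

struct params_t { std::uint64_t cost = 1; std::uint64_t owner = 0; scheduler_class_t klass{}; };

scheduler_id_t get_scheduler_id(const params_t& params) {
  return scheduler_id_t{params.klass, client_profile_id_t{params.owner, 0}};
}

int main() {
  // the defaulted <=> lets scheduler_id_t act as an ordered map key
  std::map<scheduler_id_t, unsigned> per_queue_count;
  ++per_queue_count[get_scheduler_id({.cost = 1, .owner = 42, .klass = scheduler_class_t::client})];
  ++per_queue_count[get_scheduler_id({.cost = 4, .owner = 42, .klass = scheduler_class_t::client})];
  ++per_queue_count[get_scheduler_id({.cost = 1, .owner = 7, .klass = scheduler_class_t::background_recovery})];
  std::cout << per_queue_count.size() << '\n';   // 2 distinct scheduler ids
}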
null
ceph-main/src/crimson/osd/scheduler/scheduler.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2019 Red Hat Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <ostream> #include <seastar/core/print.hh> #include "crimson/osd/scheduler/scheduler.h" #include "crimson/osd/scheduler/mclock_scheduler.h" #include "common/WeightedPriorityQueue.h" namespace crimson::osd::scheduler { std::ostream &operator<<(std::ostream &lhs, const scheduler_class_t &c) { switch (c) { case scheduler_class_t::background_best_effort: return lhs << "background_best_effort"; case scheduler_class_t::background_recovery: return lhs << "background_recovery"; case scheduler_class_t::client: return lhs << "client"; case scheduler_class_t::repop: return lhs << "repop"; case scheduler_class_t::immediate: return lhs << "immediate"; default: return lhs; } } /** * Implements Scheduler in terms of OpQueue * * Templated on queue type to avoid dynamic dispatch, T should implement * OpQueue<Scheduleritem_t, client_t>. This adapter is mainly responsible for * the boilerplate priority cutoff/strict concept which is needed for * OpQueue based implementations. */ template <typename T> class ClassedOpQueueScheduler final : public Scheduler { const scheduler_class_t cutoff; T queue; using priority_t = uint64_t; std::array< priority_t, static_cast<size_t>(scheduler_class_t::immediate) > priority_map = { // Placeholder, gets replaced with configured values 0, 0, 0 }; static scheduler_class_t get_io_prio_cut(ConfigProxy &conf) { if (conf.get_val<std::string>("osd_op_queue_cut_off") == "debug_random") { srand(time(NULL)); return (rand() % 2 < 1) ? scheduler_class_t::repop : scheduler_class_t::immediate; } else if (conf.get_val<std::string>("osd_op_queue_cut_off") == "high") { return scheduler_class_t::immediate; } else { return scheduler_class_t::repop; } } bool use_strict(scheduler_class_t kl) const { return static_cast<uint8_t>(kl) >= static_cast<uint8_t>(cutoff); } priority_t get_priority(scheduler_class_t kl) const { ceph_assert(static_cast<size_t>(kl) < static_cast<size_t>(scheduler_class_t::immediate)); return priority_map[static_cast<size_t>(kl)]; } public: template <typename... Args> ClassedOpQueueScheduler(ConfigProxy &conf, Args&&... args) : cutoff(get_io_prio_cut(conf)), queue(std::forward<Args>(args)...) 
{ priority_map[ static_cast<size_t>(scheduler_class_t::background_best_effort) ] = conf.get_val<uint64_t>("osd_scrub_priority"); priority_map[ static_cast<size_t>(scheduler_class_t::background_recovery) ] = conf.get_val<uint64_t>("osd_recovery_op_priority"); priority_map[ static_cast<size_t>(scheduler_class_t::client) ] = conf.get_val<uint64_t>("osd_client_op_priority"); priority_map[ static_cast<size_t>(scheduler_class_t::repop) ] = conf.get_val<uint64_t>("osd_client_op_priority"); } void enqueue(item_t &&item) final { if (use_strict(item.params.klass)) queue.enqueue_strict( item.params.owner, get_priority(item.params.klass), std::move(item)); else queue.enqueue( item.params.owner, get_priority(item.params.klass), item.params.cost, std::move(item)); } void enqueue_front(item_t &&item) final { if (use_strict(item.params.klass)) queue.enqueue_strict_front( item.params.owner, get_priority(item.params.klass), std::move(item)); else queue.enqueue_front( item.params.owner, get_priority(item.params.klass), item.params.cost, std::move(item)); } bool empty() const final { return queue.empty(); } item_t dequeue() final { return queue.dequeue(); } void dump(ceph::Formatter &f) const final { return queue.dump(&f); } void print(std::ostream &out) const final { out << "ClassedOpQueueScheduler(queue="; queue.print(out); out << ", cutoff=" << cutoff << ")"; } ~ClassedOpQueueScheduler() final {}; }; SchedulerRef make_scheduler(ConfigProxy &conf) { const std::string _type = conf.get_val<std::string>("osd_op_queue"); const std::string *type = &_type; if (*type == "debug_random") { static const std::string index_lookup[] = { "mclock_scheduler", "wpq" }; srand(time(NULL)); unsigned which = rand() % (sizeof(index_lookup) / sizeof(index_lookup[0])); type = &index_lookup[which]; } if (*type == "wpq" ) { // default is 'wpq' return std::make_unique< ClassedOpQueueScheduler<WeightedPriorityQueue<item_t, client_t>>>( conf, conf.get_val<uint64_t>("osd_op_pq_max_tokens_per_priority"), conf->osd_op_pq_min_cost ); } else if (*type == "mclock_scheduler") { return std::make_unique<mClockScheduler>(conf); } else { ceph_assert("Invalid choice of wq" == 0); return std::unique_ptr<mClockScheduler>(); } } std::ostream &operator<<(std::ostream &lhs, const Scheduler &rhs) { rhs.print(lhs); return lhs; } }
5,274
27.983516
79
cc
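use_strict() is the heart of ClassedOpQueueScheduler: classes at or above the configured cutoff bypass the weighted queue and go to the strict (absolute-priority) queue; with osd_op_queue_cut_off set to "high" only immediate is strict, otherwise repop and immediate are. A small self-contained demonstration of that comparison:

#include <cstdint>
#include <iostream>

enum class scheduler_class_t : std::uint8_t {
  background_best_effort = 0, background_recovery, client, repop, immediate,
};

bool use_strict(scheduler_class_t cutoff, scheduler_class_t klass) {
  return static_cast<std::uint8_t>(klass) >= static_cast<std::uint8_t>(cutoff);
}

int main() {
  const auto cutoff = scheduler_class_t::repop;   // i.e. osd_op_queue_cut_off != "high"
  std::cout << use_strict(cutoff, scheduler_class_t::client) << '\n';    // 0 -> weighted queue
  std::cout << use_strict(cutoff, scheduler_class_t::repop) << '\n';     // 1 -> strict queue
  std::cout << use_strict(cutoff, scheduler_class_t::immediate) << '\n'; // 1 -> strict queue
}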
null
ceph-main/src/crimson/osd/scheduler/scheduler.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2019 Red Hat Inc.
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */

#pragma once

#include <seastar/core/future.hh>
#include <ostream>

#include "crimson/common/config_proxy.h"

namespace crimson::osd::scheduler {

enum class scheduler_class_t : uint8_t {
  background_best_effort = 0,
  background_recovery,
  client,
  repop,
  immediate,
};

std::ostream &operator<<(std::ostream &, const scheduler_class_t &);

using client_t = uint64_t;
using cost_t = uint64_t;

struct params_t {
  cost_t cost = 1;
  client_t owner;
  scheduler_class_t klass;
};

struct item_t {
  params_t params;
  seastar::promise<> wake;
};

/**
 * Base interface for classes responsible for choosing
 * op processing order in the OSD.
 */
class Scheduler {
public:
  // Enqueue op for scheduling
  virtual void enqueue(item_t &&item) = 0;

  // Enqueue op for processing as though it were enqueued prior
  // to other items already scheduled.
  virtual void enqueue_front(item_t &&item) = 0;

  // Returns true iff there are no ops scheduled
  virtual bool empty() const = 0;

  // Return next op to be processed
  virtual item_t dequeue() = 0;

  // Dump formatted representation for the queue
  virtual void dump(ceph::Formatter &f) const = 0;

  // Print human readable brief description with relevant parameters
  virtual void print(std::ostream &out) const = 0;

  // Destructor
  virtual ~Scheduler() {};
};
std::ostream &operator<<(std::ostream &lhs, const Scheduler &);

using SchedulerRef = std::unique_ptr<Scheduler>;

SchedulerRef make_scheduler(ConfigProxy &);

}
1,891
21.795181
70
h
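To make the contract concrete, here is a toy FIFO scheduler shaped like the interface above, with simplified local types (no seastar::promise, no ceph::Formatter) so it compiles on its own; it is only an illustration, not one of the real implementations (wpq or mclock_scheduler):

#include <cassert>
#include <cstdint>
#include <deque>
#include <utility>

enum class scheduler_class_t : std::uint8_t {
  background_best_effort = 0, background_recovery, client, repop, immediate,
};
struct params_t { std::uint64_t cost = 1; std::uint64_t owner = 0; scheduler_class_t klass{}; };
struct item_t { params_t params; };   // the real item_t also carries a wake promise

class FifoScheduler {
  std::deque<item_t> q;
public:
  void enqueue(item_t&& item)       { q.push_back(std::move(item)); }
  void enqueue_front(item_t&& item) { q.push_front(std::move(item)); }
  bool empty() const                { return q.empty(); }
  item_t dequeue() {
    item_t ret = std::move(q.front());
    q.pop_front();
    return ret;
  }
};

int main() {
  FifoScheduler sched;
  sched.enqueue({ .params = { .owner = 1, .klass = scheduler_class_t::client } });
  sched.enqueue_front({ .params = { .owner = 2, .klass = scheduler_class_t::repop } });
  assert(!sched.empty());
  assert(sched.dequeue().params.owner == 2);   // the front-enqueued item comes out first
  assert(sched.dequeue().params.owner == 1);
  assert(sched.empty());
}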
null
ceph-main/src/crimson/tools/perf_async_msgr.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*- #include <boost/program_options/variables_map.hpp> #include <boost/program_options/parsers.hpp> #include "auth/Auth.h" #include "global/global_init.h" #include "msg/Dispatcher.h" #include "msg/Messenger.h" #include "messages/MOSDOp.h" #include "auth/DummyAuth.h" namespace { constexpr int CEPH_OSD_PROTOCOL = 10; struct Server { Server(CephContext* cct, unsigned msg_len) : dummy_auth(cct), dispatcher(cct, msg_len) { msgr.reset(Messenger::create(cct, "async", entity_name_t::OSD(0), "server", 0)); dummy_auth.auth_registry.refresh_config(); msgr->set_cluster_protocol(CEPH_OSD_PROTOCOL); msgr->set_default_policy(Messenger::Policy::stateless_server(0)); msgr->set_auth_client(&dummy_auth); msgr->set_auth_server(&dummy_auth); } DummyAuthClientServer dummy_auth; std::unique_ptr<Messenger> msgr; struct ServerDispatcher : Dispatcher { unsigned msg_len = 0; bufferlist msg_data; ServerDispatcher(CephContext* cct, unsigned msg_len) : Dispatcher(cct), msg_len(msg_len) { msg_data.append_zero(msg_len); } bool ms_can_fast_dispatch_any() const override { return true; } bool ms_can_fast_dispatch(const Message* m) const override { return m->get_type() == CEPH_MSG_OSD_OP; } void ms_fast_dispatch(Message* m) override { ceph_assert(m->get_type() == CEPH_MSG_OSD_OP); const static pg_t pgid; const static object_locator_t oloc; const static hobject_t hobj(object_t(), oloc.key, CEPH_NOSNAP, pgid.ps(), pgid.pool(), oloc.nspace); static spg_t spgid(pgid); MOSDOp *rep = new MOSDOp(0, 0, hobj, spgid, 0, 0, 0); bufferlist data(msg_data); rep->write(0, msg_len, data); rep->set_tid(m->get_tid()); m->get_connection()->send_message(rep); m->put(); } bool ms_dispatch(Message*) override { ceph_abort(); } bool ms_handle_reset(Connection*) override { return true; } void ms_handle_remote_reset(Connection*) override { } bool ms_handle_refused(Connection*) override { return true; } } dispatcher; }; } static void run(CephContext* cct, entity_addr_t addr, unsigned bs) { std::cout << "async server listening at " << addr << std::endl; Server server{cct, bs}; server.msgr->bind(addr); server.msgr->add_dispatcher_head(&server.dispatcher); server.msgr->start(); server.msgr->wait(); } int main(int argc, char** argv) { namespace po = boost::program_options; po::options_description desc{"Allowed options"}; desc.add_options() ("help,h", "show help message") ("addr", po::value<std::string>()->default_value("v2:127.0.0.1:9010"), "server address(crimson only supports msgr v2 protocol)") ("bs", po::value<unsigned>()->default_value(0), "server block size") ("crc-enabled", po::value<bool>()->default_value(false), "enable CRC checks") ("threads", po::value<unsigned>()->default_value(3), "async messenger worker threads"); po::variables_map vm; std::vector<std::string> unrecognized_options; try { auto parsed = po::command_line_parser(argc, argv) .options(desc) .allow_unregistered() .run(); po::store(parsed, vm); if (vm.count("help")) { std::cout << desc << std::endl; return 0; } po::notify(vm); unrecognized_options = po::collect_unrecognized(parsed.options, po::include_positional); } catch(const po::error& e) { std::cerr << "error: " << e.what() << std::endl; return 1; } auto addr = vm["addr"].as<std::string>(); entity_addr_t target_addr; target_addr.parse(addr.c_str(), nullptr); ceph_assert_always(target_addr.is_msgr2()); auto bs = vm["bs"].as<unsigned>(); auto crc_enabled = vm["crc-enabled"].as<bool>(); auto worker_threads = vm["threads"].as<unsigned>(); std::vector<const char*> 
args(argv, argv + argc); auto cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_MON_CONFIG); common_init_finish(cct.get()); if (crc_enabled) { cct->_conf.set_val("ms_crc_header", "true"); cct->_conf.set_val("ms_crc_data", "true"); } else { cct->_conf.set_val("ms_crc_header", "false"); cct->_conf.set_val("ms_crc_data", "false"); } cct->_conf.set_val("ms_async_op_threads", fmt::format("{}", worker_threads)); std::cout << "server[" << addr << "](bs=" << bs << ", crc_enabled=" << crc_enabled << ", worker_threads=" << worker_threads << std::endl; run(cct.get(), target_addr, bs); }
4,761
30.328947
92
cc
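perf_async_msgr's argument handling above is plain boost::program_options. A reduced standalone example of the same pattern (declare options with defaults, parse argv, read typed values out of the variables_map), assuming only Boost.Program_options is available (link with -lboost_program_options):

#include <boost/program_options.hpp>
#include <iostream>
#include <string>

namespace po = boost::program_options;

int main(int argc, char** argv) {
  po::options_description desc{"Allowed options"};
  desc.add_options()
    ("help,h", "show help message")
    ("addr", po::value<std::string>()->default_value("v2:127.0.0.1:9010"),
     "server address")
    ("bs", po::value<unsigned>()->default_value(0), "server block size");

  po::variables_map vm;
  try {
    po::store(po::parse_command_line(argc, argv, desc), vm);
    if (vm.count("help")) {
      std::cout << desc << std::endl;
      return 0;
    }
    po::notify(vm);
  } catch (const po::error& e) {
    std::cerr << "error: " << e.what() << std::endl;
    return 1;
  }

  std::cout << "addr=" << vm["addr"].as<std::string>()
            << " bs=" << vm["bs"].as<unsigned>() << std::endl;
}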
null
ceph-main/src/crimson/tools/perf_crimson_msgr.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include <map> #include <random> #include <boost/program_options.hpp> #include <boost/iterator/counting_iterator.hpp> #include <seastar/core/app-template.hh> #include <seastar/core/do_with.hh> #include <seastar/core/future-util.hh> #include <seastar/core/lowres_clock.hh> #include <seastar/core/reactor.hh> #include <seastar/core/sleep.hh> #include <seastar/core/semaphore.hh> #include <seastar/core/smp.hh> #include <seastar/core/thread.hh> #include "common/ceph_time.h" #include "messages/MOSDOp.h" #include "crimson/auth/DummyAuth.h" #include "crimson/common/log.h" #include "crimson/common/config_proxy.h" #include "crimson/net/Connection.h" #include "crimson/net/Dispatcher.h" #include "crimson/net/Messenger.h" #include "crimson/osd/stop_signal.h" using namespace std; using namespace std::chrono_literals; using lowres_clock_t = seastar::lowres_system_clock; namespace bpo = boost::program_options; namespace { template<typename Message> using Ref = boost::intrusive_ptr<Message>; seastar::logger& logger() { return crimson::get_logger(ceph_subsys_ms); } template <typename T, typename... Args> seastar::future<T*> create_sharded(Args... args) { // seems we should only construct/stop shards on #0 return seastar::smp::submit_to(0, [=] { auto sharded_obj = seastar::make_lw_shared<seastar::sharded<T>>(); return sharded_obj->start(args...).then([sharded_obj]() { seastar::engine().at_exit([sharded_obj]() { return sharded_obj->stop().then([sharded_obj] {}); }); return sharded_obj.get(); }); }).then([] (seastar::sharded<T> *ptr_shard) { // return the pointer valid for the caller CPU return &ptr_shard->local(); }); } double get_reactor_utilization() { auto &value_map = seastar::metrics::impl::get_value_map(); auto found = value_map.find("reactor_utilization"); assert(found != value_map.end()); auto &[full_name, metric_family] = *found; std::ignore = full_name; assert(metric_family.size() == 1); const auto& [labels, metric] = *metric_family.begin(); std::ignore = labels; auto value = (*metric)(); return value.ui(); } enum class perf_mode_t { both, client, server }; struct client_config { entity_addr_t server_addr; unsigned block_size; unsigned ramptime; unsigned msgtime; unsigned num_clients; unsigned num_conns; unsigned depth; bool skip_core_0; std::string str() const { std::ostringstream out; out << "client[>> " << server_addr << "](bs=" << block_size << ", ramptime=" << ramptime << ", msgtime=" << msgtime << ", num_clients=" << num_clients << ", num_conns=" << num_conns << ", depth=" << depth << ", skip_core_0=" << skip_core_0 << ")"; return out.str(); } static client_config load(bpo::variables_map& options) { client_config conf; entity_addr_t addr; ceph_assert(addr.parse(options["server-addr"].as<std::string>().c_str(), nullptr)); ceph_assert_always(addr.is_msgr2()); conf.server_addr = addr; conf.block_size = options["client-bs"].as<unsigned>(); conf.ramptime = options["ramptime"].as<unsigned>(); conf.msgtime = options["msgtime"].as<unsigned>(); conf.num_clients = options["clients"].as<unsigned>(); ceph_assert_always(conf.num_clients > 0); conf.num_conns = options["conns-per-client"].as<unsigned>(); ceph_assert_always(conf.num_conns > 0); conf.depth = options["depth"].as<unsigned>(); conf.skip_core_0 = options["client-skip-core-0"].as<bool>(); return conf; } }; struct server_config { entity_addr_t addr; unsigned block_size; bool is_fixed_cpu; unsigned core; std::string str() const { std::ostringstream out; 
out << "server[" << addr << "](bs=" << block_size << ", is_fixed_cpu=" << is_fixed_cpu << ", core=" << core << ")"; return out.str(); } static server_config load(bpo::variables_map& options) { server_config conf; entity_addr_t addr; ceph_assert(addr.parse(options["server-addr"].as<std::string>().c_str(), nullptr)); ceph_assert_always(addr.is_msgr2()); conf.addr = addr; conf.block_size = options["server-bs"].as<unsigned>(); conf.is_fixed_cpu = options["server-fixed-cpu"].as<bool>(); conf.core = options["server-core"].as<unsigned>(); return conf; } }; const unsigned SAMPLE_RATE = 256; static seastar::future<> run( perf_mode_t mode, const client_config& client_conf, const server_config& server_conf, bool crc_enabled) { struct test_state { struct Server final : public crimson::net::Dispatcher, public seastar::peering_sharded_service<Server> { // available only in msgr_sid crimson::net::MessengerRef msgr; crimson::auth::DummyAuthClientServer dummy_auth; const seastar::shard_id msgr_sid; std::string lname; bool is_fixed_cpu = true; bool is_stopped = false; std::optional<seastar::future<>> fut_report; unsigned conn_count = 0; unsigned msg_count = 0; MessageRef last_msg; // available in all shards unsigned msg_len; bufferlist msg_data; Server(seastar::shard_id msgr_sid, unsigned msg_len, bool needs_report) : msgr_sid{msgr_sid}, msg_len{msg_len} { lname = fmt::format("server@{}", msgr_sid); msg_data.append_zero(msg_len); if (seastar::this_shard_id() == msgr_sid && needs_report) { start_report(); } } void ms_handle_connect( crimson::net::ConnectionRef, seastar::shard_id) override { ceph_abort("impossible, server won't connect"); } void ms_handle_accept( crimson::net::ConnectionRef, seastar::shard_id new_shard, bool is_replace) override { ceph_assert_always(new_shard == seastar::this_shard_id()); auto &server = container().local(); ++server.conn_count; } void ms_handle_reset( crimson::net::ConnectionRef, bool) override { auto &server = container().local(); --server.conn_count; } std::optional<seastar::future<>> ms_dispatch( crimson::net::ConnectionRef c, MessageRef m) override { assert(c->get_shard_id() == seastar::this_shard_id()); ceph_assert(m->get_type() == CEPH_MSG_OSD_OP); auto &server = container().local(); // server replies with MOSDOp to generate server-side write workload const static pg_t pgid; const static object_locator_t oloc; const static hobject_t hobj(object_t(), oloc.key, CEPH_NOSNAP, pgid.ps(), pgid.pool(), oloc.nspace); static spg_t spgid(pgid); auto rep = crimson::make_message<MOSDOp>(0, 0, hobj, spgid, 0, 0, 0); bufferlist data(server.msg_data); rep->write(0, server.msg_len, data); rep->set_tid(m->get_tid()); ++server.msg_count; std::ignore = c->send(std::move(rep)); if (server.msg_count % 16 == 0) { server.last_msg = std::move(m); } return {seastar::now()}; } seastar::future<> init(const entity_addr_t& addr, bool is_fixed_cpu) { return container().invoke_on( msgr_sid, [addr, is_fixed_cpu](auto &server) { // server msgr is always with nonce 0 server.msgr = crimson::net::Messenger::create( entity_name_t::OSD(server.msgr_sid), server.lname, 0, is_fixed_cpu); server.msgr->set_default_policy(crimson::net::SocketPolicy::stateless_server(0)); server.msgr->set_auth_client(&server.dummy_auth); server.msgr->set_auth_server(&server.dummy_auth); server.is_fixed_cpu = is_fixed_cpu; return server.msgr->bind(entity_addrvec_t{addr} ).safe_then([&server] { return server.msgr->start({&server}); }, crimson::net::Messenger::bind_ertr::all_same_way( [addr] (const std::error_code& e) { 
logger().error("Server: " "there is another instance running at {}", addr); ceph_abort(); })); }); } seastar::future<> shutdown() { logger().info("{} shutdown...", lname); return container().invoke_on( msgr_sid, [](auto &server) { server.is_stopped = true; ceph_assert(server.msgr); server.msgr->stop(); return server.msgr->shutdown( ).then([&server] { if (server.fut_report.has_value()) { return std::move(server.fut_report.value()); } else { return seastar::now(); } }); }); } private: struct ShardReport { unsigned msg_count = 0; // per-interval metrics double reactor_utilization; unsigned conn_count = 0; int msg_size = 0; unsigned msg_count_interval = 0; }; // should not be called frequently to impact performance void get_report(ShardReport& last) { unsigned last_msg_count = last.msg_count; int msg_size = -1; if (last_msg) { auto msg = boost::static_pointer_cast<MOSDOp>(last_msg); msg->finish_decode(); ceph_assert_always(msg->ops.size() == 1); msg_size = msg->ops[0].op.extent.length; last_msg.reset(); } last.msg_count = msg_count; last.reactor_utilization = get_reactor_utilization(); last.conn_count = conn_count; last.msg_size = msg_size; last.msg_count_interval = msg_count - last_msg_count; } struct TimerReport { unsigned elapsed = 0u; mono_time start_time = mono_clock::zero(); std::vector<ShardReport> reports; TimerReport(unsigned shards) : reports(shards) {} }; void start_report() { seastar::promise<> pr_report; fut_report = pr_report.get_future(); seastar::do_with( TimerReport(seastar::smp::count), [this](auto &report) { return seastar::do_until( [this] { return is_stopped; }, [&report, this] { return seastar::sleep(2s ).then([&report, this] { report.elapsed += 2; if (is_fixed_cpu) { return seastar::smp::submit_to(msgr_sid, [&report, this] { auto &server = container().local(); server.get_report(report.reports[seastar::this_shard_id()]); }).then([&report, this] { auto now = mono_clock::now(); auto prv = report.start_time; report.start_time = now; if (prv == mono_clock::zero()) { // cannot compute duration return; } std::chrono::duration<double> duration_d = now - prv; double duration = duration_d.count(); auto &ireport = report.reports[msgr_sid]; double iops = ireport.msg_count_interval / duration; double throughput_MB = -1; if (ireport.msg_size >= 0) { throughput_MB = iops * ireport.msg_size / 1048576; } std::ostringstream sout; sout << setfill(' ') << report.elapsed << "(" << std::setw(5) << duration << ") " << std::setw(9) << iops << "IOPS " << std::setw(8) << throughput_MB << "MiB/s " << ireport.reactor_utilization << "(" << ireport.conn_count << ")"; std::cout << sout.str() << std::endl; }); } else { return seastar::smp::invoke_on_all([&report, this] { auto &server = container().local(); server.get_report(report.reports[seastar::this_shard_id()]); }).then([&report, this] { auto now = mono_clock::now(); auto prv = report.start_time; report.start_time = now; if (prv == mono_clock::zero()) { // cannot compute duration return; } std::chrono::duration<double> duration_d = now - prv; double duration = duration_d.count(); unsigned num_msgs = 0; // -1 means unavailable, -2 means mismatch int msg_size = -1; for (auto &i : report.reports) { if (i.msg_size >= 0) { if (msg_size == -2) { // pass } else if (msg_size == -1) { msg_size = i.msg_size; } else { if (msg_size != i.msg_size) { msg_size = -2; } } } num_msgs += i.msg_count_interval; } double iops = num_msgs / duration; double throughput_MB = msg_size; if (msg_size >= 0) { throughput_MB = iops * msg_size / 1048576; } std::ostringstream sout; 
sout << setfill(' ') << report.elapsed << "(" << std::setw(5) << duration << ") " << std::setw(9) << iops << "IOPS " << std::setw(8) << throughput_MB << "MiB/s "; for (auto &i : report.reports) { sout << i.reactor_utilization << "(" << i.conn_count << ") "; } std::cout << sout.str() << std::endl; }); } }); } ); }).then([this] { logger().info("report is stopped!"); }).forward_to(std::move(pr_report)); } }; struct Client final : public crimson::net::Dispatcher, public seastar::peering_sharded_service<Client> { struct ConnStats { mono_time connecting_time = mono_clock::zero(); mono_time connected_time = mono_clock::zero(); unsigned received_count = 0u; mono_time start_time = mono_clock::zero(); unsigned start_count = 0u; unsigned sampled_count = 0u; double sampled_total_lat_s = 0.0; // for reporting only mono_time finish_time = mono_clock::zero(); void start_connecting() { connecting_time = mono_clock::now(); } void finish_connecting() { ceph_assert_always(connected_time == mono_clock::zero()); connected_time = mono_clock::now(); } void start_collect() { ceph_assert_always(connected_time != mono_clock::zero()); start_time = mono_clock::now(); start_count = received_count; sampled_count = 0u; sampled_total_lat_s = 0.0; finish_time = mono_clock::zero(); } void prepare_summary(const ConnStats &current) { *this = current; finish_time = mono_clock::now(); } }; struct PeriodStats { mono_time start_time = mono_clock::zero(); unsigned start_count = 0u; unsigned sampled_count = 0u; double sampled_total_lat_s = 0.0; // for reporting only mono_time finish_time = mono_clock::zero(); unsigned finish_count = 0u; unsigned depth = 0u; void start_collect(unsigned received_count) { start_time = mono_clock::now(); start_count = received_count; sampled_count = 0u; sampled_total_lat_s = 0.0; } void reset_period( unsigned received_count, unsigned _depth, PeriodStats &snapshot) { snapshot.start_time = start_time; snapshot.start_count = start_count; snapshot.sampled_count = sampled_count; snapshot.sampled_total_lat_s = sampled_total_lat_s; snapshot.finish_time = mono_clock::now(); snapshot.finish_count = received_count; snapshot.depth = _depth; start_collect(received_count); } }; struct JobReport { std::string name; unsigned depth = 0; double connect_time_s = 0; unsigned total_msgs = 0; double messaging_time_s = 0; double latency_ms = 0; double iops = 0; double throughput_mbps = 0; void account(const JobReport &stats) { depth += stats.depth; connect_time_s += stats.connect_time_s; total_msgs += stats.total_msgs; messaging_time_s += stats.messaging_time_s; latency_ms += stats.latency_ms; iops += stats.iops; throughput_mbps += stats.throughput_mbps; } void report() const { auto str = fmt::format( "{}(depth={}):\n" " connect time: {:08f}s\n" " messages received: {}\n" " messaging time: {:08f}s\n" " latency: {:08f}ms\n" " IOPS: {:08f}\n" " out throughput: {:08f}MB/s", name, depth, connect_time_s, total_msgs, messaging_time_s, latency_ms, iops, throughput_mbps); std::cout << str << std::endl; } }; struct ConnectionPriv : public crimson::net::Connection::user_private_t { unsigned index; ConnectionPriv(unsigned i) : index{i} {} }; struct ConnState { crimson::net::MessengerRef msgr; ConnStats conn_stats; PeriodStats period_stats; seastar::semaphore depth; std::vector<lowres_clock_t::time_point> time_msgs_sent; unsigned sent_count = 0u; crimson::net::ConnectionRef active_conn; bool stop_send = false; seastar::promise<JobReport> stopped_send_promise; ConnState(std::size_t _depth) : depth{_depth}, time_msgs_sent{_depth, 
lowres_clock_t::time_point::min()} {} unsigned get_current_units() const { ceph_assert(depth.available_units() >= 0); return depth.current(); } seastar::future<JobReport> stop_dispatch_messages() { stop_send = true; depth.broken(DepthBroken()); return stopped_send_promise.get_future(); } }; const seastar::shard_id sid; const unsigned id; const std::optional<unsigned> server_sid; const unsigned num_clients; const unsigned num_conns; const unsigned msg_len; bufferlist msg_data; const unsigned nr_depth; crimson::auth::DummyAuthClientServer dummy_auth; std::vector<ConnState> conn_states; Client(unsigned num_clients, unsigned num_conns, unsigned msg_len, unsigned _depth, std::optional<unsigned> server_sid) : sid{seastar::this_shard_id()}, id{sid + num_clients - seastar::smp::count}, server_sid{server_sid}, num_clients{num_clients}, num_conns{num_conns}, msg_len{msg_len}, nr_depth{_depth} { if (is_active()) { for (unsigned i = 0; i < num_conns; ++i) { conn_states.emplace_back(nr_depth); } } msg_data.append_zero(msg_len); } std::string get_name(unsigned i) { return fmt::format("client{}Conn{}@{}", id, i, sid); } void ms_handle_connect( crimson::net::ConnectionRef conn, seastar::shard_id new_shard) override { ceph_assert_always(new_shard == seastar::this_shard_id()); assert(is_active()); unsigned index = static_cast<ConnectionPriv&>(conn->get_user_private()).index; auto &conn_state = conn_states[index]; conn_state.conn_stats.finish_connecting(); } std::optional<seastar::future<>> ms_dispatch( crimson::net::ConnectionRef conn, MessageRef m) override { assert(is_active()); // server replies with MOSDOp to generate server-side write workload ceph_assert(m->get_type() == CEPH_MSG_OSD_OP); unsigned index = static_cast<ConnectionPriv&>(conn->get_user_private()).index; assert(index < num_conns); auto &conn_state = conn_states[index]; auto msg_id = m->get_tid(); if (msg_id % SAMPLE_RATE == 0) { auto msg_index = msg_id % conn_state.time_msgs_sent.size(); ceph_assert(conn_state.time_msgs_sent[msg_index] != lowres_clock_t::time_point::min()); std::chrono::duration<double> cur_latency = lowres_clock_t::now() - conn_state.time_msgs_sent[msg_index]; conn_state.conn_stats.sampled_total_lat_s += cur_latency.count(); ++(conn_state.conn_stats.sampled_count); conn_state.period_stats.sampled_total_lat_s += cur_latency.count(); ++(conn_state.period_stats.sampled_count); conn_state.time_msgs_sent[msg_index] = lowres_clock_t::time_point::min(); } ++(conn_state.conn_stats.received_count); conn_state.depth.signal(1); return {seastar::now()}; } // should start messenger at this shard? 
bool is_active() { ceph_assert(seastar::this_shard_id() == sid); return sid + num_clients >= seastar::smp::count; } seastar::future<> init() { return container().invoke_on_all([](auto& client) { if (client.is_active()) { return seastar::do_for_each( boost::make_counting_iterator(0u), boost::make_counting_iterator(client.num_conns), [&client](auto i) { auto &conn_state = client.conn_states[i]; std::string name = client.get_name(i); conn_state.msgr = crimson::net::Messenger::create( entity_name_t::OSD(client.id * client.num_conns + i), name, client.id * client.num_conns + i, true); conn_state.msgr->set_default_policy(crimson::net::SocketPolicy::lossy_client(0)); conn_state.msgr->set_auth_client(&client.dummy_auth); conn_state.msgr->set_auth_server(&client.dummy_auth); return conn_state.msgr->start({&client}); }); } return seastar::now(); }); } seastar::future<> shutdown() { return seastar::do_with( std::vector<JobReport>(num_clients * num_conns), [this](auto &all_stats) { return container().invoke_on_all([&all_stats](auto& client) { if (!client.is_active()) { return seastar::now(); } return seastar::parallel_for_each( boost::make_counting_iterator(0u), boost::make_counting_iterator(client.num_conns), [&all_stats, &client](auto i) { logger().info("{} shutdown...", client.get_name(i)); auto &conn_state = client.conn_states[i]; return conn_state.stop_dispatch_messages( ).then([&all_stats, &client, i](auto stats) { all_stats[client.id * client.num_conns + i] = stats; }); }).then([&client] { return seastar::do_for_each( boost::make_counting_iterator(0u), boost::make_counting_iterator(client.num_conns), [&client](auto i) { auto &conn_state = client.conn_states[i]; ceph_assert(conn_state.msgr); conn_state.msgr->stop(); return conn_state.msgr->shutdown(); }); }); }).then([&all_stats, this] { auto nr_jobs = all_stats.size(); JobReport summary; std::vector<JobReport> clients(num_clients); for (unsigned i = 0; i < nr_jobs; ++i) { auto &stats = all_stats[i]; stats.report(); clients[i / num_conns].account(stats); summary.account(stats); } std::cout << std::endl; std::cout << "per client:" << std::endl; for (unsigned i = 0; i < num_clients; ++i) { auto &stats = clients[i]; stats.name = fmt::format("client{}", i); stats.connect_time_s /= num_conns; stats.messaging_time_s /= num_conns; stats.latency_ms /= num_conns; stats.report(); } std::cout << std::endl; summary.name = fmt::format("all", nr_jobs); summary.connect_time_s /= nr_jobs; summary.messaging_time_s /= nr_jobs; summary.latency_ms /= nr_jobs; summary.report(); }); }); } seastar::future<> connect_wait_verify(const entity_addr_t& peer_addr) { return container().invoke_on_all([peer_addr](auto& client) { // start clients in active cores if (client.is_active()) { for (unsigned i = 0; i < client.num_conns; ++i) { auto &conn_state = client.conn_states[i]; conn_state.conn_stats.start_connecting(); conn_state.active_conn = conn_state.msgr->connect(peer_addr, entity_name_t::TYPE_OSD); conn_state.active_conn->set_user_private( std::make_unique<ConnectionPriv>(i)); } // make sure handshake won't hurt the performance return seastar::sleep(1s).then([&client] { for (unsigned i = 0; i < client.num_conns; ++i) { auto &conn_state = client.conn_states[i]; if (conn_state.conn_stats.connected_time == mono_clock::zero()) { logger().error("\n{} not connected after 1s!\n", client.get_name(i)); ceph_assert(false); } } }); } return seastar::now(); }); } private: class TimerReport { private: const unsigned num_clients; const unsigned num_conns; const unsigned msgtime; const 
unsigned bytes_of_block; unsigned elapsed = 0u; std::vector<PeriodStats> snaps; std::vector<ConnStats> summaries; std::vector<double> client_reactor_utilizations; std::optional<double> server_reactor_utilization; public: TimerReport(unsigned num_clients, unsigned num_conns, unsigned msgtime, unsigned bs) : num_clients{num_clients}, num_conns{num_conns}, msgtime{msgtime}, bytes_of_block{bs}, snaps{num_clients * num_conns}, summaries{num_clients * num_conns}, client_reactor_utilizations(num_clients) {} unsigned get_elapsed() const { return elapsed; } PeriodStats& get_snap(unsigned client_id, unsigned i) { return snaps[client_id * num_conns + i]; } ConnStats& get_summary(unsigned client_id, unsigned i) { return summaries[client_id * num_conns + i]; } void set_client_reactor_utilization(unsigned client_id, double ru) { client_reactor_utilizations[client_id] = ru; } void set_server_reactor_utilization(double ru) { server_reactor_utilization = ru; } bool should_stop() const { return elapsed >= msgtime; } seastar::future<> ticktock() { return seastar::sleep(1s).then([this] { ++elapsed; }); } void report_header() const { std::ostringstream sout; sout << std::setfill(' ') << std::setw(6) << "sec" << std::setw(7) << "depth" << std::setw(10) << "IOPS" << std::setw(9) << "MB/s" << std::setw(9) << "lat(ms)"; std::cout << sout.str() << std::endl; } void report_period() { std::chrono::duration<double> elapsed_d = 0s; unsigned depth = 0u; unsigned ops = 0u; unsigned sampled_count = 0u; double sampled_total_lat_s = 0.0; for (const auto& snap: snaps) { elapsed_d += (snap.finish_time - snap.start_time); depth += snap.depth; ops += (snap.finish_count - snap.start_count); sampled_count += snap.sampled_count; sampled_total_lat_s += snap.sampled_total_lat_s; } double elapsed_s = elapsed_d.count() / (num_clients * num_conns); double iops = ops/elapsed_s; std::ostringstream sout; sout << setfill(' ') << std::setw(5) << elapsed_s << " " << std::setw(6) << depth << " " << std::setw(9) << iops << " " << std::setw(8) << iops * bytes_of_block / 1048576 << " " << std::setw(8) << (sampled_total_lat_s / sampled_count * 1000) << " -- "; if (server_reactor_utilization.has_value()) { sout << *server_reactor_utilization << " -- "; } for (double cru : client_reactor_utilizations) { sout << cru << ","; } std::cout << sout.str() << std::endl; } void report_summary() const { std::chrono::duration<double> elapsed_d = 0s; unsigned ops = 0u; unsigned sampled_count = 0u; double sampled_total_lat_s = 0.0; for (const auto& summary: summaries) { elapsed_d += (summary.finish_time - summary.start_time); ops += (summary.received_count - summary.start_count); sampled_count += summary.sampled_count; sampled_total_lat_s += summary.sampled_total_lat_s; } double elapsed_s = elapsed_d.count() / (num_clients * num_conns); double iops = ops / elapsed_s; std::ostringstream sout; sout << "--------------" << " summary " << "--------------\n" << setfill(' ') << std::setw(7) << elapsed_s << std::setw(6) << "-" << std::setw(8) << iops << std::setw(8) << iops * bytes_of_block / 1048576 << std::setw(8) << (sampled_total_lat_s / sampled_count * 1000) << "\n"; std::cout << sout.str() << std::endl; } }; seastar::future<> report_period(TimerReport& report) { return container().invoke_on_all([&report] (auto& client) { if (client.is_active()) { for (unsigned i = 0; i < client.num_conns; ++i) { auto &conn_state = client.conn_states[i]; PeriodStats& snap = report.get_snap(client.id, i); conn_state.period_stats.reset_period( conn_state.conn_stats.received_count, 
client.nr_depth - conn_state.get_current_units(), snap); } report.set_client_reactor_utilization(client.id, get_reactor_utilization()); } if (client.server_sid.has_value() && seastar::this_shard_id() == *client.server_sid) { assert(!client.is_active()); report.set_server_reactor_utilization(get_reactor_utilization()); } }).then([&report] { report.report_period(); }); } seastar::future<> report_summary(TimerReport& report) { return container().invoke_on_all([&report] (auto& client) { if (client.is_active()) { for (unsigned i = 0; i < client.num_conns; ++i) { auto &conn_state = client.conn_states[i]; ConnStats& summary = report.get_summary(client.id, i); summary.prepare_summary(conn_state.conn_stats); } } }).then([&report] { report.report_summary(); }); } public: seastar::future<> dispatch_with_timer(unsigned ramptime, unsigned msgtime) { logger().info("[all clients]: start sending MOSDOps from {} clients * {} conns", num_clients, num_conns); return container().invoke_on_all([] (auto& client) { if (client.is_active()) { for (unsigned i = 0; i < client.num_conns; ++i) { client.do_dispatch_messages(i); } } }).then([ramptime] { logger().info("[all clients]: ramping up {} seconds...", ramptime); return seastar::sleep(std::chrono::seconds(ramptime)); }).then([this] { return container().invoke_on_all([] (auto& client) { if (client.is_active()) { for (unsigned i = 0; i < client.num_conns; ++i) { auto &conn_state = client.conn_states[i]; conn_state.conn_stats.start_collect(); conn_state.period_stats.start_collect(conn_state.conn_stats.received_count); } } }); }).then([this, msgtime] { logger().info("[all clients]: reporting {} seconds...\n", msgtime); return seastar::do_with( TimerReport(num_clients, num_conns, msgtime, msg_len), [this](auto& report) { report.report_header(); return seastar::do_until( [&report] { return report.should_stop(); }, [&report, this] { return report.ticktock().then([&report, this] { // report period every 1s return report_period(report); }).then([&report, this] { // report summary every 10s if (report.get_elapsed() % 10 == 0) { return report_summary(report); } else { return seastar::now(); } }); } ).then([&report, this] { // report the final summary if (report.get_elapsed() % 10 != 0) { return report_summary(report); } else { return seastar::now(); } }); }); }); } private: seastar::future<> send_msg(ConnState &conn_state) { ceph_assert(seastar::this_shard_id() == sid); conn_state.sent_count += 1; return conn_state.depth.wait(1 ).then([this, &conn_state] { const static pg_t pgid; const static object_locator_t oloc; const static hobject_t hobj(object_t(), oloc.key, CEPH_NOSNAP, pgid.ps(), pgid.pool(), oloc.nspace); static spg_t spgid(pgid); auto m = crimson::make_message<MOSDOp>(0, 0, hobj, spgid, 0, 0, 0); bufferlist data(msg_data); m->write(0, msg_len, data); // use tid as the identity of each round m->set_tid(conn_state.sent_count); // sample message latency if (unlikely(conn_state.sent_count % SAMPLE_RATE == 0)) { auto index = conn_state.sent_count % conn_state.time_msgs_sent.size(); ceph_assert(conn_state.time_msgs_sent[index] == lowres_clock_t::time_point::min()); conn_state.time_msgs_sent[index] = lowres_clock_t::now(); } return conn_state.active_conn->send(std::move(m)); }); } class DepthBroken: public std::exception {}; seastar::future<JobReport> stop_dispatch_messages(unsigned i) { auto &conn_state = conn_states[i]; conn_state.stop_send = true; conn_state.depth.broken(DepthBroken()); return conn_state.stopped_send_promise.get_future(); } void 
do_dispatch_messages(unsigned i) { ceph_assert(seastar::this_shard_id() == sid); auto &conn_state = conn_states[i]; ceph_assert(conn_state.sent_count == 0); conn_state.conn_stats.start_time = mono_clock::now(); // forwarded to stopped_send_promise (void) seastar::do_until( [&conn_state] { return conn_state.stop_send; }, [this, &conn_state] { return send_msg(conn_state); } ).handle_exception_type([] (const DepthBroken& e) { // ok, stopped by stop_dispatch_messages() }).then([this, &conn_state, i] { std::string name = get_name(i); logger().info("{} {}: stopped sending OSDOPs", name, *conn_state.active_conn); std::chrono::duration<double> dur_conn = conn_state.conn_stats.connected_time - conn_state.conn_stats.connecting_time; std::chrono::duration<double> dur_msg = mono_clock::now() - conn_state.conn_stats.start_time; unsigned ops = conn_state.conn_stats.received_count - conn_state.conn_stats.start_count; JobReport stats; stats.name = name; stats.depth = nr_depth; stats.connect_time_s = dur_conn.count(); stats.total_msgs = ops; stats.messaging_time_s = dur_msg.count(); stats.latency_ms = conn_state.conn_stats.sampled_total_lat_s / conn_state.conn_stats.sampled_count * 1000; stats.iops = ops / dur_msg.count(); stats.throughput_mbps = ops / dur_msg.count() * msg_len / 1048576; conn_state.stopped_send_promise.set_value(stats); }); } }; }; std::optional<unsigned> server_sid; bool server_needs_report = false; if (mode == perf_mode_t::both) { ceph_assert(server_conf.is_fixed_cpu == true); server_sid = server_conf.core; } else if (mode == perf_mode_t::server) { server_needs_report = true; } return seastar::when_all( seastar::futurize_invoke([mode, server_conf, server_needs_report] { if (mode == perf_mode_t::client) { return seastar::make_ready_future<test_state::Server*>(nullptr); } else { return create_sharded<test_state::Server>( server_conf.core, server_conf.block_size, server_needs_report); } }), seastar::futurize_invoke([mode, client_conf, server_sid] { if (mode == perf_mode_t::server) { return seastar::make_ready_future<test_state::Client*>(nullptr); } else { return create_sharded<test_state::Client>( client_conf.num_clients, client_conf.num_conns, client_conf.block_size, client_conf.depth, server_sid); } }), crimson::common::sharded_conf().start( EntityName{}, std::string_view{"ceph"} ).then([] { return crimson::common::local_conf().start(); }).then([crc_enabled] { return crimson::common::local_conf().set_val( "ms_crc_data", crc_enabled ? "true" : "false"); }) ).then([=](auto&& ret) { auto server = std::move(std::get<0>(ret).get0()); auto client = std::move(std::get<1>(ret).get0()); // reserve core 0 for potentially better performance if (mode == perf_mode_t::both) { logger().info("\nperf settings:\n smp={}\n {}\n {}\n", seastar::smp::count, client_conf.str(), server_conf.str()); if (client_conf.skip_core_0) { ceph_assert(seastar::smp::count > client_conf.num_clients); } else { ceph_assert(seastar::smp::count >= client_conf.num_clients); } ceph_assert(client_conf.num_clients > 0); ceph_assert(seastar::smp::count > server_conf.core + client_conf.num_clients); return seastar::when_all_succeed( // it is not reasonable to allow server/client to shared cores for // performance benchmarking purposes. 
server->init(server_conf.addr, server_conf.is_fixed_cpu), client->init() ).then_unpack([client, addr = client_conf.server_addr] { return client->connect_wait_verify(addr); }).then([client, ramptime = client_conf.ramptime, msgtime = client_conf.msgtime] { return client->dispatch_with_timer(ramptime, msgtime); }).then([client] { return client->shutdown(); }).then([server] { return server->shutdown(); }); } else if (mode == perf_mode_t::client) { logger().info("\nperf settings:\n smp={}\n {}\n", seastar::smp::count, client_conf.str()); if (client_conf.skip_core_0) { ceph_assert(seastar::smp::count > client_conf.num_clients); } else { ceph_assert(seastar::smp::count >= client_conf.num_clients); } ceph_assert(client_conf.num_clients > 0); return client->init( ).then([client, addr = client_conf.server_addr] { return client->connect_wait_verify(addr); }).then([client, ramptime = client_conf.ramptime, msgtime = client_conf.msgtime] { return client->dispatch_with_timer(ramptime, msgtime); }).then([client] { return client->shutdown(); }); } else { // mode == perf_mode_t::server ceph_assert(seastar::smp::count > server_conf.core); logger().info("\nperf settings:\n smp={}\n {}\n", seastar::smp::count, server_conf.str()); return seastar::async([server, server_conf] { // FIXME: SIGINT is not received by stop_signal seastar_apps_lib::stop_signal should_stop; server->init(server_conf.addr, server_conf.is_fixed_cpu).get(); should_stop.wait().get(); server->shutdown().get(); }); } }).finally([] { return crimson::common::sharded_conf().stop(); }); } } int main(int argc, char** argv) { seastar::app_template app; app.add_options() ("mode", bpo::value<unsigned>()->default_value(0), "0: both, 1:client, 2:server") ("server-addr", bpo::value<std::string>()->default_value("v2:127.0.0.1:9010"), "server address(only support msgr v2 protocol)") ("ramptime", bpo::value<unsigned>()->default_value(5), "seconds of client ramp-up time") ("msgtime", bpo::value<unsigned>()->default_value(15), "seconds of client messaging time") ("clients", bpo::value<unsigned>()->default_value(1), "number of client messengers") ("conns-per-client", bpo::value<unsigned>()->default_value(1), "number of connections per client") ("client-bs", bpo::value<unsigned>()->default_value(4096), "client block size") ("depth", bpo::value<unsigned>()->default_value(512), "client io depth per job") ("client-skip-core-0", bpo::value<bool>()->default_value(true), "client skip core 0") ("server-fixed-cpu", bpo::value<bool>()->default_value(true), "server is in the fixed cpu mode, non-fixed doesn't support the mode both") ("server-core", bpo::value<unsigned>()->default_value(1), "server messenger running core") ("server-bs", bpo::value<unsigned>()->default_value(0), "server block size") ("crc-enabled", bpo::value<bool>()->default_value(false), "enable CRC checks"); return app.run(argc, argv, [&app] { auto&& config = app.configuration(); auto mode = config["mode"].as<unsigned>(); ceph_assert(mode <= 2); auto _mode = static_cast<perf_mode_t>(mode); bool crc_enabled = config["crc-enabled"].as<bool>(); auto server_conf = server_config::load(config); auto client_conf = client_config::load(config); return run(_mode, client_conf, server_conf, crc_enabled ).then([] { logger().info("\nsuccessful!\n"); }).handle_exception([] (auto eptr) { logger().info("\nfailed!\n"); return seastar::make_exception_future<>(eptr); }); }); }
44,690
35.722268
100
cc
null
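A note on the row above: the messenger perf client does not time every request. It samples one message in every SAMPLE_RATE and keeps send timestamps in a small ring sized to the queue depth (time_msgs_sent), so a reply can look up its own send time by tid. The standalone sketch below only illustrates that scheme; the names, the SAMPLE_RATE value, and the use of std::chrono in place of seastar's lowres clock are illustrative assumptions, not the tool's actual code.

#include <cassert>
#include <chrono>
#include <cstdint>
#include <vector>

constexpr unsigned SAMPLE_RATE = 7;              // assumed value, for illustration
using sample_clock_t = std::chrono::steady_clock;

struct LatencySampler {
  // one slot per in-flight depth position; time_point::min() marks a free slot
  std::vector<sample_clock_t::time_point> sent;
  double total_lat_s = 0.0;
  unsigned sampled = 0;

  explicit LatencySampler(std::size_t depth)
    : sent(depth, sample_clock_t::time_point::min()) {}

  void on_send(uint64_t tid) {
    if (tid % SAMPLE_RATE == 0) {
      auto &slot = sent[tid % sent.size()];
      assert(slot == sample_clock_t::time_point::min());  // previous sample consumed
      slot = sample_clock_t::now();
    }
  }

  void on_reply(uint64_t tid) {
    if (tid % SAMPLE_RATE == 0) {
      auto &slot = sent[tid % sent.size()];
      std::chrono::duration<double> lat = sample_clock_t::now() - slot;
      total_lat_s += lat.count();
      ++sampled;
      slot = sample_clock_t::time_point::min();            // free the slot again
    }
  }
};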
ceph-main/src/crimson/tools/perf_staged_fltree.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*- // vim: ts=8 sw=2 smarttab #include <boost/program_options.hpp> #include <seastar/core/app-template.hh> #include <seastar/core/thread.hh> #include "crimson/common/config_proxy.h" #include "crimson/common/log.h" #include "crimson/common/perf_counters_collection.h" #include "crimson/os/seastore/onode_manager/staged-fltree/tree_utils.h" #include "crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager.h" #include "test/crimson/seastore/onode_tree/test_value.h" #include "test/crimson/seastore/transaction_manager_test_state.h" using namespace crimson::os::seastore::onode; namespace bpo = boost::program_options; seastar::logger& logger() { return crimson::get_logger(ceph_subsys_test); } template <bool TRACK> class PerfTree : public TMTestState { public: PerfTree(bool is_dummy) : is_dummy{is_dummy} {} seastar::future<> run(KVPool<test_item_t>& kvs, double erase_ratio) { return tm_setup().then([this, &kvs, erase_ratio] { return seastar::async([this, &kvs, erase_ratio] { auto tree = std::make_unique<TreeBuilder<TRACK, ExtendedValue>>(kvs, (is_dummy ? NodeExtentManager::create_dummy(true) : NodeExtentManager::create_seastore(*tm))); { auto t = create_mutate_transaction(); with_trans_intr(*t, [&](auto &tr){ return tree->bootstrap(tr); }).unsafe_get(); submit_transaction(std::move(t)); } { auto t = create_mutate_transaction(); with_trans_intr(*t, [&](auto &tr){ return tree->insert(tr); }).unsafe_get(); auto start_time = mono_clock::now(); submit_transaction(std::move(t)); std::chrono::duration<double> duration = mono_clock::now() - start_time; logger().warn("submit_transaction() done! {}s", duration.count()); } { // Note: create_weak_transaction() can also work, but too slow. auto t = create_read_transaction(); with_trans_intr(*t, [&](auto &tr){ return tree->get_stats(tr); }).unsafe_get(); with_trans_intr(*t, [&](auto &tr){ return tree->validate(tr); }).unsafe_get(); } { auto t = create_mutate_transaction(); with_trans_intr(*t, [&](auto &tr){ return tree->erase(tr, kvs.size() * erase_ratio); }).unsafe_get(); submit_transaction(std::move(t)); } { auto t = create_read_transaction(); with_trans_intr(*t, [&](auto &tr){ return tree->get_stats(tr); }).unsafe_get(); with_trans_intr(*t, [&](auto &tr){ return tree->validate(tr); }).unsafe_get(); } tree.reset(); }); }).then([this] { return tm_teardown(); }); } private: bool is_dummy; }; template <bool TRACK> seastar::future<> run(const bpo::variables_map& config) { return seastar::async([&config] { auto backend = config["backend"].as<std::string>(); bool is_dummy; if (backend == "dummy") { is_dummy = true; } else if (backend == "seastore") { is_dummy = false; } else { ceph_abort(false && "invalid backend"); } auto ns_sizes = config["ns-sizes"].as<std::vector<size_t>>(); auto oid_sizes = config["oid-sizes"].as<std::vector<size_t>>(); auto onode_sizes = config["onode-sizes"].as<std::vector<size_t>>(); auto range2 = config["range2"].as<std::vector<int>>(); ceph_assert(range2.size() == 2); auto range1 = config["range1"].as<std::vector<unsigned>>(); ceph_assert(range1.size() == 2); auto range0 = config["range0"].as<std::vector<unsigned>>(); ceph_assert(range0.size() == 2); auto erase_ratio = config["erase-ratio"].as<double>(); ceph_assert(erase_ratio >= 0); ceph_assert(erase_ratio <= 1); using crimson::common::sharded_conf; sharded_conf().start(EntityName{}, std::string_view{"ceph"}).get(); seastar::engine().at_exit([] { return sharded_conf().stop(); }); using 
crimson::common::sharded_perf_coll; sharded_perf_coll().start().get(); seastar::engine().at_exit([] { return sharded_perf_coll().stop(); }); auto kvs = KVPool<test_item_t>::create_raw_range( ns_sizes, oid_sizes, onode_sizes, {range2[0], range2[1]}, {range1[0], range1[1]}, {range0[0], range0[1]}); PerfTree<TRACK> perf{is_dummy}; perf.run(kvs, erase_ratio).get0(); }); } int main(int argc, char** argv) { seastar::app_template app; app.add_options() ("backend", bpo::value<std::string>()->default_value("dummy"), "tree backend: dummy, seastore") ("tracked", bpo::value<bool>()->default_value(false), "track inserted cursors") ("ns-sizes", bpo::value<std::vector<size_t>>()->default_value( {8, 11, 64, 128, 255, 256}), "sizes of ns strings") ("oid-sizes", bpo::value<std::vector<size_t>>()->default_value( {8, 13, 64, 512, 2035, 2048}), "sizes of oid strings") ("onode-sizes", bpo::value<std::vector<size_t>>()->default_value( {8, 16, 128, 576, 992, 1200}), "sizes of onode") ("range2", bpo::value<std::vector<int>>()->default_value( {0, 128}), "range of shard-pool-crush [a, b)") ("range1", bpo::value<std::vector<unsigned>>()->default_value( {0, 10}), "range of ns-oid strings [a, b)") ("range0", bpo::value<std::vector<unsigned>>()->default_value( {0, 4}), "range of snap-gen [a, b)") ("erase-ratio", bpo::value<double>()->default_value( 0.8), "erase-ratio of all the inserted onodes"); return app.run(argc, argv, [&app] { auto&& config = app.configuration(); auto tracked = config["tracked"].as<bool>(); if (tracked) { return run<true>(config); } else { return run<false>(config); } }); }
6,016
32.614525
82
cc
null
ceph-main/src/crimson/tools/store_nbd/block_driver.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "fs_driver.h"
#include "block_driver.h"
#include "tm_driver.h"

BlockDriverRef get_backend(BlockDriver::config_t config) {
  if (config.type == "transaction_manager") {
    return std::make_unique<TMDriver>(config);
  } else if (config.is_futurized_store()) {
    return std::make_unique<FSDriver>(config);
  } else {
    ceph_assert(0 == "invalid option");
    return BlockDriverRef();
  }
}
502
24.15
70
cc
null
ceph-main/src/crimson/tools/store_nbd/block_driver.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#pragma once

#include <boost/program_options/variables_map.hpp>
#include <boost/program_options/parsers.hpp>

#include <seastar/core/future.hh>

#include <string>
#include <optional>

#include "include/buffer.h"

/**
 * BlockDriver
 *
 * Simple interface to enable throughput test to compare raw disk to
 * transaction_manager, etc
 */
class BlockDriver {
public:
  struct config_t {
    std::string type;
    bool mkfs = false;
    unsigned num_pgs = 128;
    unsigned log_size = 1000;
    unsigned object_size = 4<<20 /* 4MB, rbd default */;
    unsigned oi_size = 1<<9 /* 512b */;
    unsigned log_entry_size = 1<<9 /* 512b */;
    bool prepopulate_log = false;
    std::optional<std::string> path;

    bool is_futurized_store() const {
      return type == "seastore" || type == "bluestore";
    }

    std::string get_fs_type() const {
      ceph_assert(is_futurized_store());
      return type;
    }

    bool oi_enabled() const {
      return oi_size > 0;
    }

    bool log_enabled() const {
      return log_entry_size > 0 && log_size > 0;
    }

    bool prepopulate_log_enabled() const {
      return prepopulate_log;
    }

    void populate_options(
      boost::program_options::options_description &desc) {
      namespace po = boost::program_options;
      desc.add_options()
        ("type",
         po::value<std::string>()
         ->default_value("transaction_manager")
         ->notifier([this](auto s) { type = s; }),
         "Backend to use, options are transaction_manager, seastore"
        )
        ("device-path",
         po::value<std::string>()
         ->required()
         ->notifier([this](auto s) { path = s; }),
         "Path to device for backend"
        )
        ("num-pgs",
         po::value<unsigned>()
         ->notifier([this](auto s) { num_pgs = s; }),
         "Number of pgs to use for futurized_store backends"
        )
        ("log-size",
         po::value<unsigned>()
         ->notifier([this](auto s) { log_size = s; }),
         "Number of log entries per pg to use for futurized_store backends"
         ", 0 to disable"
        )
        ("log-entry-size",
         po::value<unsigned>()
         ->notifier([this](auto s) { log_entry_size = s; }),
         "Size of each log entry per pg to use for futurized_store backends"
         ", 0 to disable"
        )
        ("prepopulate-log",
         po::value<bool>()
         ->notifier([this](auto s) { prepopulate_log = s; }),
         "Prepopulate log on mount"
        )
        ("object-info-size",
         po::value<unsigned>()
         ->notifier([this](auto s) { oi_size = s; }),
         "Size of object info to use for futurized_store backends"
         ", 0 to disable"
        )
        ("object-size",
         po::value<unsigned>()
         ->notifier([this](auto s) { object_size = s; }),
         "Object size to use for futurized_store backends"
        )
        ("mkfs",
         po::value<bool>()
         ->default_value(false)
         ->notifier([this](auto s) { mkfs = s; }),
         "Do mkfs first"
        );
    }
  };

  virtual ceph::bufferptr get_buffer(size_t size) = 0;

  virtual seastar::future<> write(
    off_t offset,
    ceph::bufferptr ptr) = 0;

  virtual seastar::future<ceph::bufferlist> read(
    off_t offset,
    size_t size) = 0;

  virtual size_t get_size() const = 0;

  virtual seastar::future<> mount() = 0;
  virtual seastar::future<> close() = 0;

  virtual ~BlockDriver() {}
};
using BlockDriverRef = std::unique_ptr<BlockDriver>;

BlockDriverRef get_backend(BlockDriver::config_t config);
3,328
23.659259
70
h
null
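The BlockDriver interface in the row above is the contract both backends implement. As a rough illustration only (not code from the tool), a trivial memory-backed driver could look like the sketch below; "MemDriver" and its behaviour are hypothetical, and the sketch assumes the same seastar and ceph buffer headers the interface already pulls in.

#include <map>
#include <utility>
#include "block_driver.h"

// Hypothetical in-memory backend, only to illustrate the BlockDriver contract:
// fixed-size address space, page-aligned buffers, zero-fill on unwritten reads.
// Overlapping or partial extents are not handled; this is a sketch.
class MemDriver final : public BlockDriver {
public:
  explicit MemDriver(config_t config) : config(std::move(config)) {}

  ceph::bufferptr get_buffer(size_t size) final {
    return ceph::buffer::create_page_aligned(size);
  }

  seastar::future<> write(off_t offset, ceph::bufferptr ptr) final {
    data[offset] = std::move(ptr);
    return seastar::make_ready_future<>();
  }

  seastar::future<ceph::bufferlist> read(off_t offset, size_t size) final {
    ceph::bufferlist bl;
    auto it = data.find(offset);
    if (it != data.end() && it->second.length() == size) {
      bl.append(it->second);
    } else {
      bl.append_zero(size);   // unwritten extents read back as zeros
    }
    return seastar::make_ready_future<ceph::bufferlist>(std::move(bl));
  }

  size_t get_size() const final { return 1ull << 30; }  // pretend 1 GiB device

  seastar::future<> mount() final { return seastar::make_ready_future<>(); }
  seastar::future<> close() final { return seastar::make_ready_future<>(); }

private:
  const config_t config;
  std::map<off_t, ceph::bufferptr> data;
};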
ceph-main/src/crimson/tools/store_nbd/fs_driver.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include <boost/iterator/counting_iterator.hpp> #include <fmt/format.h> #include "os/Transaction.h" #include "fs_driver.h" using namespace crimson; using namespace crimson::os; coll_t get_coll(unsigned num) { return coll_t(spg_t(pg_t(0, num))); } ghobject_t get_log_object(unsigned coll) { return ghobject_t( shard_id_t::NO_SHARD, 0, (coll << 16), "", "", 0, ghobject_t::NO_GEN); } std::string make_log_key( unsigned i) { return fmt::format("log_entry_{}", i); } void add_log_entry( unsigned i, unsigned entry_size, std::map<std::string, ceph::buffer::list> *omap) { assert(omap); bufferlist bl; bl.append(ceph::buffer::create('0', entry_size)); omap->emplace(std::make_pair(make_log_key(i), bl)); } void populate_log( ceph::os::Transaction &t, FSDriver::pg_analogue_t &pg, unsigned entry_size, unsigned entries) { t.touch(pg.collection->get_cid(), pg.log_object); // omap_clear not yet implemented, TODO // t.omap_clear(pg.collection->get_cid(), pg.log_object); std::map<std::string, ceph::buffer::list> omap; for (unsigned i = 0; i < entries; ++i) { add_log_entry(i, entry_size, &omap); } t.omap_setkeys( pg.collection->get_cid(), pg.log_object, omap); pg.log_head = entries; } void update_log( ceph::os::Transaction &t, FSDriver::pg_analogue_t &pg, unsigned entry_size, unsigned entries) { ++pg.log_head; std::map<std::string, ceph::buffer::list> key; add_log_entry(pg.log_head, entry_size, &key); t.omap_setkeys( pg.collection->get_cid(), pg.log_object, key); while ((pg.log_head - pg.log_tail) > entries) { t.omap_rmkey( pg.collection->get_cid(), pg.log_object, make_log_key(pg.log_tail)); ++pg.log_tail; } } FSDriver::offset_mapping_t FSDriver::map_offset(off_t offset) { uint32_t objid = offset / config.object_size; uint32_t collid = objid % config.num_pgs; return offset_mapping_t{ collections[collid], ghobject_t( shard_id_t::NO_SHARD, 0, (collid << 16) | (objid + 1), "", "", 0, ghobject_t::NO_GEN), offset % config.object_size }; } seastar::future<> FSDriver::write( off_t offset, bufferptr ptr) { auto mapping = map_offset(offset); ceph_assert(mapping.offset + ptr.length() <= config.object_size); ceph::os::Transaction t; bufferlist bl; bl.append(ptr); t.write( mapping.pg.collection->get_cid(), mapping.object, mapping.offset, ptr.length(), bl, 0); if (config.oi_enabled() ) { bufferlist attr; attr.append(ceph::buffer::create(config.oi_size, '0')); t.setattr( mapping.pg.collection->get_cid(), mapping.object, "_", attr); } if (config.log_enabled()) { update_log( t, mapping.pg, config.log_entry_size, config.log_size); } return sharded_fs->do_transaction( mapping.pg.collection, std::move(t)); } seastar::future<bufferlist> FSDriver::read( off_t offset, size_t size) { auto mapping = map_offset(offset); ceph_assert((mapping.offset + size) <= config.object_size); return sharded_fs->read( mapping.pg.collection, mapping.object, mapping.offset, size, 0 ).handle_error( crimson::ct_error::enoent::handle([size](auto &e) { bufferlist bl; bl.append_zero(size); return seastar::make_ready_future<bufferlist>(std::move(bl)); }), crimson::ct_error::assert_all{"Unrecoverable error in FSDriver::read"} ).then([size](auto &&bl) { if (bl.length() < size) { bl.append_zero(size - bl.length()); } return seastar::make_ready_future<bufferlist>(std::move(bl)); }); } seastar::future<> FSDriver::mkfs() { return init( ).then([this] { assert(fs); uuid_d uuid; uuid.generate_random(); return fs->mkfs(uuid).handle_error( crimson::stateful_ec::handle([] 
(const auto& ec) { crimson::get_logger(ceph_subsys_test) .error("error creating empty object store in {}: ({}) {}", crimson::common::local_conf().get_val<std::string>("osd_data"), ec.value(), ec.message()); std::exit(EXIT_FAILURE); })); }).then([this] { return fs->stop(); }).then([this] { return init(); }).then([this] { return fs->mount( ).handle_error( crimson::stateful_ec::handle([] (const auto& ec) { crimson::get_logger( ceph_subsys_test ).error( "error mounting object store in {}: ({}) {}", crimson::common::local_conf().get_val<std::string>("osd_data"), ec.value(), ec.message()); std::exit(EXIT_FAILURE); })); }).then([this] { return seastar::do_for_each( boost::counting_iterator<unsigned>(0), boost::counting_iterator<unsigned>(config.num_pgs), [this](auto i) { return sharded_fs->create_new_collection(get_coll(i) ).then([this, i](auto coll) { ceph::os::Transaction t; t.create_collection(get_coll(i), 0); return sharded_fs->do_transaction(coll, std::move(t)); }); }); }).then([this] { return fs->umount(); }).then([this] { return fs->stop(); }).then([this] { fs.reset(); return seastar::now(); }); } seastar::future<> FSDriver::mount() { ceph_assert(config.path); return ( config.mkfs ? mkfs() : seastar::now() ).then([this] { return init(); }).then([this] { return fs->mount( ).handle_error( crimson::stateful_ec::handle([] (const auto& ec) { crimson::get_logger( ceph_subsys_test ).error( "error mounting object store in {}: ({}) {}", crimson::common::local_conf().get_val<std::string>("osd_data"), ec.value(), ec.message()); std::exit(EXIT_FAILURE); })); }).then([this] { return seastar::do_for_each( boost::counting_iterator<unsigned>(0), boost::counting_iterator<unsigned>(config.num_pgs), [this](auto i) { return sharded_fs->open_collection(get_coll(i) ).then([this, i](auto ref) { collections[i].collection = ref; collections[i].log_object = get_log_object(i); if (config.log_enabled()) { ceph::os::Transaction t; if (config.prepopulate_log_enabled()) { populate_log( t, collections[i], config.log_entry_size, config.log_size); } return sharded_fs->do_transaction( collections[i].collection, std::move(t)); } else { return seastar::now(); } }); }); }).then([this] { return fs->stat(); }).then([this](auto s) { size = s.total; }); }; seastar::future<> FSDriver::close() { collections.clear(); return fs->umount( ).then([this] { return fs->stop(); }).then([this] { fs.reset(); return seastar::now(); }); } seastar::future<> FSDriver::init() { fs.reset(); fs = FuturizedStore::create( config.get_fs_type(), *config.path, crimson::common::local_conf().get_config_values() ); return fs->start().then([this] { sharded_fs = &(fs->get_sharded_store()); }); }
7,101
21.836013
74
cc
null
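FSDriver::map_offset() in the row above shards the flat offset space: the object id is offset / object_size, the collection is objid % num_pgs, the remainder is the intra-object offset, and the object name encodes (collid << 16) | (objid + 1). A quick worked example with the defaults from block_driver.h (4 MiB objects, 128 pgs); plain arithmetic only, no ceph types, and the chosen offset is arbitrary.

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t object_size = 4ull << 20;        // 4 MiB default
  const uint32_t num_pgs = 128;                   // default pg count
  const uint64_t offset = (513ull << 20) + 4096;  // an arbitrary byte offset

  const uint32_t objid   = offset / object_size;  // 128
  const uint32_t collid  = objid % num_pgs;       // 0
  const uint64_t intra   = offset % object_size;  // 1 MiB + 4096
  const uint64_t objname = (uint64_t(collid) << 16) | (objid + 1);  // 0x81

  std::printf("objid=%u collid=%u intra=%llu name=0x%llx\n",
              objid, collid,
              (unsigned long long)intra,
              (unsigned long long)objname);
  return 0;
}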
ceph-main/src/crimson/tools/store_nbd/fs_driver.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "block_driver.h"

#include "crimson/os/futurized_collection.h"
#include "crimson/os/futurized_store.h"

class FSDriver final : public BlockDriver {
public:
  FSDriver(config_t config)
    : config(config) {}
  ~FSDriver() final {}

  bufferptr get_buffer(size_t size) final {
    return ceph::buffer::create_page_aligned(size);
  }

  seastar::future<> write(
    off_t offset,
    bufferptr ptr) final;

  seastar::future<bufferlist> read(
    off_t offset,
    size_t size) final;

  size_t get_size() const { return size; }

  seastar::future<> mount() final;

  seastar::future<> close() final;

private:
  size_t size = 0;
  const config_t config;
  std::unique_ptr<crimson::os::FuturizedStore> fs;
  crimson::os::FuturizedStore::Shard* sharded_fs;

  struct pg_analogue_t {
    crimson::os::CollectionRef collection;

    ghobject_t log_object;
    unsigned log_tail = 0;
    unsigned log_head = 0;
  };
  std::map<unsigned, pg_analogue_t> collections;

  struct offset_mapping_t {
    pg_analogue_t &pg;
    ghobject_t object;
    off_t offset;
  };
  offset_mapping_t map_offset(off_t offset);

  seastar::future<> mkfs();
  seastar::future<> init();

  friend void populate_log(
    ceph::os::Transaction &,
    pg_analogue_t &,
    unsigned,
    unsigned);

  friend void update_log(
    ceph::os::Transaction &,
    FSDriver::pg_analogue_t &,
    unsigned,
    unsigned);
};
1,500
19.561644
70
h
null
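Each pg_analogue_t above keeps a sliding omap-backed log on its log object: populate_log/update_log in fs_driver.cc append one "log_entry_<n>" key per write at log_head and trim keys at log_tail until at most the configured number remain. A small standalone sketch of just that window logic, with std::map standing in for the object's omap (the fmt usage mirrors make_log_key; "PgLogWindow" is an illustrative name).

#include <map>
#include <string>
#include <fmt/format.h>

struct PgLogWindow {
  unsigned log_tail = 0;
  unsigned log_head = 0;
  std::map<std::string, std::string> omap;  // stand-in for the log object's omap

  // mirrors update_log(): append one entry at the head, trim the tail to size
  void append(unsigned max_entries, unsigned entry_size) {
    ++log_head;
    omap.emplace(fmt::format("log_entry_{}", log_head),
                 std::string(entry_size, '0'));
    while (log_head - log_tail > max_entries) {
      omap.erase(fmt::format("log_entry_{}", log_tail));
      ++log_tail;
    }
  }
};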
ceph-main/src/crimson/tools/store_nbd/store-nbd.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*- /** * crimson-store-nbd * * This tool exposes crimson object store internals as an nbd server * for use with fio in basic benchmarking. * * Example usage: * * $ ./bin/crimson-store-nbd --device-path /dev/nvme1n1 -c 1 --mkfs true --uds-path /tmp/store_nbd_socket.sock * * $ cat nbd.fio * [global] * ioengine=nbd * uri=nbd+unix:///?socket=/tmp/store_nbd_socket.sock * rw=randrw * time_based * runtime=120 * group_reporting * iodepth=1 * size=500G * * [job0] * offset=0 * * $ fio nbd.fio */ #include <random> #include <boost/program_options/variables_map.hpp> #include <boost/program_options/parsers.hpp> #include <linux/nbd.h> #include <linux/fs.h> #include <seastar/apps/lib/stop_signal.hh> #include <seastar/core/app-template.hh> #include <seastar/core/byteorder.hh> #include <seastar/core/future-util.hh> #include <seastar/core/gate.hh> #include <seastar/core/reactor.hh> #include <seastar/core/rwlock.hh> #include <seastar/core/thread.hh> #include <seastar/util/defer.hh> #include "crimson/common/config_proxy.h" #include "crimson/common/log.h" #include "block_driver.h" namespace po = boost::program_options; using namespace ceph; namespace { seastar::logger& logger() { return crimson::get_logger(ceph_subsys_test); } } struct request_context_t { uint32_t magic = 0; uint32_t type = 0; char handle[8] = {0}; uint64_t from = 0; uint32_t len = 0; unsigned err = 0; std::optional<bufferptr> in_buffer; std::optional<bufferlist> out_buffer; using ref = std::unique_ptr<request_context_t>; static ref make_ref() { return std::make_unique<request_context_t>(); } bool check_magic() const { auto ret = magic == NBD_REQUEST_MAGIC; if (!ret) { logger().error( "Invalid magic {} should be {}", magic, NBD_REQUEST_MAGIC); } return ret; } uint32_t get_command() const { return type & 0xff; } bool has_input_buffer() const { return get_command() == NBD_CMD_WRITE; } seastar::future<> read_request(seastar::input_stream<char> &in) { return in.read_exactly(sizeof(struct nbd_request) ).then([this, &in](auto buf) { if (buf.size() < sizeof(struct nbd_request)) { throw std::system_error( std::make_error_code( std::errc::connection_reset)); } auto p = buf.get(); magic = seastar::consume_be<uint32_t>(p); type = seastar::consume_be<uint32_t>(p); memcpy(handle, p, sizeof(handle)); p += sizeof(handle); from = seastar::consume_be<uint64_t>(p); len = seastar::consume_be<uint32_t>(p); logger().debug( "Got request, magic {}, type {}, from {}, len {}", magic, type, from, len); if (!check_magic()) { throw std::system_error( std::make_error_code( std::errc::invalid_argument)); } if (has_input_buffer()) { return in.read_exactly(len).then([this](auto buf) { in_buffer = ceph::buffer::create_page_aligned(len); in_buffer->copy_in(0, len, buf.get()); return seastar::now(); }); } else { return seastar::now(); } }); } seastar::future<> write_reply(seastar::output_stream<char> &out) { seastar::temporary_buffer<char> buffer{sizeof(struct nbd_reply)}; auto p = buffer.get_write(); seastar::produce_be<uint32_t>(p, NBD_REPLY_MAGIC); seastar::produce_be<uint32_t>(p, err); logger().debug("write_reply writing err {}", err); memcpy(p, handle, sizeof(handle)); return out.write(std::move(buffer)).then([this, &out] { if (out_buffer) { return seastar::do_for_each( out_buffer->mut_buffers(), [&out](bufferptr &ptr) { logger().debug("write_reply writing {}", ptr.length()); return out.write( seastar::temporary_buffer<char>( ptr.c_str(), ptr.length(), seastar::make_deleter([ptr](){})) ); }); } else { 
return seastar::now(); } }).then([&out] { return out.flush(); }); } }; struct RequestWriter { seastar::rwlock lock; seastar::output_stream<char> stream; seastar::gate gate; RequestWriter( seastar::output_stream<char> &&stream) : stream(std::move(stream)) {} RequestWriter(RequestWriter &&) = default; seastar::future<> complete(request_context_t::ref &&req) { auto &request = *req; return lock.write_lock( ).then([&request, this] { return request.write_reply(stream); }).finally([&, this, req=std::move(req)] { lock.write_unlock(); logger().debug("complete"); return seastar::now(); }); } seastar::future<> close() { return gate.close().then([this] { return stream.close(); }); } }; /** * NBDHandler * * Simple throughput test for concurrent, single threaded * writes to an BlockDriver. */ class NBDHandler { BlockDriver &backend; std::string uds_path; std::optional<seastar::server_socket> server_socket; std::optional<seastar::connected_socket> connected_socket; seastar::gate gate; public: struct config_t { std::string uds_path; void populate_options( po::options_description &desc) { desc.add_options() ("uds-path", po::value<std::string>() ->default_value("/tmp/store_nbd_socket.sock") ->notifier([this](auto s) { uds_path = s; }), "Path to domain socket for nbd" ); } }; NBDHandler( BlockDriver &backend, config_t config) : backend(backend), uds_path(config.uds_path) {} void run(); seastar::future<> stop(); }; int main(int argc, char** argv) { po::options_description desc{"Allowed options"}; bool debug = false; desc.add_options() ("help,h", "show help message") ("debug", po::value<bool>(&debug)->default_value(false), "enable debugging"); po::options_description nbd_pattern_options{"NBD Pattern Options"}; NBDHandler::config_t nbd_config; nbd_config.populate_options(nbd_pattern_options); desc.add(nbd_pattern_options); po::options_description backend_pattern_options{"Backend Options"}; BlockDriver::config_t backend_config; backend_config.populate_options(backend_pattern_options); desc.add(backend_pattern_options); po::variables_map vm; std::vector<std::string> unrecognized_options; try { auto parsed = po::command_line_parser(argc, argv) .options(desc) .allow_unregistered() .run(); po::store(parsed, vm); if (vm.count("help")) { std::cout << desc << std::endl; return 0; } po::notify(vm); unrecognized_options = po::collect_unrecognized(parsed.options, po::include_positional); } catch(const po::error& e) { std::cerr << "error: " << e.what() << std::endl; return 1; } std::vector<const char*> args(argv, argv + argc); seastar::app_template::config app_cfg; app_cfg.name = "crimson-store-nbd"; app_cfg.auto_handle_sigint_sigterm = false; seastar::app_template app(std::move(app_cfg)); std::vector<char*> av{argv[0]}; std::transform(begin(unrecognized_options), end(unrecognized_options), std::back_inserter(av), [](auto& s) { return const_cast<char*>(s.c_str()); }); return app.run(av.size(), av.data(), [&] { if (debug) { seastar::global_logger_registry().set_all_loggers_level( seastar::log_level::debug ); } return seastar::async([&] { seastar_apps_lib::stop_signal should_stop; crimson::common::sharded_conf() .start(EntityName{}, std::string_view{"ceph"}).get(); auto stop_conf = seastar::defer([] { crimson::common::sharded_conf().stop().get(); }); auto backend = get_backend(backend_config); NBDHandler nbd(*backend, nbd_config); backend->mount().get(); auto close_backend = seastar::defer([&] { backend->close().get(); }); logger().debug("Running nbd server..."); nbd.run(); auto stop_nbd = seastar::defer([&] { 
nbd.stop().get(); }); should_stop.wait().get(); return 0; }); }); } class nbd_oldstyle_negotiation_t { uint64_t magic = seastar::cpu_to_be(0x4e42444d41474943); // "NBDMAGIC" uint64_t magic2 = seastar::cpu_to_be(0x00420281861253); // "IHAVEOPT" uint64_t size = 0; uint32_t flags = seastar::cpu_to_be(0); char reserved[124] = {0}; public: nbd_oldstyle_negotiation_t(uint64_t size, uint32_t flags) : size(seastar::cpu_to_be(size)), flags(seastar::cpu_to_be(flags)) {} } __attribute__((packed)); seastar::future<> send_negotiation( size_t size, seastar::output_stream<char>& out) { seastar::temporary_buffer<char> buf{sizeof(nbd_oldstyle_negotiation_t)}; new (buf.get_write()) nbd_oldstyle_negotiation_t(size, 1); return out.write(std::move(buf) ).then([&out] { return out.flush(); }); } seastar::future<> handle_command( BlockDriver &backend, request_context_t::ref request_ref, RequestWriter &out) { auto &request = *request_ref; logger().debug("got command {}", request.get_command()); return ([&] { switch (request.get_command()) { case NBD_CMD_WRITE: return backend.write( request.from, *request.in_buffer); case NBD_CMD_READ: return backend.read( request.from, request.len).then([&] (auto buffer) { logger().debug("read returned buffer len {}", buffer.length()); request.out_buffer = buffer; }); case NBD_CMD_DISC: throw std::system_error(std::make_error_code(std::errc::bad_message)); case NBD_CMD_TRIM: throw std::system_error(std::make_error_code(std::errc::bad_message)); default: throw std::system_error(std::make_error_code(std::errc::bad_message)); } })().then([&, request_ref=std::move(request_ref)]() mutable { logger().debug("handle_command complete"); return out.complete(std::move(request_ref)); }); } seastar::future<> handle_commands( BlockDriver &backend, seastar::input_stream<char>& in, RequestWriter &out) { logger().debug("handle_commands"); return seastar::keep_doing([&] { logger().debug("waiting for command"); auto request_ref = request_context_t::make_ref(); auto &request = *request_ref; return request.read_request(in).then( [&, request_ref=std::move(request_ref)]() mutable { // keep running in background (void)seastar::try_with_gate(out.gate, [&backend, &out, request_ref=std::move(request_ref)]() mutable { return handle_command(backend, std::move(request_ref), out); }); logger().debug("handle_commands after fork"); }); }).handle_exception_type([](const seastar::gate_closed_exception&) {}); } void NBDHandler::run() { logger().debug("About to listen on {}", uds_path); server_socket = seastar::engine().listen( seastar::socket_address{ seastar::unix_domain_addr{uds_path}}); // keep running in background (void)seastar::keep_doing([this] { return seastar::try_with_gate(gate, [this] { return server_socket->accept().then([this](auto acc) { logger().debug("Accepted"); connected_socket = std::move(acc.connection); return seastar::do_with( connected_socket->input(), RequestWriter{connected_socket->output()}, [&, this](auto &input, auto &output) { return send_negotiation( backend.get_size(), output.stream ).then([&, this] { return handle_commands(backend, input, output); }).finally([&] { std::cout << "closing input and output" << std::endl; return seastar::when_all(input.close(), output.close()); }).discard_result().handle_exception([](auto e) { logger().error("NBDHandler::run saw exception {}", e); }); }); }).handle_exception_type([] (const std::system_error &e) { // an ECONNABORTED is expected when we are being stopped. 
if (e.code() != std::errc::connection_aborted) { logger().error("accept failed: {}", e); } }); }); }).handle_exception_type([](const seastar::gate_closed_exception&) {}); } seastar::future<> NBDHandler::stop() { if (server_socket) { server_socket->abort_accept(); } if (connected_socket) { connected_socket->shutdown_input(); connected_socket->shutdown_output(); } return gate.close().then([this] { if (!server_socket.has_value()) { return seastar::now(); } return seastar::remove_file(uds_path); }); }
12,641
26.66302
111
cc
null
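store-nbd.cc in the row above speaks the old-style NBD protocol by hand: a 28-byte big-endian request header (magic, type, opaque 8-byte handle, offset, length) and a 16-byte reply header (magic, error, handle), followed by payload for reads. The sketch below parses the request header from a raw buffer; it only illustrates the framing that the tool's read_request() implements with seastar::consume_be, and the endian helpers used here (ntohl/be64toh) are an assumption about a Linux host, not the tool's API.

#include <cstdint>
#include <cstring>
#include <arpa/inet.h>   // ntohl
#include <endian.h>      // be64toh (Linux)

struct nbd_request_view {
  uint32_t magic;    // expected to equal NBD_REQUEST_MAGIC
  uint32_t type;     // NBD_CMD_READ / NBD_CMD_WRITE / ...
  char     handle[8];
  uint64_t from;
  uint32_t len;
};

// Parse the fixed 28-byte old-style request header from a raw byte buffer.
inline nbd_request_view parse_nbd_request(const unsigned char *p) {
  nbd_request_view r;
  uint32_t be32;
  uint64_t be64;
  std::memcpy(&be32, p + 0, sizeof(be32));  r.magic = ntohl(be32);
  std::memcpy(&be32, p + 4, sizeof(be32));  r.type  = ntohl(be32);
  std::memcpy(r.handle, p + 8, sizeof(r.handle));
  std::memcpy(&be64, p + 16, sizeof(be64)); r.from  = be64toh(be64);
  std::memcpy(&be32, p + 24, sizeof(be32)); r.len   = ntohl(be32);
  return r;
}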
ceph-main/src/crimson/tools/store_nbd/tm_driver.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "tm_driver.h" using namespace crimson; using namespace crimson::os; using namespace crimson::os::seastore; namespace { seastar::logger& logger() { return crimson::get_logger(ceph_subsys_test); } } seastar::future<> TMDriver::write( off_t offset, bufferptr ptr) { logger().debug("Writing offset {}", offset); assert(offset % device->get_block_size() == 0); assert((ptr.length() % device->get_block_size()) == 0); return seastar::do_with(ptr, [this, offset](auto& ptr) { return repeat_eagain([this, offset, &ptr] { return tm->with_transaction_intr( Transaction::src_t::MUTATE, "write", [this, offset, &ptr](auto& t) { return tm->dec_ref(t, offset ).si_then([](auto){}).handle_error_interruptible( crimson::ct_error::enoent::handle([](auto) { return seastar::now(); }), crimson::ct_error::pass_further_all{} ).si_then([this, offset, &t, &ptr] { logger().debug("dec_ref complete"); return tm->alloc_extent<TestBlock>(t, offset, ptr.length()); }).si_then([this, offset, &t, &ptr](auto ext) { boost::ignore_unused(offset); // avoid clang warning; assert(ext->get_laddr() == (size_t)offset); assert(ext->get_bptr().length() == ptr.length()); ext->get_bptr().swap(ptr); logger().debug("submitting transaction"); return tm->submit_transaction(t); }); }); }); }).handle_error( crimson::ct_error::assert_all{"store-nbd write"} ); } TMDriver::read_extents_ret TMDriver::read_extents( Transaction &t, laddr_t offset, extent_len_t length) { return seastar::do_with( lba_pin_list_t(), lextent_list_t<TestBlock>(), [this, &t, offset, length](auto &pins, auto &ret) { return tm->get_pins( t, offset, length ).si_then([this, &t, &pins, &ret](auto _pins) { _pins.swap(pins); logger().debug("read_extents: mappings {}", pins); return trans_intr::do_for_each( pins.begin(), pins.end(), [this, &t, &ret](auto &&pin) { logger().debug( "read_extents: get_extent {}~{}", pin->get_val(), pin->get_length()); return tm->read_pin<TestBlock>( t, std::move(pin) ).si_then([&ret](auto ref) mutable { ret.push_back(std::make_pair(ref->get_laddr(), ref)); logger().debug( "read_extents: got extent {}", *ref); return seastar::now(); }); }).si_then([&ret] { return std::move(ret); }); }); }); } seastar::future<bufferlist> TMDriver::read( off_t offset, size_t size) { logger().debug("Reading offset {}", offset); assert(offset % device->get_block_size() == 0); assert(size % device->get_block_size() == 0); auto blptrret = std::make_unique<bufferlist>(); auto &blret = *blptrret; return repeat_eagain([=, &blret, this] { return tm->with_transaction_intr( Transaction::src_t::READ, "read", [=, &blret, this](auto& t) { return read_extents(t, offset, size ).si_then([=, &blret](auto ext_list) { size_t cur = offset; for (auto &i: ext_list) { if (cur != i.first) { assert(cur < i.first); blret.append_zero(i.first - cur); cur = i.first; } blret.append(i.second->get_bptr()); cur += i.second->get_bptr().length(); } if (blret.length() != size) { assert(blret.length() < size); blret.append_zero(size - blret.length()); } }); }); }).handle_error( crimson::ct_error::assert_all{"store-nbd read"} ).then([blptrret=std::move(blptrret)]() mutable { logger().debug("read complete"); return std::move(*blptrret); }); } void TMDriver::init() { std::vector<Device*> sec_devices; #ifndef NDEBUG tm = make_transaction_manager(device.get(), sec_devices, true); #else tm = make_transaction_manager(device.get(), sec_devices, false); #endif } void TMDriver::clear() { tm.reset(); } size_t 
TMDriver::get_size() const {
  return device->get_available_size() * .5;
}

seastar::future<> TMDriver::mkfs() {
  assert(config.path);
  logger().debug("mkfs");
  return Device::make_device(*config.path, device_type_t::SSD
  ).then([this](DeviceRef dev) {
    device = std::move(dev);
    seastore_meta_t meta;
    meta.seastore_id.generate_random();
    return device->mkfs(
      device_config_t{
        true,
        (magic_t)std::rand(),
        device_type_t::SSD,
        0,
        meta,
        secondary_device_set_t()});
  }).safe_then([this] {
    logger().debug("device mkfs done");
    return device->mount();
  }).safe_then([this] {
    init();
    logger().debug("tm mkfs");
    return tm->mkfs();
  }).safe_then([this] {
    logger().debug("tm close");
    return tm->close();
  }).safe_then([this] {
    logger().debug("sm close");
    return device->close();
  }).safe_then([this] {
    clear();
    device.reset();
    logger().debug("mkfs complete");
    return TransactionManager::mkfs_ertr::now();
  }).handle_error(
    crimson::ct_error::assert_all{
      "Invalid error during TMDriver::mkfs"
    }
  );
}

seastar::future<> TMDriver::mount() {
  return (config.mkfs ? mkfs() : seastar::now()
  ).then([this] {
    return Device::make_device(*config.path, device_type_t::SSD);
  }).then([this](DeviceRef dev) {
    device = std::move(dev);
    return device->mount();
  }).safe_then([this] {
    init();
    return tm->mount();
  }).handle_error(
    crimson::ct_error::assert_all{
      "Invalid error during TMDriver::mount"
    }
  );
};

seastar::future<> TMDriver::close() {
  return tm->close().safe_then([this] {
    clear();
    return device->close();
  }).handle_error(
    crimson::ct_error::assert_all{
      "Invalid error during TMDriver::close"
    }
  );
}
5,947
25.672646
81
cc
null
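TMDriver::read() in the row above must hand back exactly the requested number of bytes even when the logical range is sparse: extents come back ordered by logical address, any gap before an extent is zero-filled, and a short tail is padded at the end. A backend-free sketch of that stitching loop, with std::string standing in for ceph::bufferlist ("stitch_extents" is an illustrative name).

#include <cassert>
#include <cstdint>
#include <string>
#include <utility>
#include <vector>

// extents: (logical address, data) pairs sorted by address, all within
// [offset, offset + size); holes and the tail are returned as zeros.
std::string stitch_extents(
  uint64_t offset, std::size_t size,
  const std::vector<std::pair<uint64_t, std::string>> &extents)
{
  std::string out;
  uint64_t cur = offset;
  for (const auto &[laddr, data] : extents) {
    assert(laddr >= cur);
    out.append(laddr - cur, '\0');        // zero-fill the hole before this extent
    out.append(data);
    cur = laddr + data.size();
  }
  if (out.size() < size) {
    out.append(size - out.size(), '\0');  // zero-fill the sparse tail
  }
  return out;
}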
ceph-main/src/crimson/tools/store_nbd/tm_driver.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "block_driver.h" #include "crimson/os/seastore/cache.h" #include "crimson/os/seastore/device.h" #include "crimson/os/seastore/transaction_manager.h" #include "test/crimson/seastore/test_block.h" class TMDriver final : public BlockDriver { public: TMDriver(config_t config) : config(config) {} ~TMDriver() final {} bufferptr get_buffer(size_t size) final { return ceph::buffer::create_page_aligned(size); } seastar::future<> write( off_t offset, bufferptr ptr) final; seastar::future<bufferlist> read( off_t offset, size_t size) final; size_t get_size() const final; seastar::future<> mount() final; seastar::future<> close() final; private: const config_t config; using DeviceRef = crimson::os::seastore::DeviceRef; DeviceRef device; using TransactionManager = crimson::os::seastore::TransactionManager; using TransactionManagerRef = crimson::os::seastore::TransactionManagerRef; TransactionManagerRef tm; seastar::future<> mkfs(); void init(); void clear(); using read_extents_iertr = TransactionManager::read_extent_iertr; using read_extents_ret = read_extents_iertr::future< crimson::os::seastore::lextent_list_t<crimson::os::seastore::TestBlock> >; read_extents_ret read_extents( crimson::os::seastore::Transaction &t, crimson::os::seastore::laddr_t offset, crimson::os::seastore::extent_len_t length); };
1,513
25.561404
77
h
null
ceph-main/src/crush/CrushCompiler.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "CrushCompiler.h" #if defined(_AIX) #define EBADE ECORRUPT #endif #ifndef EBADE #define EBADE EFTYPE #endif #include <string> #include "common/errno.h" #include <boost/algorithm/string.hpp> using std::cout; using std::istream; using std::map; using std::ostream; using std::set; using std::string; using std::vector; // ------------- static void print_type_name(ostream& out, int t, CrushWrapper &crush) { const char *name = crush.get_type_name(t); if (name) out << name; else if (t == 0) out << "device"; else out << "type" << t; } static void print_item_name(ostream& out, int t, CrushWrapper &crush) { const char *name = crush.get_item_name(t); if (name) out << name; else if (t >= 0) out << "device" << t; else out << "bucket" << (-1-t); } static void print_bucket_class_ids(ostream& out, int t, CrushWrapper &crush) { if (crush.class_bucket.count(t) == 0) return; auto &class_to_id = crush.class_bucket[t]; for (auto &i : class_to_id) { int c = i.first; int cid = i.second; const char* class_name = crush.get_class_name(c); ceph_assert(class_name); out << "\tid " << cid << " class " << class_name << "\t\t# do not change unnecessarily\n"; } } static void print_item_class(ostream& out, int t, CrushWrapper &crush) { const char *c = crush.get_item_class(t); if (c) out << " class " << c; } static void print_class(ostream& out, int t, CrushWrapper &crush) { const char *c = crush.get_class_name(t); if (c) out << " class " << c; else out << " # unexpected class " << t; } static void print_rule_name(ostream& out, int t, CrushWrapper &crush) { const char *name = crush.get_rule_name(t); if (name) out << name; else out << "rule" << t; } static void print_fixedpoint(ostream& out, int i) { char s[20]; snprintf(s, sizeof(s), "%.5f", (float)i / (float)0x10000); out << s; } int CrushCompiler::decompile_bucket_impl(int i, ostream &out) { const char *name = crush.get_item_name(i); if (name && !crush.is_valid_crush_name(name)) return 0; int type = crush.get_bucket_type(i); print_type_name(out, type, crush); out << " "; print_item_name(out, i, crush); out << " {\n"; out << "\tid " << i << "\t\t# do not change unnecessarily\n"; print_bucket_class_ids(out, i, crush); out << "\t# weight "; print_fixedpoint(out, crush.get_bucket_weight(i)); out << "\n"; int n = crush.get_bucket_size(i); int alg = crush.get_bucket_alg(i); out << "\talg " << crush_bucket_alg_name(alg); // notate based on alg type bool dopos = false; switch (alg) { case CRUSH_BUCKET_UNIFORM: out << "\t# do not change bucket size (" << n << ") unnecessarily"; dopos = true; break; case CRUSH_BUCKET_LIST: out << "\t# add new items at the end; do not change order unnecessarily"; break; case CRUSH_BUCKET_TREE: out << "\t# do not change pos for existing items unnecessarily"; dopos = true; break; } out << "\n"; int hash = crush.get_bucket_hash(i); out << "\thash " << hash << "\t# " << crush_hash_name(hash) << "\n"; for (int j=0; j<n; j++) { int item = crush.get_bucket_item(i, j); int w = crush.get_bucket_item_weight(i, j); out << "\titem "; print_item_name(out, item, crush); out << " weight "; print_fixedpoint(out, w); if (dopos) out << " pos " << j; out << "\n"; } out << "}\n"; return 0; } /* Basically, we just descend recursively into all of the buckets, * executing a depth-first traversal of the graph. Since the buckets form a * directed acyclic graph, this should work just fine. 
The graph isn't * necessarily a tree, so we have to keep track of what buckets we already * outputted. We don't want to output anything twice. We also keep track of * what buckets are in progress so that we can detect cycles. These can * arise through user error. */ int CrushCompiler::decompile_bucket(int cur, std::map<int, dcb_state_t>& dcb_states, ostream &out) { if ((cur == 0) || (!crush.bucket_exists(cur))) return 0; std::map<int, dcb_state_t>::iterator c = dcb_states.find(cur); if (c == dcb_states.end()) { // Mark this bucket as "in progress." std::map<int, dcb_state_t>::value_type val(cur, DCB_STATE_IN_PROGRESS); std::pair <std::map<int, dcb_state_t>::iterator, bool> rval (dcb_states.insert(val)); ceph_assert(rval.second); c = rval.first; } else if (c->second == DCB_STATE_DONE) { // We already did this bucket. return 0; } else if (c->second == DCB_STATE_IN_PROGRESS) { err << "decompile_crush_bucket: logic error: tried to decompile " "a bucket that is already being decompiled" << std::endl; return -EBADE; } else { err << "decompile_crush_bucket: logic error: illegal bucket state! " << c->second << std::endl; return -EBADE; } int bsize = crush.get_bucket_size(cur); for (int i = 0; i < bsize; ++i) { int item = crush.get_bucket_item(cur, i); std::map<int, dcb_state_t>::iterator d = dcb_states.find(item); if (d == dcb_states.end()) { int ret = decompile_bucket(item, dcb_states, out); if (ret) return ret; } else if (d->second == DCB_STATE_IN_PROGRESS) { err << "decompile_crush_bucket: error: while trying to output bucket " << cur << ", we found out that it contains one of the buckets that " << "contain it. This is not allowed. The buckets must form a " << "directed acyclic graph." << std::endl; return -EINVAL; } else if (d->second != DCB_STATE_DONE) { err << "decompile_crush_bucket: logic error: illegal bucket state " << d->second << std::endl; return -EBADE; } } decompile_bucket_impl(cur, out); c->second = DCB_STATE_DONE; return 0; } int CrushCompiler::decompile_weight_set_weights(crush_weight_set weight_set, ostream &out) { out << " [ "; for (__u32 i = 0; i < weight_set.size; i++) { print_fixedpoint(out, weight_set.weights[i]); out << " "; } out << "]\n"; return 0; } int CrushCompiler::decompile_weight_set(crush_weight_set *weight_set, __u32 size, ostream &out) { out << " weight_set [\n"; for (__u32 i = 0; i < size; i++) { int r = decompile_weight_set_weights(weight_set[i], out); if (r < 0) return r; } out << " ]\n"; return 0; } int CrushCompiler::decompile_ids(__s32 *ids, __u32 size, ostream &out) { out << " ids [ "; for (__u32 i = 0; i < size; i++) out << ids[i] << " "; out << "]\n"; return 0; } int CrushCompiler::decompile_choose_arg(crush_choose_arg *arg, int bucket_id, ostream &out) { int r; out << " {\n"; out << " bucket_id " << bucket_id << "\n"; if (arg->weight_set_positions > 0) { r = decompile_weight_set(arg->weight_set, arg->weight_set_positions, out); if (r < 0) return r; } if (arg->ids_size > 0) { r = decompile_ids(arg->ids, arg->ids_size, out); if (r < 0) return r; } out << " }\n"; return 0; } int CrushCompiler::decompile_choose_arg_map(crush_choose_arg_map arg_map, ostream &out) { for (__u32 i = 0; i < arg_map.size; i++) { if ((arg_map.args[i].ids_size == 0) && (arg_map.args[i].weight_set_positions == 0)) continue; int r = decompile_choose_arg(&arg_map.args[i], -1-i, out); if (r < 0) return r; } return 0; } int CrushCompiler::decompile_choose_args(const std::pair<const long unsigned int, crush_choose_arg_map> &i, ostream &out) { out << "choose_args " << i.first << " {\n"; 
int r = decompile_choose_arg_map(i.second, out); if (r < 0) return r; out << "}\n"; return 0; } int CrushCompiler::decompile(ostream &out) { out << "# begin crush map\n"; // only dump tunables if they differ from the defaults if (crush.get_choose_local_tries() != 2) out << "tunable choose_local_tries " << crush.get_choose_local_tries() << "\n"; if (crush.get_choose_local_fallback_tries() != 5) out << "tunable choose_local_fallback_tries " << crush.get_choose_local_fallback_tries() << "\n"; if (crush.get_choose_total_tries() != 19) out << "tunable choose_total_tries " << crush.get_choose_total_tries() << "\n"; if (crush.get_chooseleaf_descend_once() != 0) out << "tunable chooseleaf_descend_once " << crush.get_chooseleaf_descend_once() << "\n"; if (crush.get_chooseleaf_vary_r() != 0) out << "tunable chooseleaf_vary_r " << crush.get_chooseleaf_vary_r() << "\n"; if (crush.get_chooseleaf_stable() != 0) out << "tunable chooseleaf_stable " << crush.get_chooseleaf_stable() << "\n"; if (crush.get_straw_calc_version() != 0) out << "tunable straw_calc_version " << crush.get_straw_calc_version() << "\n"; if (crush.get_allowed_bucket_algs() != CRUSH_LEGACY_ALLOWED_BUCKET_ALGS) out << "tunable allowed_bucket_algs " << crush.get_allowed_bucket_algs() << "\n"; out << "\n# devices\n"; for (int i=0; i<crush.get_max_devices(); i++) { const char *name = crush.get_item_name(i); if (name) { out << "device " << i << " " << name; print_item_class(out, i, crush); out << "\n"; } } out << "\n# types\n"; int n = crush.get_num_type_names(); for (int i=0; n; i++) { const char *name = crush.get_type_name(i); if (!name) { if (i == 0) out << "type 0 osd\n"; continue; } n--; out << "type " << i << " " << name << "\n"; } out << "\n# buckets\n"; std::map<int, dcb_state_t> dcb_states; for (int bucket = -1; bucket > -1-crush.get_max_buckets(); --bucket) { int ret = decompile_bucket(bucket, dcb_states, out); if (ret) return ret; } out << "\n# rules\n"; for (int i=0; i<crush.get_max_rules(); i++) { if (!crush.rule_exists(i)) continue; out << "rule "; if (crush.get_rule_name(i)) print_rule_name(out, i, crush); out << " {\n"; out << "\tid " << i << "\n"; switch (crush.get_rule_type(i)) { case CEPH_PG_TYPE_REPLICATED: out << "\ttype replicated\n"; break; case CEPH_PG_TYPE_ERASURE: out << "\ttype erasure\n"; break; default: out << "\ttype " << crush.get_rule_type(i) << "\n"; } for (int j=0; j<crush.get_rule_len(i); j++) { switch (crush.get_rule_op(i, j)) { case CRUSH_RULE_NOOP: out << "\tstep noop\n"; break; case CRUSH_RULE_TAKE: out << "\tstep take "; { int step_item = crush.get_rule_arg1(i, j); int original_item; int c; int res = crush.split_id_class(step_item, &original_item, &c); if (res < 0) return res; if (c >= 0) step_item = original_item; print_item_name(out, step_item, crush); if (c >= 0) print_class(out, c, crush); } out << "\n"; break; case CRUSH_RULE_EMIT: out << "\tstep emit\n"; break; case CRUSH_RULE_SET_CHOOSE_TRIES: out << "\tstep set_choose_tries " << crush.get_rule_arg1(i, j) << "\n"; break; case CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES: out << "\tstep set_choose_local_tries " << crush.get_rule_arg1(i, j) << "\n"; break; case CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES: out << "\tstep set_choose_local_fallback_tries " << crush.get_rule_arg1(i, j) << "\n"; break; case CRUSH_RULE_SET_CHOOSELEAF_TRIES: out << "\tstep set_chooseleaf_tries " << crush.get_rule_arg1(i, j) << "\n"; break; case CRUSH_RULE_SET_CHOOSELEAF_VARY_R: out << "\tstep set_chooseleaf_vary_r " << crush.get_rule_arg1(i, j) << "\n"; break; case 
CRUSH_RULE_SET_CHOOSELEAF_STABLE: out << "\tstep set_chooseleaf_stable " << crush.get_rule_arg1(i, j) << "\n"; break; case CRUSH_RULE_CHOOSE_FIRSTN: out << "\tstep choose firstn " << crush.get_rule_arg1(i, j) << " type "; print_type_name(out, crush.get_rule_arg2(i, j), crush); out << "\n"; break; case CRUSH_RULE_CHOOSE_INDEP: out << "\tstep choose indep " << crush.get_rule_arg1(i, j) << " type "; print_type_name(out, crush.get_rule_arg2(i, j), crush); out << "\n"; break; case CRUSH_RULE_CHOOSELEAF_FIRSTN: out << "\tstep chooseleaf firstn " << crush.get_rule_arg1(i, j) << " type "; print_type_name(out, crush.get_rule_arg2(i, j), crush); out << "\n"; break; case CRUSH_RULE_CHOOSELEAF_INDEP: out << "\tstep chooseleaf indep " << crush.get_rule_arg1(i, j) << " type "; print_type_name(out, crush.get_rule_arg2(i, j), crush); out << "\n"; break; } } out << "}\n"; } if (crush.choose_args.size() > 0) { out << "\n# choose_args\n"; for (auto i : crush.choose_args) { int ret = decompile_choose_args(i, out); if (ret) return ret; } } out << "\n# end crush map" << std::endl; return 0; } // ================================================================ string CrushCompiler::string_node(node_t &node) { return boost::trim_copy(string(node.value.begin(), node.value.end())); } int CrushCompiler::int_node(node_t &node) { string str = string_node(node); return strtol(str.c_str(), 0, 10); } float CrushCompiler::float_node(node_t &node) { string s = string_node(node); return strtof(s.c_str(), 0); } int CrushCompiler::parse_device(iter_t const& i) { int id = int_node(i->children[1]); string name = string_node(i->children[2]); crush.set_item_name(id, name.c_str()); if (item_id.count(name)) { err << "item " << name << " defined twice" << std::endl; return -1; } item_id[name] = id; id_item[id] = name; if (verbose) err << "device " << id << " '" << name << "'"; if (i->children.size() > 3) { string c = string_node(i->children[4]); crush.set_item_class(id, c); if (verbose) err << " class" << " '" << c << "'" << std::endl; } else { if (verbose) err << std::endl; } return 0; } int CrushCompiler::parse_tunable(iter_t const& i) { string name = string_node(i->children[1]); int val = int_node(i->children[2]); if (name == "choose_local_tries") crush.set_choose_local_tries(val); else if (name == "choose_local_fallback_tries") crush.set_choose_local_fallback_tries(val); else if (name == "choose_total_tries") crush.set_choose_total_tries(val); else if (name == "chooseleaf_descend_once") crush.set_chooseleaf_descend_once(val); else if (name == "chooseleaf_vary_r") crush.set_chooseleaf_vary_r(val); else if (name == "chooseleaf_stable") crush.set_chooseleaf_stable(val); else if (name == "straw_calc_version") crush.set_straw_calc_version(val); else if (name == "allowed_bucket_algs") crush.set_allowed_bucket_algs(val); else { err << "tunable " << name << " not recognized" << std::endl; return -1; } /* current crop of tunables are all now "safe". re-enable this when we add new ones that are ... new. 
if (!unsafe_tunables) { err << "tunables are NOT FULLY IMPLEMENTED; enable with --enable-unsafe-tunables to enable this feature" << std::endl; return -1; } */ if (verbose) err << "tunable " << name << " " << val << std::endl; return 0; } int CrushCompiler::parse_bucket_type(iter_t const& i) { int id = int_node(i->children[1]); string name = string_node(i->children[2]); if (verbose) err << "type " << id << " '" << name << "'" << std::endl; type_id[name] = id; crush.set_type_name(id, name.c_str()); return 0; } int CrushCompiler::parse_bucket(iter_t const& i) { string tname = string_node(i->children[0]); if (!type_id.count(tname)) { err << "bucket type '" << tname << "' is not defined" << std::endl; return -1; } int type = type_id[tname]; string name = string_node(i->children[1]); if (item_id.count(name)) { err << "bucket or device '" << name << "' is already defined" << std::endl; return -1; } int id = 0; // none, yet! int alg = -1; int hash = 0; set<int> used_items; int size = 0; map<int32_t, int32_t> class_id; for (unsigned p=3; p<i->children.size()-1; p++) { iter_t sub = i->children.begin() + p; string tag = string_node(sub->children[0]); //err << "tag " << tag << std::endl; if (tag == "id") { int maybe_id = int_node(sub->children[1]); if (verbose) err << "bucket " << name << " id " << maybe_id; if (sub->children.size() > 2) { string class_name = string_node(sub->children[3]); // note that we do not verify class existence here, // as this bucket might come from an empty shadow tree // which currently has no OSDs but is still referenced by a rule! int cid = crush.get_or_create_class_id(class_name); if (class_id.count(cid) != 0) { err << "duplicate device class " << class_name << " for bucket " << name << std::endl; return -ERANGE; } class_id[cid] = maybe_id; if (verbose) err << " class" << " '" << class_name << "'" << std::endl; } else { id = maybe_id; if (verbose) err << std::endl; } } else if (tag == "alg") { string a = string_node(sub->children[1]); if (a == "uniform") alg = CRUSH_BUCKET_UNIFORM; else if (a == "list") alg = CRUSH_BUCKET_LIST; else if (a == "tree") alg = CRUSH_BUCKET_TREE; else if (a == "straw") alg = CRUSH_BUCKET_STRAW; else if (a == "straw2") alg = CRUSH_BUCKET_STRAW2; else { err << "unknown bucket alg '" << a << "'" << std::endl << std::endl; return -EINVAL; } } else if (tag == "hash") { string a = string_node(sub->children[1]); if (a == "rjenkins1") hash = CRUSH_HASH_RJENKINS1; else hash = atoi(a.c_str()); } else if (tag == "item") { // first, just determine which item pos's are already used size++; for (unsigned q = 2; q < sub->children.size(); q++) { string tag = string_node(sub->children[q++]); if (tag == "pos") { int pos = int_node(sub->children[q]); if (used_items.count(pos)) { err << "item '" << string_node(sub->children[1]) << "' in bucket '" << name << "' has explicit pos " << pos << ", which is occupied" << std::endl; return -1; } used_items.insert(pos); } } } else ceph_abort(); } // now do the items. 
if (!used_items.empty()) size = std::max(size, *used_items.rbegin()); vector<int> items(size); vector<int> weights(size); int curpos = 0; unsigned bucketweight = 0; bool have_uniform_weight = false; unsigned uniform_weight = 0; for (unsigned p=3; p<i->children.size()-1; p++) { iter_t sub = i->children.begin() + p; string tag = string_node(sub->children[0]); if (tag == "item") { string iname = string_node(sub->children[1]); if (!item_id.count(iname)) { err << "item '" << iname << "' in bucket '" << name << "' is not defined" << std::endl; return -1; } int itemid = item_id[iname]; unsigned weight = 0x10000; if (item_weight.count(itemid)) weight = item_weight[itemid]; int pos = -1; for (unsigned q = 2; q < sub->children.size(); q++) { string tag = string_node(sub->children[q++]); if (tag == "weight") { weight = float_node(sub->children[q]) * (float)0x10000; if (weight > CRUSH_MAX_DEVICE_WEIGHT && itemid >= 0) { err << "device weight limited to " << CRUSH_MAX_DEVICE_WEIGHT / 0x10000 << std::endl; return -ERANGE; } else if (weight > CRUSH_MAX_BUCKET_WEIGHT && itemid < 0) { err << "bucket weight limited to " << CRUSH_MAX_BUCKET_WEIGHT / 0x10000 << " to prevent overflow" << std::endl; return -ERANGE; } } else if (tag == "pos") pos = int_node(sub->children[q]); else ceph_abort(); } if (alg == CRUSH_BUCKET_UNIFORM) { if (!have_uniform_weight) { have_uniform_weight = true; uniform_weight = weight; } else { if (uniform_weight != weight) { err << "item '" << iname << "' in uniform bucket '" << name << "' has weight " << weight << " but previous item(s) have weight " << (float)uniform_weight/(float)0x10000 << "; uniform bucket items must all have identical weights." << std::endl; return -1; } } } if (pos >= size) { err << "item '" << iname << "' in bucket '" << name << "' has pos " << pos << " >= size " << size << std::endl; return -1; } if (pos < 0) { while (used_items.count(curpos)) curpos++; pos = curpos++; } //err << " item " << iname << " (" << itemid << ") pos " << pos << " weight " << weight << std::endl; items[pos] = itemid; weights[pos] = weight; if (crush_addition_is_unsafe(bucketweight, weight)) { err << "oh no! our bucket weights are overflowing all over the place, better lower the item weights" << std::endl; return -ERANGE; } bucketweight += weight; } } if (id == 0) { for (id=-1; id_item.count(id); id--) ; //err << "assigned id " << id << std::endl; } for (auto &i : class_id) class_bucket[id][i.first] = i.second; if (verbose) err << "bucket " << name << " (" << id << ") " << size << " items and weight " << (float)bucketweight / (float)0x10000 << std::endl; id_item[id] = name; item_id[name] = id; item_weight[id] = bucketweight; ceph_assert(id != 0); int idout; int r = crush.add_bucket(id, alg, hash, type, size, items.data(), weights.data(), &idout); if (r < 0) { if (r == -EEXIST) err << "Duplicate bucket id " << id << std::endl; else err << "add_bucket failed " << cpp_strerror(r) << std::endl; return r; } r = crush.set_item_name(id, name.c_str()); return r; } int CrushCompiler::parse_rule(iter_t const& i) { int start; // rule name is optional! 
string rname = string_node(i->children[1]); if (rname != "{") { if (rule_id.count(rname)) { err << "rule name '" << rname << "' already defined\n" << std::endl; return -1; } start = 4; } else { rname = string(); start = 3; } int ruleno = int_node(i->children[start]); string tname = string_node(i->children[start+2]); int type; if (tname == "replicated") type = CEPH_PG_TYPE_REPLICATED; else if (tname == "erasure") type = CEPH_PG_TYPE_ERASURE; else ceph_abort(); // ignore min_size+max_size and find first step int step_start = 0; int steps = 0; for (unsigned p = start + 3; p < i->children.size()-1; ++p) { string tag = string_node(i->children[p]); if (tag == "min_size" || tag == "max_size") { std::cerr << "WARNING: " << tag << " is no longer supported, ignoring" << std::endl; ++p; continue; } // has to be a step--grammer doesn't recognized anything else assert(i->children[p].value.id().to_long() == crush_grammar::_step); step_start = p; steps = i->children.size() - p - 1; break; } //err << "num steps " << steps << " start " << step_start << std::endl; if (crush.rule_exists(ruleno)) { err << "rule " << ruleno << " already exists" << std::endl; return -1; } int r = crush.add_rule(ruleno, steps, type); if (r != ruleno) { err << "unable to add rule id " << ruleno << " for rule '" << rname << "'" << std::endl; return -1; } if (rname.length()) { crush.set_rule_name(ruleno, rname.c_str()); rule_id[rname] = ruleno; } int step = 0; for (iter_t p = i->children.begin() + step_start; step < steps; p++) { iter_t s = p->children.begin() + 1; int stepid = s->value.id().to_long(); switch (stepid) { case crush_grammar::_step_take: { string item = string_node(s->children[1]); if (!item_id.count(item)) { err << "in rule '" << rname << "' item '" << item << "' not defined" << std::endl; return -1; } int id = item_id[item]; int c = -1; string class_name; if (s->children.size() > 2) { class_name = string_node(s->children[3]); c = crush.get_class_id(class_name); if (c < 0) return c; if (crush.class_bucket.count(id) == 0) { err << "in rule '" << rname << "' step take " << item << " has no class information" << std::endl; return -EINVAL; } if (crush.class_bucket[id].count(c) == 0) { err << "in rule '" << rname << "' step take " << item << " no matching bucket for class " << class_name << std::endl; return -EINVAL; } id = crush.class_bucket[id][c]; } if (verbose) { err << "rule " << rname << " take " << item; if (c < 0) err << std::endl; else err << " remapped to " << crush.get_item_name(id) << std::endl; } crush.set_rule_step_take(ruleno, step++, id); } break; case crush_grammar::_step_set_choose_tries: { int val = int_node(s->children[1]); crush.set_rule_step_set_choose_tries(ruleno, step++, val); } break; case crush_grammar::_step_set_choose_local_tries: { int val = int_node(s->children[1]); crush.set_rule_step_set_choose_local_tries(ruleno, step++, val); } break; case crush_grammar::_step_set_choose_local_fallback_tries: { int val = int_node(s->children[1]); crush.set_rule_step_set_choose_local_fallback_tries(ruleno, step++, val); } break; case crush_grammar::_step_set_chooseleaf_tries: { int val = int_node(s->children[1]); crush.set_rule_step_set_chooseleaf_tries(ruleno, step++, val); } break; case crush_grammar::_step_set_chooseleaf_vary_r: { int val = int_node(s->children[1]); crush.set_rule_step_set_chooseleaf_vary_r(ruleno, step++, val); } break; case crush_grammar::_step_set_chooseleaf_stable: { int val = int_node(s->children[1]); crush.set_rule_step_set_chooseleaf_stable(ruleno, step++, val); } break; case 
crush_grammar::_step_choose: case crush_grammar::_step_chooseleaf: { string type = string_node(s->children[4]); if (!type_id.count(type)) { err << "in rule '" << rname << "' type '" << type << "' not defined" << std::endl; return -1; } string choose = string_node(s->children[0]); string mode = string_node(s->children[1]); if (choose == "choose") { if (mode == "firstn") crush.set_rule_step_choose_firstn(ruleno, step++, int_node(s->children[2]), type_id[type]); else if (mode == "indep") crush.set_rule_step_choose_indep(ruleno, step++, int_node(s->children[2]), type_id[type]); else ceph_abort(); } else if (choose == "chooseleaf") { if (mode == "firstn") crush.set_rule_step_choose_leaf_firstn(ruleno, step++, int_node(s->children[2]), type_id[type]); else if (mode == "indep") crush.set_rule_step_choose_leaf_indep(ruleno, step++, int_node(s->children[2]), type_id[type]); else ceph_abort(); } else ceph_abort(); } break; case crush_grammar::_step_emit: crush.set_rule_step_emit(ruleno, step++); break; default: err << "bad crush step " << stepid << std::endl; return -1; } } ceph_assert(step == steps); return 0; } int CrushCompiler::parse_weight_set_weights(iter_t const& i, int bucket_id, crush_weight_set *weight_set) { // -2 for the enclosing [ ] __u32 size = i->children.size() - 2; __u32 bucket_size = crush.get_bucket_size(bucket_id); if (size != bucket_size) { err << bucket_id << " needs exactly " << bucket_size << " weights but got " << size << std::endl; return -1; } weight_set->size = size; weight_set->weights = (__u32 *)calloc(weight_set->size, sizeof(__u32)); __u32 pos = 0; for (iter_t p = i->children.begin() + 1; p != i->children.end(); p++, pos++) if (pos < size) weight_set->weights[pos] = float_node(*p) * (float)0x10000; return 0; } int CrushCompiler::parse_weight_set(iter_t const& i, int bucket_id, crush_choose_arg *arg) { // -3 stands for the leading "weight_set" keyword and the enclosing [ ] arg->weight_set_positions = i->children.size() - 3; arg->weight_set = (crush_weight_set *)calloc(arg->weight_set_positions, sizeof(crush_weight_set)); __u32 pos = 0; for (iter_t p = i->children.begin(); p != i->children.end(); p++) { int r = 0; switch((int)p->value.id().to_long()) { case crush_grammar::_weight_set_weights: if (pos < arg->weight_set_positions) { r = parse_weight_set_weights(p, bucket_id, &arg->weight_set[pos]); pos++; } else { err << "invalid weight_set syntax" << std::endl; r = -1; } } if (r < 0) return r; } return 0; } int CrushCompiler::parse_choose_arg_ids(iter_t const& i, int bucket_id, crush_choose_arg *arg) { // -3 for the leading "ids" keyword and the enclosing [ ] __u32 size = i->children.size() - 3; __u32 bucket_size = crush.get_bucket_size(bucket_id); if (size != bucket_size) { err << bucket_id << " needs exactly " << bucket_size << " ids but got " << size << std::endl; return -1; } arg->ids_size = size; arg->ids = (__s32 *)calloc(arg->ids_size, sizeof(__s32)); __u32 pos = 0; for (iter_t p = i->children.begin() + 2; pos < size; p++, pos++) arg->ids[pos] = int_node(*p); return 0; } int CrushCompiler::parse_choose_arg(iter_t const& i, crush_choose_arg *args) { int bucket_id = int_node(i->children[2]); if (-1-bucket_id < 0 || -1-bucket_id >= crush.get_max_buckets()) { err << bucket_id << " is out of range" << std::endl; return -1; } if (!crush.bucket_exists(bucket_id)) { err << bucket_id << " does not exist" << std::endl; return -1; } crush_choose_arg *arg = &args[-1-bucket_id]; for (iter_t p = i->children.begin(); p != i->children.end(); p++) { int r = 0; 
switch((int)p->value.id().to_long()) { case crush_grammar::_weight_set: r = parse_weight_set(p, bucket_id, arg); break; case crush_grammar::_choose_arg_ids: r = parse_choose_arg_ids(p, bucket_id, arg); break; } if (r < 0) return r; } return 0; } int CrushCompiler::parse_choose_args(iter_t const& i) { int choose_arg_index = int_node(i->children[1]); if (crush.choose_args.find(choose_arg_index) != crush.choose_args.end()) { err << choose_arg_index << " duplicated" << std::endl; return -1; } const auto max_buckets = crush.get_max_buckets(); if (max_buckets < 0) { err << "get_max_buckets() returned error" << std::endl; return -1; } crush_choose_arg_map arg_map; arg_map.size = max_buckets; arg_map.args = (crush_choose_arg *)calloc(arg_map.size, sizeof(crush_choose_arg)); for (iter_t p = i->children.begin() + 2; p != i->children.end(); p++) { int r = 0; switch((int)p->value.id().to_long()) { case crush_grammar::_choose_arg: r = parse_choose_arg(p, arg_map.args); break; } if (r < 0) { crush.destroy_choose_args(arg_map); return r; } } crush.choose_args[choose_arg_index] = arg_map; return 0; } void CrushCompiler::find_used_bucket_ids(iter_t const& i) { for (iter_t p = i->children.begin(); p != i->children.end(); p++) { if ((int)p->value.id().to_long() == crush_grammar::_bucket) { for (iter_t firstline = p->children.begin() + 3; firstline != p->children.end(); ++firstline) { string tag = string_node(firstline->children[0]); if (tag != "id") { break; } int id = int_node(firstline->children[1]); //err << "saw bucket id " << id << std::endl; id_item[id] = string(); } } } } int CrushCompiler::parse_crush(iter_t const& i) { find_used_bucket_ids(i); bool saw_rule = false; for (iter_t p = i->children.begin(); p != i->children.end(); p++) { int r = 0; switch (p->value.id().to_long()) { case crush_grammar::_tunable: r = parse_tunable(p); break; case crush_grammar::_device: r = parse_device(p); break; case crush_grammar::_bucket_type: r = parse_bucket_type(p); break; case crush_grammar::_bucket: if (saw_rule) { err << "buckets must be defined before rules" << std::endl; return -1; } r = parse_bucket(p); break; case crush_grammar::_crushrule: if (!saw_rule) { saw_rule = true; crush.populate_classes(class_bucket); } r = parse_rule(p); break; case crush_grammar::_choose_args: r = parse_choose_args(p); break; default: ceph_abort(); } if (r < 0) { return r; } } //err << "max_devices " << crush.get_max_devices() << std::endl; crush.finalize(); return 0; } // squash runs of whitespace to one space, excepting newlines string CrushCompiler::consolidate_whitespace(string in) { string out; bool white = false; for (unsigned p=0; p<in.length(); p++) { if (isspace(in[p]) && in[p] != '\n') { if (white) continue; white = true; } else { if (white) { if (out.length()) out += " "; white = false; } out += in[p]; } } if (verbose > 3) err << " \"" << in << "\" -> \"" << out << "\"" << std::endl; return out; } void CrushCompiler::dump(iter_t const& i, int ind) { err << "dump"; for (int j=0; j<ind; j++) cout << "\t"; long id = i->value.id().to_long(); err << id << "\t"; err << "'" << string(i->value.begin(), i->value.end()) << "' " << i->children.size() << " children" << std::endl; for (unsigned int j = 0; j < i->children.size(); j++) dump(i->children.begin() + j, ind+1); } /** * This function fix the problem like below * rack using_foo { item foo } * host foo { ... } * * if an item being used by a bucket is defined after that bucket. 
* CRUSH compiler will create a map by which we can * not identify that item when selecting in that bucket. **/ int CrushCompiler::adjust_bucket_item_place(iter_t const &i) { map<string,set<string> > bucket_items; map<string,iter_t> bucket_itrer; vector<string> buckets; for (iter_t p = i->children.begin(); p != i->children.end(); ++p) { if ((int)p->value.id().to_long() == crush_grammar::_bucket) { string name = string_node(p->children[1]); buckets.push_back(name); bucket_itrer[name] = p; //skip non-bucket-item children in the bucket's parse tree for (unsigned q=3; q < p->children.size()-1; ++q) { iter_t sub = p->children.begin() + q; if ((int)sub->value.id().to_long() == crush_grammar::_bucket_item) { string iname = string_node(sub->children[1]); bucket_items[name].insert(iname); } } } } //adjust the bucket for (unsigned i=0; i < buckets.size(); ++i) { for (unsigned j=i+1; j < buckets.size(); ++j) { if (bucket_items[buckets[i]].count(buckets[j])) { if (bucket_items[buckets[j]].count(buckets[i])) { err << "bucket '" << buckets[i] << "' and bucket '" << buckets[j] << "' are included each other" << std::endl; return -1; } else { std::iter_swap(bucket_itrer[buckets[i]], bucket_itrer[buckets[j]]); } } } } return 0; } int CrushCompiler::compile(istream& in, const char *infn) { if (!infn) infn = "<input>"; // always start with legacy tunables, so that the compiled result of // a given crush file is fixed for all time. crush.set_tunables_legacy(); string big; string str; int line = 1; map<int,int> line_pos; // pos -> line map<int,string> line_val; while (getline(in, str)) { // remove newline int l = str.length(); if (l && str[l - 1] == '\n') str.erase(l-1, 1); line_val[line] = str; // strip comment int n = str.find("#"); if (n >= 0) str.erase(n, str.length()-n); if (verbose>1) err << line << ": " << str << std::endl; // work around spirit crankiness by removing extraneous // whitespace. there is probably a more elegant solution, but // this only broke with the latest spirit (with the switchover to // "classic"), i don't want to spend too much time figuring it // out. string stripped = consolidate_whitespace(str); if (stripped.length() && big.length() && big[big.length()-1] != ' ') big += " "; line_pos[big.length()] = line; line++; big += stripped; } if (verbose > 2) err << "whole file is: \"" << big << "\"" << std::endl; crush_grammar crushg; const char *start = big.c_str(); //tree_parse_info<const char *> info = ast_parse(start, crushg, space_p); auto info = ast_parse(start, crushg, boost::spirit::space_p); // parse error? if (!info.full) { int cpos = info.stop - start; //out << "cpos " << cpos << std::endl; //out << " linemap " << line_pos << std::endl; ceph_assert(!line_pos.empty()); map<int,int>::iterator p = line_pos.upper_bound(cpos); if (p != line_pos.begin()) --p; int line = p->second; int pos = cpos - p->first; err << infn << ":" << line //<< ":" << (pos+1) << " error: parse error at '" << line_val[line].substr(pos) << "'" << std::endl; return -1; } int r = adjust_bucket_item_place(info.trees.begin()); if (r < 0) { return r; } //out << "parsing succeeded\n"; //dump(info.trees.begin()); return parse_crush(info.trees.begin()); }
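// --------------------------------------------------------------------------
// Editor's note: the function below is an illustrative sketch, not part of
// the original CrushCompiler.cc.  It shows the usual compile -> decompile
// round trip that a small driver (crushtool does essentially this) performs
// with the API defined above; the name round_trip_crush_text and the stream
// parameters are hypothetical, and the file's existing includes are assumed
// to be sufficient.
int round_trip_crush_text(std::istream& map_text, std::ostream& out,
                          std::ostream& errs)
{
  CrushWrapper crush;                        // starts empty; compile() fills it
  CrushCompiler cc(crush, errs, 0 /* verbosity */);
  int r = cc.compile(map_text, "<input>");   // parse errors are reported to errs
  if (r < 0)
    return r;                                // e.g. -1 on a syntax error
  return cc.decompile(out);                  // write the canonical text form back
}
// --------------------------------------------------------------------------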
37,841
28.266821
151
cc
null
ceph-main/src/crush/CrushCompiler.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_CRUSH_COMPILER_H #define CEPH_CRUSH_COMPILER_H #include "crush/CrushWrapper.h" #include "crush/grammar.h" #include <map> #include <iostream> class CrushCompiler { CrushWrapper& crush; std::ostream& err; int verbose; bool unsafe_tunables; // decompile enum dcb_state_t { DCB_STATE_IN_PROGRESS = 0, DCB_STATE_DONE }; int decompile_weight_set_weights(crush_weight_set weight_set, std::ostream &out); int decompile_weight_set(crush_weight_set *weight_set, __u32 size, std::ostream &out); int decompile_choose_arg(crush_choose_arg *arg, int bucket_id, std::ostream &out); int decompile_ids(int *ids, __u32 size, std::ostream &out); int decompile_choose_arg_map(crush_choose_arg_map arg_map, std::ostream &out); int decompile_choose_args(const std::pair<const long unsigned int, crush_choose_arg_map> &i, std::ostream &out); int decompile_bucket_impl(int i, std::ostream &out); int decompile_bucket(int cur, std::map<int, dcb_state_t>& dcb_states, std::ostream &out); // compile typedef char const* iterator_t; typedef boost::spirit::tree_match<iterator_t> parse_tree_match_t; typedef parse_tree_match_t::tree_iterator iter_t; typedef parse_tree_match_t::node_t node_t; std::map<std::string, int> item_id; std::map<int, std::string> id_item; std::map<int, unsigned> item_weight; std::map<std::string, int> type_id; std::map<std::string, int> rule_id; std::map<int32_t, std::map<int32_t, int32_t> > class_bucket; // bucket id -> class id -> shadow bucket id std::string string_node(node_t &node); int int_node(node_t &node); float float_node(node_t &node); int parse_tunable(iter_t const& i); int parse_device(iter_t const& i); int parse_bucket_type(iter_t const& i); int parse_bucket(iter_t const& i); int parse_rule(iter_t const& i); int parse_weight_set_weights(iter_t const& i, int bucket_id, crush_weight_set *weight_set); int parse_weight_set(iter_t const& i, int bucket_id, crush_choose_arg *arg); int parse_choose_arg_ids(iter_t const& i, int bucket_id, crush_choose_arg *args); int parse_choose_arg(iter_t const& i, crush_choose_arg *args); int parse_choose_args(iter_t const& i); void find_used_bucket_ids(iter_t const& i); int parse_crush(iter_t const& i); void dump(iter_t const& i, int ind=1); std::string consolidate_whitespace(std::string in); int adjust_bucket_item_place(iter_t const &i); public: CrushCompiler(CrushWrapper& c, std::ostream& eo, int verbosity=0) : crush(c), err(eo), verbose(verbosity), unsafe_tunables(false) {} ~CrushCompiler() {} void enable_unsafe_tunables() { unsafe_tunables = true; } int decompile(std::ostream& out); int compile(std::istream& in, const char *infn=0); }; #endif
2,949
30.72043
107
h
null
ceph-main/src/crush/CrushLocation.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include <vector> #include "CrushLocation.h" #include "CrushWrapper.h" #include "common/ceph_context.h" #include "common/config.h" #include "common/debug.h" #include "common/errno.h" #include "include/common_fwd.h" #include "include/compat.h" #include "include/str_list.h" namespace ceph::crush { int CrushLocation::update_from_conf() { if (cct->_conf->crush_location.length()) return _parse(cct->_conf->crush_location); return 0; } int CrushLocation::_parse(const std::string& s) { std::multimap<std::string,std::string> new_crush_location; std::vector<std::string> lvec; get_str_vec(s, ";, \t", lvec); int r = CrushWrapper::parse_loc_multimap(lvec, &new_crush_location); if (r < 0) { lderr(cct) << "warning: crush_location '" << cct->_conf->crush_location << "' does not parse, keeping original crush_location " << loc << dendl; return -EINVAL; } std::lock_guard l(lock); loc.swap(new_crush_location); lgeneric_dout(cct, 10) << "crush_location is " << loc << dendl; return 0; } int CrushLocation::update_from_hook() { if (cct->_conf->crush_location_hook.length() == 0) return 0; #if defined(WITH_SEASTAR) && !defined(WITH_ALIEN) ceph_abort_msg("crimson does not support crush_location_hook, it must stay empty"); #else if (0 != access(cct->_conf->crush_location_hook.c_str(), R_OK)) { lderr(cct) << "the user define crush location hook: " << cct->_conf->crush_location_hook << " may not exist or can not access it" << dendl; return errno; } SubProcessTimed hook( cct->_conf->crush_location_hook.c_str(), SubProcess::CLOSE, SubProcess::PIPE, SubProcess::PIPE, cct->_conf->crush_location_hook_timeout); hook.add_cmd_args( "--cluster", cct->_conf->cluster.c_str(), "--id", cct->_conf->name.get_id().c_str(), "--type", cct->_conf->name.get_type_str(), NULL); int ret = hook.spawn(); if (ret != 0) { lderr(cct) << "error: failed run " << cct->_conf->crush_location_hook << ": " << hook.err() << dendl; return ret; } ceph::buffer::list bl; ret = bl.read_fd(hook.get_stdout(), 100 * 1024); if (ret < 0) { lderr(cct) << "error: failed read stdout from " << cct->_conf->crush_location_hook << ": " << cpp_strerror(-ret) << dendl; ceph::buffer::list err; err.read_fd(hook.get_stderr(), 100 * 1024); lderr(cct) << "stderr:\n"; err.hexdump(*_dout); *_dout << dendl; } if (hook.join() != 0) { lderr(cct) << "error: failed to join: " << hook.err() << dendl; return -EINVAL; } if (ret < 0) return ret; std::string out; bl.begin().copy(bl.length(), out); out.erase(out.find_last_not_of(" \n\r\t")+1); return _parse(out); #endif // WITH_SEASTAR && !WITH_ALIEN } int CrushLocation::init_on_startup() { if (cct->_conf->crush_location.length()) { return update_from_conf(); } if (cct->_conf->crush_location_hook.length()) { return update_from_hook(); } // start with a sane default char hostname[HOST_NAME_MAX + 1]; int r = gethostname(hostname, sizeof(hostname)); if (r < 0) strcpy(hostname, "unknown_host"); // use short hostname for (unsigned i=0; hostname[i]; ++i) { if (hostname[i] == '.') { hostname[i] = '\0'; break; } } std::lock_guard l(lock); loc.clear(); loc.insert(std::make_pair<std::string,std::string>("host", hostname)); loc.insert(std::make_pair<std::string,std::string>("root", "default")); return 0; } std::multimap<std::string,std::string> CrushLocation::get_location() const { std::lock_guard l(lock); return loc; } std::ostream& operator<<(std::ostream& os, const CrushLocation& loc) { bool first = true; for (auto& [type, pos] : loc.get_location()) { 
if (first) { first = false; } else { os << ", "; } os << '"' << type << '=' << pos << '"'; } return os; } }
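// --------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the original
// CrushLocation.cc.  It spells out what _parse() above accepts: a
// crush_location string is split on ";, \t" and every token must look like
// "key=value".  The sample string and the name parse_location_example are
// hypothetical; the helpers used are the same ones _parse() calls.
int parse_location_example()
{
  const std::string sample = "host=gandalf rack=r1 root=default";
  std::vector<std::string> tokens;
  get_str_vec(sample, ";, \t", tokens);   // -> "host=gandalf", "rack=r1", "root=default"

  std::multimap<std::string, std::string> loc;
  int r = CrushWrapper::parse_loc_multimap(tokens, &loc);
  // On success loc holds {host -> gandalf, rack -> r1, root -> default},
  // the same shape CrushLocation::get_location() returns; r < 0 means some
  // token did not look like key=value.
  return r;
}
// --------------------------------------------------------------------------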
3,999
25.666667
92
cc
null
ceph-main/src/crush/CrushLocation.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_CRUSH_LOCATION_H #define CEPH_CRUSH_LOCATION_H #include <iosfwd> #include <map> #include <string> #if FMT_VERSION >= 90000 #include <fmt/ostream.h> #endif #include "common/ceph_mutex.h" #include "include/common_fwd.h" namespace ceph::crush { class CrushLocation { public: explicit CrushLocation(CephContext *c) : cct(c) { init_on_startup(); } int update_from_conf(); ///< refresh from config int update_from_hook(); ///< call hook, if present int init_on_startup(); std::multimap<std::string,std::string> get_location() const; private: int _parse(const std::string& s); CephContext *cct; std::multimap<std::string,std::string> loc; mutable ceph::mutex lock = ceph::make_mutex("CrushLocation"); }; std::ostream& operator<<(std::ostream& os, const CrushLocation& loc); } #if FMT_VERSION >= 90000 template <> struct fmt::formatter<ceph::crush::CrushLocation> : fmt::ostream_formatter {}; #endif #endif
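// --------------------------------------------------------------------------
// Editor's note: illustrative usage note, not part of the original header.
// A CrushLocation is normally built once per daemon from its CephContext:
// init_on_startup() takes the location from the crush_location config option
// if set, otherwise from the crush_location_hook executable, and otherwise
// falls back to host=<short hostname> plus root=default.  get_location()
// returns that multimap, e.g. {host -> gandalf, root -> default}, and
// operator<< renders it as: "host=gandalf", "root=default".
// --------------------------------------------------------------------------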
1,047
21.782609
90
h
null
ceph-main/src/crush/CrushTester.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include <algorithm> #include <cstdlib> #include <iostream> #include <boost/lexical_cast.hpp> #include <boost/icl/interval_map.hpp> #include <boost/algorithm/string/join.hpp> #include "common/SubProcess.h" #include "common/fork_function.h" #include "include/stringify.h" #include "CrushTester.h" #include "CrushTreeDumper.h" #include "common/ceph_context.h" #include "include/ceph_features.h" #include "common/debug.h" #define dout_subsys ceph_subsys_crush #undef dout_prefix #define dout_prefix *_dout << "CrushTester: " using std::cerr; using std::cout; using std::map; using std::ostringstream; using std::string; using std::stringstream; using std::vector; void CrushTester::set_device_weight(int dev, float f) { int w = (int)(f * 0x10000); if (w < 0) w = 0; if (w > 0x10000) w = 0x10000; device_weight[dev] = w; } int CrushTester::get_maximum_affected_by_rule(int ruleno) { // get the number of steps in RULENO int rule_size = crush.get_rule_len(ruleno); vector<int> affected_types; map<int,int> replications_by_type; for (int i = 0; i < rule_size; i++){ // get what operation is done by the current step int rule_operation = crush.get_rule_op(ruleno, i); // if the operation specifies choosing a device type, store it if (rule_operation >= 2 && rule_operation != 4){ int desired_replication = crush.get_rule_arg1(ruleno,i); int affected_type = crush.get_rule_arg2(ruleno,i); affected_types.push_back(affected_type); replications_by_type[affected_type] = desired_replication; } } /* * now for each of the affected bucket types, see what is the * maximum we are (a) requesting or (b) have */ map<int,int> max_devices_of_type; // loop through the vector of affected types for (vector<int>::iterator it = affected_types.begin(); it != affected_types.end(); ++it){ // loop through the number of buckets looking for affected types for (map<int,string>::iterator p = crush.name_map.begin(); p != crush.name_map.end(); ++p){ int bucket_type = crush.get_bucket_type(p->first); if ( bucket_type == *it) max_devices_of_type[*it]++; } } for(std::vector<int>::iterator it = affected_types.begin(); it != affected_types.end(); ++it){ if ( replications_by_type[*it] > 0 && replications_by_type[*it] < max_devices_of_type[*it] ) max_devices_of_type[*it] = replications_by_type[*it]; } /* * get the smallest number of buckets available of any type as this is our upper bound on * the number of replicas we can place */ int max_affected = std::max( crush.get_max_buckets(), crush.get_max_devices() ); for(std::vector<int>::iterator it = affected_types.begin(); it != affected_types.end(); ++it){ if (max_devices_of_type[*it] > 0 && max_devices_of_type[*it] < max_affected ) max_affected = max_devices_of_type[*it]; } return max_affected; } map<int,int> CrushTester::get_collapsed_mapping() { int num_to_check = crush.get_max_devices(); int next_id = 0; map<int, int> collapse_mask; for (int i = 0; i < num_to_check; i++){ if (crush.check_item_present(i)){ collapse_mask[i] = next_id; next_id++; } } return collapse_mask; } void CrushTester::adjust_weights(vector<__u32>& weight) { if (mark_down_device_ratio > 0) { // active buckets vector<int> bucket_ids; for (int i = 0; i < crush.get_max_buckets(); i++) { int id = -1 - i; if (crush.get_bucket_weight(id) > 0) { bucket_ids.push_back(id); } } // get buckets that are one level above a device vector<int> buckets_above_devices; for (unsigned i = 0; i < bucket_ids.size(); i++) { // grab the first child object of a 
bucket and check if it's ID is less than 0 int id = bucket_ids[i]; if (crush.get_bucket_size(id) == 0) continue; int first_child = crush.get_bucket_item(id, 0); // returns the ID of the bucket or device if (first_child >= 0) { buckets_above_devices.push_back(id); } } // permute bucket list for (unsigned i = 0; i < buckets_above_devices.size(); i++) { unsigned j = lrand48() % (buckets_above_devices.size() - 1); std::swap(buckets_above_devices[i], buckets_above_devices[j]); } // calculate how many buckets and devices we need to reap... int num_buckets_to_visit = (int) (mark_down_bucket_ratio * buckets_above_devices.size()); for (int i = 0; i < num_buckets_to_visit; i++) { int id = buckets_above_devices[i]; int size = crush.get_bucket_size(id); vector<int> items; for (int o = 0; o < size; o++) items.push_back(crush.get_bucket_item(id, o)); // permute items for (int o = 0; o < size; o++) { int j = lrand48() % (crush.get_bucket_size(id) - 1); std::swap(items[o], items[j]); } int local_devices_to_visit = (int) (mark_down_device_ratio*size); for (int o = 0; o < local_devices_to_visit; o++){ int item = crush.get_bucket_item(id, o); weight[item] = 0; } } } } bool CrushTester::check_valid_placement(int ruleno, vector<int> in, const vector<__u32>& weight) { bool valid_placement = true; vector<int> included_devices; map<string,string> seen_devices; // first do the easy check that all devices are "up" for (vector<int>::iterator it = in.begin(); it != in.end(); ++it) { if (weight[(*it)] == 0) { valid_placement = false; break; } else if (weight[(*it)] > 0) { included_devices.push_back( (*it) ); } } /* * now do the harder test of checking that the CRUSH rule r is not violated * we could test that none of the devices mentioned in out are unique, * but this is a special case of this test */ // get the number of steps in RULENO int rule_size = crush.get_rule_len(ruleno); vector<string> affected_types; // get the smallest type id, and name int min_map_type = crush.get_num_type_names(); for (map<int,string>::iterator it = crush.type_map.begin(); it != crush.type_map.end(); ++it ) { if ( (*it).first < min_map_type ) { min_map_type = (*it).first; } } string min_map_type_name = crush.type_map[min_map_type]; // get the types of devices affected by RULENO for (int i = 0; i < rule_size; i++) { // get what operation is done by the current step int rule_operation = crush.get_rule_op(ruleno, i); // if the operation specifies choosing a device type, store it if (rule_operation >= 2 && rule_operation != 4) { int affected_type = crush.get_rule_arg2(ruleno,i); affected_types.push_back( crush.get_type_name(affected_type)); } } // find in if we are only dealing with osd's bool only_osd_affected = false; if (affected_types.size() == 1) { if ((affected_types.back() == min_map_type_name) && (min_map_type_name == "osd")) { only_osd_affected = true; } } // check that we don't have any duplicate id's for (vector<int>::iterator it = included_devices.begin(); it != included_devices.end(); ++it) { int num_copies = std::count(included_devices.begin(), included_devices.end(), (*it) ); if (num_copies > 1) { valid_placement = false; } } // if we have more than just osd's affected we need to do a lot more work if (!only_osd_affected) { // loop through the devices that are "in/up" for (vector<int>::iterator it = included_devices.begin(); it != included_devices.end(); ++it) { if (valid_placement == false) break; // create a temporary map of the form (device type, device name in map) map<string,string> device_location_hierarchy = 
crush.get_full_location(*it); // loop over the types affected by RULENO looking for duplicate bucket assignments for (vector<string>::iterator t = affected_types.begin(); t != affected_types.end(); ++t) { if (seen_devices.count( device_location_hierarchy[*t])) { valid_placement = false; break; } else { // store the devices we have seen in the form of (device name, device type) seen_devices[ device_location_hierarchy[*t] ] = *t; } } } } return valid_placement; } int CrushTester::random_placement(int ruleno, vector<int>& out, int maxout, vector<__u32>& weight) { // get the total weight of the system int total_weight = 0; for (unsigned i = 0; i < weight.size(); i++) total_weight += weight[i]; if (total_weight == 0 || crush.get_max_devices() == 0) return -EINVAL; // determine the real maximum number of devices to return int devices_requested = std::min(maxout, get_maximum_affected_by_rule(ruleno)); bool accept_placement = false; vector<int> trial_placement(devices_requested); int attempted_tries = 0; int max_tries = 100; do { // create a vector to hold our trial mappings int temp_array[devices_requested]; for (int i = 0; i < devices_requested; i++){ temp_array[i] = lrand48() % (crush.get_max_devices()); } trial_placement.assign(temp_array, temp_array + devices_requested); accept_placement = check_valid_placement(ruleno, trial_placement, weight); attempted_tries++; } while (accept_placement == false && attempted_tries < max_tries); // save our random placement to the out vector if (accept_placement) out.assign(trial_placement.begin(), trial_placement.end()); // or don't.... else if (attempted_tries == max_tries) return -EINVAL; return 0; } void CrushTester::write_integer_indexed_vector_data_string(vector<string> &dst, int index, vector<int> vector_data) { stringstream data_buffer (stringstream::in | stringstream::out); unsigned input_size = vector_data.size(); // pass the indexing variable to the data buffer data_buffer << index; // pass the rest of the input data to the buffer for (unsigned i = 0; i < input_size; i++) { data_buffer << ',' << vector_data[i]; } data_buffer << std::endl; // write the data buffer to the destination dst.push_back( data_buffer.str() ); } void CrushTester::write_integer_indexed_vector_data_string(vector<string> &dst, int index, vector<float> vector_data) { stringstream data_buffer (stringstream::in | stringstream::out); unsigned input_size = vector_data.size(); // pass the indexing variable to the data buffer data_buffer << index; // pass the rest of the input data to the buffer for (unsigned i = 0; i < input_size; i++) { data_buffer << ',' << vector_data[i]; } data_buffer << std::endl; // write the data buffer to the destination dst.push_back( data_buffer.str() ); } void CrushTester::write_integer_indexed_scalar_data_string(vector<string> &dst, int index, int scalar_data) { stringstream data_buffer (stringstream::in | stringstream::out); // pass the indexing variable to the data buffer data_buffer << index; // pass the input data to the buffer data_buffer << ',' << scalar_data; data_buffer << std::endl; // write the data buffer to the destination dst.push_back( data_buffer.str() ); } void CrushTester::write_integer_indexed_scalar_data_string(vector<string> &dst, int index, float scalar_data) { stringstream data_buffer (stringstream::in | stringstream::out); // pass the indexing variable to the data buffer data_buffer << index; // pass the input data to the buffer data_buffer << ',' << scalar_data; data_buffer << std::endl; // write the data buffer to the destination 
dst.push_back( data_buffer.str() ); } int CrushTester::test_with_fork(CephContext* cct, int timeout) { ldout(cct, 20) << __func__ << dendl; ostringstream sink; int r = fork_function(timeout, sink, [&]() { return test(cct); }); if (r == -ETIMEDOUT) { err << "timed out during smoke test (" << timeout << " seconds)"; } return r; } namespace { class BadCrushMap : public std::runtime_error { public: int item; BadCrushMap(const char* msg, int id) : std::runtime_error(msg), item(id) {} }; // throws if any node in the crush fail to print class CrushWalker : public CrushTreeDumper::Dumper<void> { typedef void DumbFormatter; typedef CrushTreeDumper::Dumper<DumbFormatter> Parent; int max_id; public: CrushWalker(const CrushWrapper *crush, unsigned max_id) : Parent(crush, CrushTreeDumper::name_map_t()), max_id(max_id) {} void dump_item(const CrushTreeDumper::Item &qi, DumbFormatter *) override { int type = -1; if (qi.is_bucket()) { if (!crush->get_item_name(qi.id)) { throw BadCrushMap("unknown item name", qi.id); } type = crush->get_bucket_type(qi.id); } else { if (max_id > 0 && qi.id >= max_id) { throw BadCrushMap("item id too large", qi.id); } type = 0; } if (!crush->get_type_name(type)) { throw BadCrushMap("unknown type name", qi.id); } } }; } bool CrushTester::check_name_maps(unsigned max_id) const { CrushWalker crush_walker(&crush, max_id); try { // walk through the crush, to see if its self-contained crush_walker.dump(NULL); // and see if the maps is also able to handle straying OSDs, whose id >= 0. // "ceph osd tree" will try to print them, even they are not listed in the // crush map. crush_walker.dump_item(CrushTreeDumper::Item(0, 0, 0, 0), NULL); } catch (const BadCrushMap& e) { err << e.what() << ": item#" << e.item << std::endl; return false; } return true; } int CrushTester::test(CephContext* cct) { ldout(cct, 20) << dendl; if (min_rule < 0 || max_rule < 0) { min_rule = 0; max_rule = crush.get_max_rules() - 1; } if (min_x < 0 || max_x < 0) { min_x = 0; max_x = 1023; } if (min_rep < 0 && max_rep < 0) { cerr << "must specify --num-rep or both --min-rep and --max-rep" << std::endl; return -EINVAL; } // initial osd weights vector<__u32> weight; /* * note device weight is set by crushtool * (likely due to a given a command line option) */ for (int o = 0; o < crush.get_max_devices(); o++) { if (device_weight.count(o)) { weight.push_back(device_weight[o]); } else if (crush.check_item_present(o)) { weight.push_back(0x10000); } else { weight.push_back(0); } } if (output_utilization_all) cerr << "devices weights (hex): " << std::hex << weight << std::dec << std::endl; // make adjustments adjust_weights(weight); int num_devices_active = 0; for (vector<__u32>::iterator p = weight.begin(); p != weight.end(); ++p) if (*p > 0) num_devices_active++; if (output_choose_tries) crush.start_choose_profile(); for (int r = min_rule; r < crush.get_max_rules() && r <= max_rule; r++) { ldout(cct, 20) << "rule: " << r << dendl; if (!crush.rule_exists(r)) { if (output_statistics) err << "rule " << r << " dne" << std::endl; continue; } if (output_statistics) err << "rule " << r << " (" << crush.get_rule_name(r) << "), x = " << min_x << ".." << max_x << ", numrep = " << min_rep << ".." 
<< max_rep << std::endl; for (int nr = min_rep; nr <= max_rep; nr++) { ldout(cct, 20) << "current numrep: " << nr << dendl; vector<int> per(crush.get_max_devices()); map<int,int> sizes; int num_objects = ((max_x - min_x) + 1); float num_devices = (float) per.size(); // get the total number of devices, better to cast as a float here // create a structure to hold data for post-processing tester_data_set tester_data; vector<float> vector_data_buffer_f; // create a map to hold batch-level placement information map<int, vector<int> > batch_per; int objects_per_batch = num_objects / num_batches; int batch_min = min_x; int batch_max = min_x + objects_per_batch - 1; // get the total weight of the system int total_weight = 0; for (unsigned i = 0; i < per.size(); i++) total_weight += weight[i]; if (total_weight == 0) continue; // compute the expected number of objects stored per device in the absence of weighting float expected_objects = std::min(nr, get_maximum_affected_by_rule(r)) * num_objects; // compute each device's proportional weight vector<float> proportional_weights( per.size() ); for (unsigned i = 0; i < per.size(); i++) proportional_weights[i] = (float) weight[i] / (float) total_weight; if (output_data_file) { // stage the absolute weight information for post-processing for (unsigned i = 0; i < per.size(); i++) { tester_data.absolute_weights[i] = (float) weight[i] / (float)0x10000; } // stage the proportional weight information for post-processing for (unsigned i = 0; i < per.size(); i++) { if (proportional_weights[i] > 0 ) tester_data.proportional_weights[i] = proportional_weights[i]; tester_data.proportional_weights_all[i] = proportional_weights[i]; } } // compute the expected number of objects stored per device when a device's weight is considered vector<float> num_objects_expected(num_devices); for (unsigned i = 0; i < num_devices; i++) num_objects_expected[i] = (proportional_weights[i]*expected_objects); for (int current_batch = 0; current_batch < num_batches; current_batch++) { if (current_batch == (num_batches - 1)) { batch_max = max_x; objects_per_batch = (batch_max - batch_min + 1); } float batch_expected_objects = std::min(nr, get_maximum_affected_by_rule(r)) * objects_per_batch; vector<float> batch_num_objects_expected( per.size() ); for (unsigned i = 0; i < per.size() ; i++) batch_num_objects_expected[i] = (proportional_weights[i]*batch_expected_objects); // create a vector to hold placement results temporarily vector<int> temporary_per ( per.size() ); for (int x = batch_min; x <= batch_max; x++) { // create a vector to hold the results of a CRUSH placement or RNG simulation vector<int> out; if (use_crush) { if (output_mappings) err << "CRUSH"; // prepend CRUSH to placement output uint32_t real_x = x; if (pool_id != -1) { real_x = crush_hash32_2(CRUSH_HASH_RJENKINS1, x, (uint32_t)pool_id); } crush.do_rule(r, real_x, out, nr, weight, 0); } else { if (output_mappings) err << "RNG"; // prepend RNG to placement output to denote simulation // test our new monte carlo placement generator random_placement(r, out, nr, weight); } if (output_mappings) err << " rule " << r << " x " << x << " " << out << std::endl; if (output_data_file) write_integer_indexed_vector_data_string(tester_data.placement_information, x, out); bool has_item_none = false; for (unsigned i = 0; i < out.size(); i++) { if (out[i] != CRUSH_ITEM_NONE) { per[out[i]]++; temporary_per[out[i]]++; } else { has_item_none = true; } } batch_per[current_batch] = temporary_per; sizes[out.size()]++; if (output_bad_mappings && 
(out.size() != (unsigned)nr || has_item_none)) { err << "bad mapping rule " << r << " x " << x << " num_rep " << nr << " result " << out << std::endl; } } batch_min = batch_max + 1; batch_max = batch_min + objects_per_batch - 1; } for (unsigned i = 0; i < per.size(); i++) if (output_utilization && !output_statistics) err << " device " << i << ":\t" << per[i] << std::endl; for (map<int,int>::iterator p = sizes.begin(); p != sizes.end(); ++p) if (output_statistics) err << "rule " << r << " (" << crush.get_rule_name(r) << ") num_rep " << nr << " result size == " << p->first << ":\t" << p->second << "/" << (max_x-min_x+1) << std::endl; if (output_statistics) for (unsigned i = 0; i < per.size(); i++) { if (output_utilization) { if (num_objects_expected[i] > 0 && per[i] > 0) { err << " device " << i << ":\t" << "\t" << " stored " << ": " << per[i] << "\t" << " expected " << ": " << num_objects_expected[i] << std::endl; } } else if (output_utilization_all) { err << " device " << i << ":\t" << "\t" << " stored " << ": " << per[i] << "\t" << " expected " << ": " << num_objects_expected[i] << std::endl; } } ldout(cct, 20) << "output statistics created" << dendl; if (output_data_file) for (unsigned i = 0; i < per.size(); i++) { vector_data_buffer_f.clear(); vector_data_buffer_f.push_back( (float) per[i]); vector_data_buffer_f.push_back( (float) num_objects_expected[i]); write_integer_indexed_vector_data_string(tester_data.device_utilization_all, i, vector_data_buffer_f); if (num_objects_expected[i] > 0 && per[i] > 0) write_integer_indexed_vector_data_string(tester_data.device_utilization, i, vector_data_buffer_f); } if (output_data_file && num_batches > 1) { // stage batch utilization information for post-processing for (int i = 0; i < num_batches; i++) { write_integer_indexed_vector_data_string(tester_data.batch_device_utilization_all, i, batch_per[i]); write_integer_indexed_vector_data_string(tester_data.batch_device_expected_utilization_all, i, batch_per[i]); } } ldout(cct, 20) << "output data file created" << dendl; string rule_tag = crush.get_rule_name(r); if (output_csv) write_data_set_to_csv(output_data_file_name+rule_tag,tester_data); ldout(cct, 20) << "successfully written csv" << dendl; } } if (output_choose_tries) { __u32 *v = 0; int n = crush.get_choose_profile(&v); for (int i=0; i<n; i++) { cout.setf(std::ios::right); cout << std::setw(2) << i << ": " << std::setw(9) << v[i]; cout.unsetf(std::ios::right); cout << std::endl; } crush.stop_choose_profile(); } return 0; } int CrushTester::compare(CrushWrapper& crush2) { if (min_rule < 0 || max_rule < 0) { min_rule = 0; max_rule = crush.get_max_rules() - 1; } if (min_x < 0 || max_x < 0) { min_x = 0; max_x = 1023; } // initial osd weights vector<__u32> weight; /* * note device weight is set by crushtool * (likely due to a given a command line option) */ for (int o = 0; o < crush.get_max_devices(); o++) { if (device_weight.count(o)) { weight.push_back(device_weight[o]); } else if (crush.check_item_present(o)) { weight.push_back(0x10000); } else { weight.push_back(0); } } // make adjustments adjust_weights(weight); map<int,int> bad_by_rule; int ret = 0; for (int r = min_rule; r < crush.get_max_rules() && r <= max_rule; r++) { if (!crush.rule_exists(r)) { if (output_statistics) err << "rule " << r << " dne" << std::endl; continue; } int bad = 0; for (int nr = min_rep; nr <= max_rep; nr++) { for (int x = min_x; x <= max_x; ++x) { vector<int> out; crush.do_rule(r, x, out, nr, weight, 0); vector<int> out2; crush2.do_rule(r, x, out2, nr, weight, 0); 
if (out != out2) { ++bad; } } } if (bad) { ret = -1; } int max = (max_rep - min_rep + 1) * (max_x - min_x + 1); double ratio = (double)bad / (double)max; cout << "rule " << r << " had " << bad << "/" << max << " mismatched mappings (" << ratio << ")" << std::endl; } if (ret) { cerr << "warning: maps are NOT equivalent" << std::endl; } else { cout << "maps appear equivalent" << std::endl; } return ret; }
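// --------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the original CrushTester.cc.
// It shows how a crushtool-style caller typically drives test(): fix one rule,
// an input range and a replica count, then map every x and report any bad
// mappings.  The name smoke_test_rule is hypothetical and cct is assumed to
// come from the caller's usual CephContext setup.
int smoke_test_rule(CephContext* cct, CrushWrapper& crush,
                    int rule, int num_rep, std::ostream& errs)
{
  CrushTester tester(crush, errs);
  tester.set_rule(rule);                // min_rule == max_rule == rule
  tester.set_min_x(0);
  tester.set_max_x(1023);               // same range test() defaults to
  tester.set_num_rep(num_rep);          // min_rep == max_rep == num_rep
  tester.set_output_bad_mappings(true); // report results that are short or
                                        // contain CRUSH_ITEM_NONE
  return tester.test(cct);              // 0 on success, -EINVAL on bad arguments
}
// --------------------------------------------------------------------------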
24,318
30.789542
119
cc
null
ceph-main/src/crush/CrushTester.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_CRUSH_TESTER_H #define CEPH_CRUSH_TESTER_H #include "crush/CrushWrapper.h" #include "include/common_fwd.h" #include <fstream> class CrushTester { CrushWrapper& crush; std::ostream& err; std::map<int, int> device_weight; int min_rule, max_rule; int min_x, max_x; int min_rep, max_rep; int64_t pool_id; int num_batches; bool use_crush; float mark_down_device_ratio; float mark_down_bucket_ratio; bool output_utilization; bool output_utilization_all; bool output_statistics; bool output_mappings; bool output_bad_mappings; bool output_choose_tries; bool output_data_file; bool output_csv; std::string output_data_file_name; /* * mark a ratio of devices down, can be used to simulate placement distributions * under degrated cluster conditions */ void adjust_weights(std::vector<__u32>& weight); /* * Get the maximum number of devices that could be selected to satisfy ruleno. */ int get_maximum_affected_by_rule(int ruleno); /* * for maps where in devices have non-sequential id numbers, return a mapping of device id * to a sequential id number. For example, if we have devices with id's 0 1 4 5 6 return a map * where: * 0 = 0 * 1 = 1 * 4 = 2 * 5 = 3 * 6 = 4 * * which can help make post-processing easier */ std::map<int,int> get_collapsed_mapping(); /* * Essentially a re-implementation of CRUSH. Given a vector of devices * check that the vector represents a valid placement for a given ruleno. */ bool check_valid_placement(int ruleno, std::vector<int> in, const std::vector<__u32>& weight); /* * Generate a random selection of devices which satisfies ruleno. Essentially a * monte-carlo simulator for CRUSH placements which can be used to compare the * statistical distribution of the CRUSH algorithm to a random number generator */ int random_placement(int ruleno, std::vector<int>& out, int maxout, std::vector<__u32>& weight); // scaffolding to store data for off-line processing struct tester_data_set { std::vector<std::string> device_utilization; std::vector<std::string> device_utilization_all; std::vector<std::string> placement_information; std::vector<std::string> batch_device_utilization_all; std::vector<std::string> batch_device_expected_utilization_all; std::map<int, float> proportional_weights; std::map<int, float> proportional_weights_all; std::map<int, float> absolute_weights; } ; void write_to_csv(std::ofstream& csv_file, std::vector<std::string>& payload) { if (csv_file.good()) for (std::vector<std::string>::iterator it = payload.begin(); it != payload.end(); ++it) csv_file << (*it); } void write_to_csv(std::ofstream& csv_file, std::map<int, float>& payload) { if (csv_file.good()) for (std::map<int, float>::iterator it = payload.begin(); it != payload.end(); ++it) csv_file << (*it).first << ',' << (*it).second << std::endl; } void write_data_set_to_csv(std::string user_tag, tester_data_set& tester_data) { std::ofstream device_utilization_file((user_tag + (std::string)"-device_utilization.csv").c_str()); std::ofstream device_utilization_all_file((user_tag + (std::string)"-device_utilization_all.csv").c_str()); std::ofstream placement_information_file((user_tag + (std::string)"-placement_information.csv").c_str()); std::ofstream proportional_weights_file((user_tag + (std::string)"-proportional_weights.csv").c_str()); std::ofstream proportional_weights_all_file((user_tag + (std::string)"-proportional_weights_all.csv").c_str()); std::ofstream absolute_weights_file((user_tag + 
(std::string)"-absolute_weights.csv").c_str()); // write the headers device_utilization_file << "Device ID, Number of Objects Stored, Number of Objects Expected" << std::endl; device_utilization_all_file << "Device ID, Number of Objects Stored, Number of Objects Expected" << std::endl; proportional_weights_file << "Device ID, Proportional Weight" << std::endl; proportional_weights_all_file << "Device ID, Proportional Weight" << std::endl; absolute_weights_file << "Device ID, Absolute Weight" << std::endl; placement_information_file << "Input"; for (int i = 0; i < max_rep; i++) { placement_information_file << ", OSD" << i; } placement_information_file << std::endl; write_to_csv(device_utilization_file, tester_data.device_utilization); write_to_csv(device_utilization_all_file, tester_data.device_utilization_all); write_to_csv(placement_information_file, tester_data.placement_information); write_to_csv(proportional_weights_file, tester_data.proportional_weights); write_to_csv(proportional_weights_all_file, tester_data.proportional_weights_all); write_to_csv(absolute_weights_file, tester_data.absolute_weights); device_utilization_file.close(); device_utilization_all_file.close(); placement_information_file.close(); proportional_weights_file.close(); absolute_weights_file.close(); if (num_batches > 1) { std::ofstream batch_device_utilization_all_file ((user_tag + (std::string)"-batch_device_utilization_all.csv").c_str()); std::ofstream batch_device_expected_utilization_all_file ((user_tag + (std::string)"-batch_device_expected_utilization_all.csv").c_str()); batch_device_utilization_all_file << "Batch Round"; for (unsigned i = 0; i < tester_data.device_utilization.size(); i++) { batch_device_utilization_all_file << ", Objects Stored on OSD" << i; } batch_device_utilization_all_file << std::endl; batch_device_expected_utilization_all_file << "Batch Round"; for (unsigned i = 0; i < tester_data.device_utilization.size(); i++) { batch_device_expected_utilization_all_file << ", Objects Expected on OSD" << i; } batch_device_expected_utilization_all_file << std::endl; write_to_csv(batch_device_utilization_all_file, tester_data.batch_device_utilization_all); write_to_csv(batch_device_expected_utilization_all_file, tester_data.batch_device_expected_utilization_all); batch_device_expected_utilization_all_file.close(); batch_device_utilization_all_file.close(); } } void write_integer_indexed_vector_data_string(std::vector<std::string> &dst, int index, std::vector<int> vector_data); void write_integer_indexed_vector_data_string(std::vector<std::string> &dst, int index, std::vector<float> vector_data); void write_integer_indexed_scalar_data_string(std::vector<std::string> &dst, int index, int scalar_data); void write_integer_indexed_scalar_data_string(std::vector<std::string> &dst, int index, float scalar_data); public: CrushTester(CrushWrapper& c, std::ostream& eo) : crush(c), err(eo), min_rule(-1), max_rule(-1), min_x(-1), max_x(-1), min_rep(-1), max_rep(-1), pool_id(-1), num_batches(1), use_crush(true), mark_down_device_ratio(0.0), mark_down_bucket_ratio(1.0), output_utilization(false), output_utilization_all(false), output_statistics(false), output_mappings(false), output_bad_mappings(false), output_choose_tries(false), output_data_file(false), output_csv(false), output_data_file_name("") { } void set_output_data_file_name(std::string name) { output_data_file_name = name; } std::string get_output_data_file_name() const { return output_data_file_name; } void set_output_data_file(bool b) { 
output_data_file = b; } bool get_output_data_file() const { return output_data_file; } void set_output_csv(bool b) { output_csv = b; } bool get_output_csv() const { return output_csv; } void set_output_utilization(bool b) { output_utilization = b; } bool get_output_utilization() const { return output_utilization; } void set_output_utilization_all(bool b) { output_utilization_all = b; } bool get_output_utilization_all() const { return output_utilization_all; } void set_output_statistics(bool b) { output_statistics = b; } bool get_output_statistics() const { return output_statistics; } void set_output_mappings(bool b) { output_mappings = b; } bool get_output_mappings() const { return output_mappings; } void set_output_bad_mappings(bool b) { output_bad_mappings = b; } bool get_output_bad_mappings() const { return output_bad_mappings; } void set_output_choose_tries(bool b) { output_choose_tries = b; } bool get_output_choose_tries() const { return output_choose_tries; } void set_batches(int b) { num_batches = b; } int get_batches() const { return num_batches; } void set_random_placement() { use_crush = false; } bool get_random_placement() const { return use_crush == false; } void set_bucket_down_ratio(float bucket_ratio) { mark_down_bucket_ratio = bucket_ratio; } float get_bucket_down_ratio() const { return mark_down_bucket_ratio; } void set_device_down_ratio(float device_ratio) { mark_down_device_ratio = device_ratio; } float set_device_down_ratio() const { return mark_down_device_ratio; } void set_device_weight(int dev, float f); void set_min_rep(int r) { min_rep = r; } int get_min_rep() const { return min_rep; } void set_max_rep(int r) { max_rep = r; } int get_max_rep() const { return max_rep; } void set_num_rep(int r) { min_rep = max_rep = r; } void set_min_x(int x) { min_x = x; } void set_pool_id(int64_t x){ pool_id = x; } int get_min_x() const { return min_x; } void set_max_x(int x) { max_x = x; } int get_max_x() const { return max_x; } void set_x(int x) { min_x = max_x = x; } void set_min_rule(int rule) { min_rule = rule; } int get_min_rule() const { return min_rule; } void set_max_rule(int rule) { max_rule = rule; } int get_max_rule() const { return max_rule; } void set_rule(int rule) { min_rule = max_rule = rule; } /** * check if any bucket/nodes is referencing an unknown name or type * @param max_id rejects any non-bucket items with id less than this number, * pass 0 to disable this check * @return false if an dangling name/type is referenced or an item id is too * large, true otherwise */ bool check_name_maps(unsigned max_id = 0) const; int test(CephContext* cct); int test_with_fork(CephContext* cct, int timeout); int compare(CrushWrapper& other); }; #endif
10880
29.393855
145
h
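The CrushTester header above is configured almost entirely through setters before a single call to test(). Below is a minimal usage sketch, not taken from the Ceph tree: it assumes a populated CrushWrapper and a valid CephContext* are obtained elsewhere by the caller, and it only touches members declared in the header shown above.

#include <iostream>
#include "crush/CrushWrapper.h"
#include "crush/CrushTester.h"

// Minimal sketch: simulate placements for one CRUSH rule.
// Assumes `crush` is already built/decoded and `cct` is a valid CephContext*.
int simulate_rule(CephContext *cct, CrushWrapper &crush, int rule_id)
{
  CrushTester tester(crush, std::cerr);   // error output goes to stderr

  tester.set_rule(rule_id);               // min_rule = max_rule = rule_id
  tester.set_num_rep(3);                  // 3-way placement
  tester.set_min_x(0);
  tester.set_max_x(1023);                 // 1024 sample inputs
  tester.set_output_statistics(true);     // report distribution statistics

  // test() runs the simulation for the configured rules, inputs and
  // replica counts, reporting as configured above.
  return tester.test(cct);
}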
null
ceph-main/src/crush/CrushTreeDumper.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph distributed storage system * * Copyright (C) 2015 Mirantis Inc * * Author: Mykola Golub <[email protected]> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * */ #ifndef CRUSH_TREE_DUMPER_H #define CRUSH_TREE_DUMPER_H #include "CrushWrapper.h" #include "include/stringify.h" /** * CrushTreeDumper: * A helper class and functions to dump a crush tree. * * Example: * * class SimpleDumper : public CrushTreeDumper::Dumper<ostream> { * public: * SimpleDumper(const CrushWrapper *crush) : * CrushTreeDumper::Dumper<ostream>(crush) {} * protected: * virtual void dump_item(const CrushTreeDumper::Item &qi, ostream *out) { * *out << qi.id; * for (int k = 0; k < qi.depth; k++) * *out << "-"; * if (qi.is_bucket()) * *out << crush->get_item_name(qi.id) * else * *out << "osd." << qi.id; * *out << "\n"; * } * }; * * SimpleDumper(crush).dump(out); * */ namespace CrushTreeDumper { struct Item { int id; int parent; int depth; float weight; std::list<int> children; Item() : id(0), parent(0), depth(0), weight(0) {} Item(int i, int p, int d, float w) : id(i), parent(p), depth(d), weight(w) {} bool is_bucket() const { return id < 0; } }; template <typename F> class Dumper : public std::list<Item> { public: explicit Dumper(const CrushWrapper *crush_, const name_map_t& weight_set_names_) : crush(crush_), weight_set_names(weight_set_names_) { crush->find_nonshadow_roots(&roots); root = roots.begin(); } explicit Dumper(const CrushWrapper *crush_, const name_map_t& weight_set_names_, bool show_shadow) : crush(crush_), weight_set_names(weight_set_names_) { if (show_shadow) { crush->find_roots(&roots); } else { crush->find_nonshadow_roots(&roots); } root = roots.begin(); } virtual ~Dumper() {} virtual void reset() { root = roots.begin(); touched.clear(); clear(); } virtual bool should_dump_leaf(int i) const { return true; } virtual bool should_dump_empty_bucket() const { return true; } bool should_dump(int id) { if (id >= 0) return should_dump_leaf(id); if (should_dump_empty_bucket()) return true; int s = crush->get_bucket_size(id); for (int k = s - 1; k >= 0; k--) { int c = crush->get_bucket_item(id, k); if (should_dump(c)) return true; } return false; } bool next(Item &qi) { if (empty()) { while (root != roots.end() && !should_dump(*root)) ++root; if (root == roots.end()) return false; push_back(Item(*root, 0, 0, crush->get_bucket_weightf(*root))); ++root; } qi = front(); pop_front(); touched.insert(qi.id); if (qi.is_bucket()) { // queue bucket contents, sorted by (class, name) int s = crush->get_bucket_size(qi.id); std::map<std::string, std::pair<int,float>> sorted; for (int k = s - 1; k >= 0; k--) { int id = crush->get_bucket_item(qi.id, k); if (should_dump(id)) { std::string sort_by; if (id >= 0) { const char *c = crush->get_item_class(id); sort_by = c ? 
c : ""; sort_by += "_"; char nn[80]; snprintf(nn, sizeof(nn), "osd.%08d", id); sort_by += nn; } else { sort_by = "_"; sort_by += crush->get_item_name(id); } sorted[sort_by] = std::make_pair( id, crush->get_bucket_item_weightf(qi.id, k)); } } for (auto p = sorted.rbegin(); p != sorted.rend(); ++p) { qi.children.push_back(p->second.first); push_front(Item(p->second.first, qi.id, qi.depth + 1, p->second.second)); } } return true; } void dump(F *f) { reset(); Item qi; while (next(qi)) dump_item(qi, f); } bool is_touched(int id) const { return touched.count(id) > 0; } void set_root(const std::string& bucket) { roots.clear(); if (crush->name_exists(bucket)) { int i = crush->get_item_id(bucket); roots.insert(i); } } protected: virtual void dump_item(const Item &qi, F *f) = 0; protected: const CrushWrapper *crush; const name_map_t &weight_set_names; private: std::set<int> touched; std::set<int> roots; std::set<int>::iterator root; }; inline void dump_item_fields(const CrushWrapper *crush, const name_map_t& weight_set_names, const Item &qi, ceph::Formatter *f) { f->dump_int("id", qi.id); const char *c = crush->get_item_class(qi.id); if (c) f->dump_string("device_class", c); if (qi.is_bucket()) { int type = crush->get_bucket_type(qi.id); f->dump_string("name", crush->get_item_name(qi.id)); f->dump_string("type", crush->get_type_name(type)); f->dump_int("type_id", type); } else { f->dump_stream("name") << "osd." << qi.id; f->dump_string("type", crush->get_type_name(0)); f->dump_int("type_id", 0); f->dump_float("crush_weight", qi.weight); f->dump_unsigned("depth", qi.depth); } if (qi.parent < 0) { f->open_object_section("pool_weights"); for (auto& p : crush->choose_args) { const crush_choose_arg_map& cmap = p.second; int bidx = -1 - qi.parent; const crush_bucket *b = crush->get_bucket(qi.parent); if (b && bidx < (int)cmap.size && cmap.args[bidx].weight_set && cmap.args[bidx].weight_set_positions >= 1) { int bpos; for (bpos = 0; bpos < (int)cmap.args[bidx].weight_set[0].size && b->items[bpos] != qi.id; ++bpos) ; std::string name; if (p.first == CrushWrapper::DEFAULT_CHOOSE_ARGS) { name = "(compat)"; } else { auto q = weight_set_names.find(p.first); name = q != weight_set_names.end() ? 
q->second : stringify(p.first); } f->open_array_section(name.c_str()); for (unsigned opos = 0; opos < cmap.args[bidx].weight_set_positions; ++opos) { float w = (float)cmap.args[bidx].weight_set[opos].weights[bpos] / (float)0x10000; f->dump_float("weight", w); } f->close_section(); } } f->close_section(); } } inline void dump_bucket_children(const CrushWrapper *crush, const Item &qi, ceph::Formatter *f) { if (!qi.is_bucket()) return; f->open_array_section("children"); for (std::list<int>::const_iterator i = qi.children.begin(); i != qi.children.end(); ++i) { f->dump_int("child", *i); } f->close_section(); } class FormattingDumper : public Dumper<ceph::Formatter> { public: explicit FormattingDumper(const CrushWrapper *crush, const name_map_t& weight_set_names) : Dumper<ceph::Formatter>(crush, weight_set_names) {} explicit FormattingDumper(const CrushWrapper *crush, const name_map_t& weight_set_names, bool show_shadow) : Dumper<ceph::Formatter>(crush, weight_set_names, show_shadow) {} protected: void dump_item(const Item &qi, ceph::Formatter *f) override { f->open_object_section("item"); dump_item_fields(qi, f); dump_bucket_children(qi, f); f->close_section(); } virtual void dump_item_fields(const Item &qi, ceph::Formatter *f) { CrushTreeDumper::dump_item_fields(crush, weight_set_names, qi, f); } virtual void dump_bucket_children(const Item &qi, ceph::Formatter *f) { CrushTreeDumper::dump_bucket_children(crush, qi, f); } }; } #endif
7915
26.109589
81
h
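The header comment in CrushTreeDumper.h sketches a SimpleDumper subclass, but the constructor it shows predates the weight-set name map that the Dumper declared above now requires. The following is an adapted sketch under that assumption: it uses only members visible in the header (Item, dump_item(), the protected crush pointer) and treats name_map_t as the map type pulled in via CrushWrapper.h.

#include <ostream>
#include "crush/CrushWrapper.h"
#include "crush/CrushTreeDumper.h"

// Plain-text tree dumper, adapted from the example in the header comment
// to match the Dumper constructor actually declared there.
class SimpleDumper : public CrushTreeDumper::Dumper<std::ostream> {
public:
  SimpleDumper(const CrushWrapper *crush, const name_map_t &ws_names)
    : CrushTreeDumper::Dumper<std::ostream>(crush, ws_names) {}
protected:
  void dump_item(const CrushTreeDumper::Item &qi, std::ostream *out) override {
    for (int k = 0; k < qi.depth; k++)
      *out << "  ";                            // indent by tree depth
    if (qi.is_bucket())
      *out << crush->get_item_name(qi.id);     // bucket name
    else
      *out << "osd." << qi.id;                 // leaf device
    *out << " (weight " << qi.weight << ")\n";
  }
};

// Usage, assuming `crush`, `weight_set_names` and an ostream `out` exist:
//   SimpleDumper(&crush, weight_set_names).dump(&out);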
null
ceph-main/src/crush/CrushWrapper.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "osd/osd_types.h" #include "common/debug.h" #include "common/Formatter.h" #include "common/errno.h" #include "common/TextTable.h" #include "include/stringify.h" #include "CrushWrapper.h" #include "CrushTreeDumper.h" #define dout_subsys ceph_subsys_crush using std::cout; using std::list; using std::map; using std::make_pair; using std::ostream; using std::ostringstream; using std::pair; using std::set; using std::string; using std::vector; using ceph::bufferlist; using ceph::decode; using ceph::decode_nohead; using ceph::encode; using ceph::Formatter; bool CrushWrapper::has_non_straw2_buckets() const { for (int i=0; i<crush->max_buckets; ++i) { crush_bucket *b = crush->buckets[i]; if (!b) continue; if (b->alg != CRUSH_BUCKET_STRAW2) return true; } return false; } bool CrushWrapper::has_v2_rules() const { for (unsigned i=0; i<crush->max_rules; i++) { if (is_v2_rule(i)) { return true; } } return false; } bool CrushWrapper::is_v2_rule(unsigned ruleid) const { // check rule for use of indep or new SET_* rule steps if (ruleid >= crush->max_rules) return false; crush_rule *r = crush->rules[ruleid]; if (!r) return false; for (unsigned j=0; j<r->len; j++) { if (r->steps[j].op == CRUSH_RULE_CHOOSE_INDEP || r->steps[j].op == CRUSH_RULE_CHOOSELEAF_INDEP || r->steps[j].op == CRUSH_RULE_SET_CHOOSE_TRIES || r->steps[j].op == CRUSH_RULE_SET_CHOOSELEAF_TRIES) { return true; } } return false; } bool CrushWrapper::has_v3_rules() const { for (unsigned i=0; i<crush->max_rules; i++) { if (is_v3_rule(i)) { return true; } } return false; } bool CrushWrapper::is_v3_rule(unsigned ruleid) const { // check rule for use of SET_CHOOSELEAF_VARY_R step if (ruleid >= crush->max_rules) return false; crush_rule *r = crush->rules[ruleid]; if (!r) return false; for (unsigned j=0; j<r->len; j++) { if (r->steps[j].op == CRUSH_RULE_SET_CHOOSELEAF_VARY_R) { return true; } } return false; } bool CrushWrapper::has_v4_buckets() const { for (int i=0; i<crush->max_buckets; ++i) { crush_bucket *b = crush->buckets[i]; if (!b) continue; if (b->alg == CRUSH_BUCKET_STRAW2) return true; } return false; } bool CrushWrapper::has_v5_rules() const { for (unsigned i=0; i<crush->max_rules; i++) { if (is_v5_rule(i)) { return true; } } return false; } bool CrushWrapper::is_v5_rule(unsigned ruleid) const { // check rule for use of SET_CHOOSELEAF_STABLE step if (ruleid >= crush->max_rules) return false; crush_rule *r = crush->rules[ruleid]; if (!r) return false; for (unsigned j=0; j<r->len; j++) { if (r->steps[j].op == CRUSH_RULE_SET_CHOOSELEAF_STABLE) { return true; } } return false; } bool CrushWrapper::has_choose_args() const { return !choose_args.empty(); } bool CrushWrapper::has_incompat_choose_args() const { if (choose_args.empty()) return false; if (choose_args.size() > 1) return true; if (choose_args.begin()->first != DEFAULT_CHOOSE_ARGS) return true; crush_choose_arg_map arg_map = choose_args.begin()->second; for (__u32 i = 0; i < arg_map.size; i++) { crush_choose_arg *arg = &arg_map.args[i]; if (arg->weight_set_positions == 0 && arg->ids_size == 0) continue; if (arg->weight_set_positions != 1) return true; if (arg->ids_size != 0) return true; } return false; } int CrushWrapper::split_id_class(int i, int *idout, int *classout) const { if (!item_exists(i)) return -EINVAL; string name = get_item_name(i); size_t pos = name.find("~"); if (pos == string::npos) { *idout = i; *classout = -1; return 0; } string name_no_class = name.substr(0, 
pos); if (!name_exists(name_no_class)) return -ENOENT; string class_name = name.substr(pos + 1); if (!class_exists(class_name)) return -ENOENT; *idout = get_item_id(name_no_class); *classout = get_class_id(class_name); return 0; } int CrushWrapper::can_rename_item(const string& srcname, const string& dstname, ostream *ss) const { if (name_exists(srcname)) { if (name_exists(dstname)) { *ss << "dstname = '" << dstname << "' already exists"; return -EEXIST; } if (is_valid_crush_name(dstname)) { return 0; } else { *ss << "dstname = '" << dstname << "' does not match [-_.0-9a-zA-Z]+"; return -EINVAL; } } else { if (name_exists(dstname)) { *ss << "srcname = '" << srcname << "' does not exist " << "and dstname = '" << dstname << "' already exists"; return -EALREADY; } else { *ss << "srcname = '" << srcname << "' does not exist"; return -ENOENT; } } } int CrushWrapper::rename_item(const string& srcname, const string& dstname, ostream *ss) { int ret = can_rename_item(srcname, dstname, ss); if (ret < 0) return ret; int oldid = get_item_id(srcname); return set_item_name(oldid, dstname); } int CrushWrapper::can_rename_bucket(const string& srcname, const string& dstname, ostream *ss) const { int ret = can_rename_item(srcname, dstname, ss); if (ret) return ret; int srcid = get_item_id(srcname); if (srcid >= 0) { *ss << "srcname = '" << srcname << "' is not a bucket " << "because its id = " << srcid << " is >= 0"; return -ENOTDIR; } return 0; } int CrushWrapper::rename_bucket(const string& srcname, const string& dstname, ostream *ss) { int ret = can_rename_bucket(srcname, dstname, ss); if (ret < 0) return ret; int oldid = get_item_id(srcname); return set_item_name(oldid, dstname); } int CrushWrapper::rename_rule(const string& srcname, const string& dstname, ostream *ss) { if (!rule_exists(srcname)) { if (ss) { *ss << "source rule name '" << srcname << "' does not exist"; } return -ENOENT; } if (rule_exists(dstname)) { if (ss) { *ss << "destination rule name '" << dstname << "' already exists"; } return -EEXIST; } int rule_id = get_rule_id(srcname); auto it = rule_name_map.find(rule_id); ceph_assert(it != rule_name_map.end()); it->second = dstname; if (have_rmaps) { rule_name_rmap.erase(srcname); rule_name_rmap[dstname] = rule_id; } return 0; } void CrushWrapper::find_takes(set<int> *roots) const { for (unsigned i=0; i<crush->max_rules; i++) { crush_rule *r = crush->rules[i]; if (!r) continue; for (unsigned j=0; j<r->len; j++) { if (r->steps[j].op == CRUSH_RULE_TAKE) roots->insert(r->steps[j].arg1); } } } void CrushWrapper::find_takes_by_rule(int rule, set<int> *roots) const { if (rule < 0 || rule >= (int)crush->max_rules) return; crush_rule *r = crush->rules[rule]; if (!r) return; for (unsigned i = 0; i < r->len; i++) { if (r->steps[i].op == CRUSH_RULE_TAKE) roots->insert(r->steps[i].arg1); } } void CrushWrapper::find_roots(set<int> *roots) const { for (int i = 0; i < crush->max_buckets; i++) { if (!crush->buckets[i]) continue; crush_bucket *b = crush->buckets[i]; if (!_search_item_exists(b->id)) roots->insert(b->id); } } bool CrushWrapper::subtree_contains(int root, int item) const { if (root == item) return true; if (root >= 0) return false; // root is a leaf const crush_bucket *b = get_bucket(root); if (IS_ERR(b)) return false; for (unsigned j=0; j<b->size; j++) { if (subtree_contains(b->items[j], item)) return true; } return false; } bool CrushWrapper::_maybe_remove_last_instance(CephContext *cct, int item, bool unlink_only) { // last instance? 
if (_search_item_exists(item)) { return false; } if (item < 0 && _bucket_is_in_use(item)) { return false; } if (item < 0 && !unlink_only) { crush_bucket *t = get_bucket(item); ldout(cct, 5) << "_maybe_remove_last_instance removing bucket " << item << dendl; crush_remove_bucket(crush, t); if (class_bucket.count(item) != 0) class_bucket.erase(item); class_remove_item(item); update_choose_args(cct); } if ((item >= 0 || !unlink_only) && name_map.count(item)) { ldout(cct, 5) << "_maybe_remove_last_instance removing name for item " << item << dendl; name_map.erase(item); have_rmaps = false; if (item >= 0 && !unlink_only) { class_remove_item(item); } } rebuild_roots_with_classes(cct); return true; } int CrushWrapper::remove_root(CephContext *cct, int item) { crush_bucket *b = get_bucket(item); if (IS_ERR(b)) { // should be idempotent // e.g.: we use 'crush link' to link same host into // different roots, which as a result can cause different // shadow trees reference same hosts too. This means // we may need to destory the same buckets(hosts, racks, etc.) // multiple times during rebuilding all shadow trees. return 0; } for (unsigned n = 0; n < b->size; n++) { if (b->items[n] >= 0) continue; int r = remove_root(cct, b->items[n]); if (r < 0) return r; } crush_remove_bucket(crush, b); if (name_map.count(item) != 0) { name_map.erase(item); have_rmaps = false; } if (class_bucket.count(item) != 0) class_bucket.erase(item); class_remove_item(item); update_choose_args(cct); return 0; } void CrushWrapper::update_choose_args(CephContext *cct) { for (auto& i : choose_args) { crush_choose_arg_map &arg_map = i.second; assert(arg_map.size == (unsigned)crush->max_buckets); unsigned positions = get_choose_args_positions(arg_map); for (int j = 0; j < crush->max_buckets; ++j) { crush_bucket *b = crush->buckets[j]; assert(j < (int)arg_map.size); auto& carg = arg_map.args[j]; // strip out choose_args for any buckets that no longer exist if (!b || b->alg != CRUSH_BUCKET_STRAW2) { if (carg.ids) { if (cct) ldout(cct,10) << __func__ << " removing " << i.first << " bucket " << (-1-j) << " ids" << dendl; free(carg.ids); carg.ids = 0; carg.ids_size = 0; } if (carg.weight_set) { if (cct) ldout(cct,10) << __func__ << " removing " << i.first << " bucket " << (-1-j) << " weight_sets" << dendl; for (unsigned p = 0; p < carg.weight_set_positions; ++p) { free(carg.weight_set[p].weights); } free(carg.weight_set); carg.weight_set = 0; carg.weight_set_positions = 0; } continue; } if (carg.weight_set_positions == 0) { continue; // skip it } if (carg.weight_set_positions != positions) { if (cct) lderr(cct) << __func__ << " " << i.first << " bucket " << (-1-j) << " positions " << carg.weight_set_positions << " -> " << positions << dendl; continue; // wth... skip! } // mis-sized weight_sets? this shouldn't ever happen. for (unsigned p = 0; p < positions; ++p) { if (carg.weight_set[p].size != b->size) { if (cct) lderr(cct) << __func__ << " fixing " << i.first << " bucket " << (-1-j) << " position " << p << " size " << carg.weight_set[p].size << " -> " << b->size << dendl; auto old_ws = carg.weight_set[p]; carg.weight_set[p].size = b->size; carg.weight_set[p].weights = (__u32*)calloc(b->size, sizeof(__u32)); auto max = std::min<unsigned>(old_ws.size, b->size); for (unsigned k = 0; k < max; ++k) { carg.weight_set[p].weights[k] = old_ws.weights[k]; } free(old_ws.weights); } } } } } int CrushWrapper::remove_item(CephContext *cct, int item, bool unlink_only) { ldout(cct, 5) << "remove_item " << item << (unlink_only ? 
" unlink_only":"") << dendl; int ret = -ENOENT; if (item < 0 && !unlink_only) { crush_bucket *t = get_bucket(item); if (IS_ERR(t)) { ldout(cct, 1) << "remove_item bucket " << item << " does not exist" << dendl; return -ENOENT; } if (t->size) { ldout(cct, 1) << "remove_item bucket " << item << " has " << t->size << " items, not empty" << dendl; return -ENOTEMPTY; } if (_bucket_is_in_use(item)) { return -EBUSY; } } for (int i = 0; i < crush->max_buckets; i++) { if (!crush->buckets[i]) continue; crush_bucket *b = crush->buckets[i]; for (unsigned i=0; i<b->size; ++i) { int id = b->items[i]; if (id == item) { ldout(cct, 5) << "remove_item removing item " << item << " from bucket " << b->id << dendl; adjust_item_weight_in_bucket(cct, item, 0, b->id, true); bucket_remove_item(b, item); ret = 0; } } } if (_maybe_remove_last_instance(cct, item, unlink_only)) ret = 0; return ret; } bool CrushWrapper::_search_item_exists(int item) const { for (int i = 0; i < crush->max_buckets; i++) { if (!crush->buckets[i]) continue; crush_bucket *b = crush->buckets[i]; for (unsigned j=0; j<b->size; ++j) { if (b->items[j] == item) return true; } } return false; } bool CrushWrapper::_bucket_is_in_use(int item) { for (auto &i : class_bucket) for (auto &j : i.second) if (j.second == item) return true; for (unsigned i = 0; i < crush->max_rules; ++i) { crush_rule *r = crush->rules[i]; if (!r) continue; for (unsigned j = 0; j < r->len; ++j) { if (r->steps[j].op == CRUSH_RULE_TAKE) { int step_item = r->steps[j].arg1; int original_item; int c; int res = split_id_class(step_item, &original_item, &c); if (res < 0) return false; if (step_item == item || original_item == item) return true; } } } return false; } int CrushWrapper::_remove_item_under( CephContext *cct, int item, int ancestor, bool unlink_only) { ldout(cct, 5) << "_remove_item_under " << item << " under " << ancestor << (unlink_only ? " unlink_only":"") << dendl; if (ancestor >= 0) { return -EINVAL; } if (!bucket_exists(ancestor)) return -EINVAL; int ret = -ENOENT; crush_bucket *b = get_bucket(ancestor); for (unsigned i=0; i<b->size; ++i) { int id = b->items[i]; if (id == item) { ldout(cct, 5) << "_remove_item_under removing item " << item << " from bucket " << b->id << dendl; adjust_item_weight_in_bucket(cct, item, 0, b->id, true); bucket_remove_item(b, item); ret = 0; } else if (id < 0) { int r = remove_item_under(cct, item, id, unlink_only); if (r == 0) ret = 0; } } return ret; } int CrushWrapper::remove_item_under( CephContext *cct, int item, int ancestor, bool unlink_only) { ldout(cct, 5) << "remove_item_under " << item << " under " << ancestor << (unlink_only ? 
" unlink_only":"") << dendl; if (!unlink_only && _bucket_is_in_use(item)) { return -EBUSY; } int ret = _remove_item_under(cct, item, ancestor, unlink_only); if (ret < 0) return ret; if (item < 0 && !unlink_only) { crush_bucket *t = get_bucket(item); if (IS_ERR(t)) { ldout(cct, 1) << "remove_item_under bucket " << item << " does not exist" << dendl; return -ENOENT; } if (t->size) { ldout(cct, 1) << "remove_item_under bucket " << item << " has " << t->size << " items, not empty" << dendl; return -ENOTEMPTY; } } if (_maybe_remove_last_instance(cct, item, unlink_only)) ret = 0; return ret; } int CrushWrapper::get_common_ancestor_distance(CephContext *cct, int id, const std::multimap<string,string>& loc) const { ldout(cct, 5) << __func__ << " " << id << " " << loc << dendl; if (!item_exists(id)) return -ENOENT; map<string,string> id_loc = get_full_location(id); ldout(cct, 20) << " id is at " << id_loc << dendl; for (map<int,string>::const_iterator p = type_map.begin(); p != type_map.end(); ++p) { map<string,string>::iterator ip = id_loc.find(p->second); if (ip == id_loc.end()) continue; for (std::multimap<string,string>::const_iterator q = loc.find(p->second); q != loc.end(); ++q) { if (q->first != p->second) break; if (q->second == ip->second) return p->first; } } return -ERANGE; } int CrushWrapper::parse_loc_map(const std::vector<string>& args, std::map<string,string> *ploc) { ploc->clear(); for (unsigned i = 0; i < args.size(); ++i) { const char *s = args[i].c_str(); const char *pos = strchr(s, '='); if (!pos) return -EINVAL; string key(s, 0, pos-s); string value(pos+1); if (value.length()) (*ploc)[key] = value; else return -EINVAL; } return 0; } int CrushWrapper::parse_loc_multimap(const std::vector<string>& args, std::multimap<string,string> *ploc) { ploc->clear(); for (unsigned i = 0; i < args.size(); ++i) { const char *s = args[i].c_str(); const char *pos = strchr(s, '='); if (!pos) return -EINVAL; string key(s, 0, pos-s); string value(pos+1); if (value.length()) ploc->insert(make_pair(key, value)); else return -EINVAL; } return 0; } bool CrushWrapper::check_item_loc(CephContext *cct, int item, const map<string,string>& loc, int *weight) { ldout(cct, 5) << "check_item_loc item " << item << " loc " << loc << dendl; for (map<int,string>::const_iterator p = type_map.begin(); p != type_map.end(); ++p) { // ignore device if (p->first == 0) continue; // ignore types that aren't specified in loc map<string,string>::const_iterator q = loc.find(p->second); if (q == loc.end()) { ldout(cct, 2) << "warning: did not specify location for '" << p->second << "' level (levels are " << type_map << ")" << dendl; continue; } if (!name_exists(q->second)) { ldout(cct, 5) << "check_item_loc bucket " << q->second << " dne" << dendl; return false; } int id = get_item_id(q->second); if (id >= 0) { ldout(cct, 5) << "check_item_loc requested " << q->second << " for type " << p->second << " is a device, not bucket" << dendl; return false; } ceph_assert(bucket_exists(id)); crush_bucket *b = get_bucket(id); // see if item exists in this bucket for (unsigned j=0; j<b->size; j++) { if (b->items[j] == item) { ldout(cct, 2) << "check_item_loc " << item << " exists in bucket " << b->id << dendl; if (weight) *weight = crush_get_bucket_item_weight(b, j); return true; } } return false; } ldout(cct, 2) << __func__ << " item " << item << " loc " << loc << dendl; return false; } map<string, string> CrushWrapper::get_full_location(int id) const { vector<pair<string, string> > full_location_ordered; map<string,string> 
full_location; get_full_location_ordered(id, full_location_ordered); std::copy(full_location_ordered.begin(), full_location_ordered.end(), std::inserter(full_location, full_location.begin())); return full_location; } int CrushWrapper::get_full_location(const string& name, map<string,string> *ploc) { build_rmaps(); auto p = name_rmap.find(name); if (p == name_rmap.end()) { return -ENOENT; } *ploc = get_full_location(p->second); return 0; } int CrushWrapper::get_full_location_ordered(int id, vector<pair<string, string> >& path) const { if (!item_exists(id)) return -ENOENT; int cur = id; int ret; while (true) { pair<string, string> parent_coord = get_immediate_parent(cur, &ret); if (ret != 0) break; path.push_back(parent_coord); cur = get_item_id(parent_coord.second); } return 0; } string CrushWrapper::get_full_location_ordered_string(int id) const { vector<pair<string, string> > full_location_ordered; string full_location; get_full_location_ordered(id, full_location_ordered); reverse(begin(full_location_ordered), end(full_location_ordered)); for(auto i = full_location_ordered.begin(); i != full_location_ordered.end(); i++) { full_location = full_location + i->first + "=" + i->second; if (i != full_location_ordered.end() - 1) { full_location = full_location + ","; } } return full_location; } map<int, string> CrushWrapper::get_parent_hierarchy(int id) const { map<int,string> parent_hierarchy; pair<string, string> parent_coord = get_immediate_parent(id); int parent_id; // get the integer type for id and create a counter from there int type_counter = get_bucket_type(id); // if we get a negative type then we can assume that we have an OSD // change behavior in get_item_type FIXME if (type_counter < 0) type_counter = 0; // read the type map and get the name of the type with the largest ID int high_type = 0; if (!type_map.empty()) high_type = type_map.rbegin()->first; parent_id = get_item_id(parent_coord.second); while (type_counter < high_type) { type_counter++; parent_hierarchy[ type_counter ] = parent_coord.first; if (type_counter < high_type){ // get the coordinate information for the next parent parent_coord = get_immediate_parent(parent_id); parent_id = get_item_id(parent_coord.second); } } return parent_hierarchy; } int CrushWrapper::get_children(int id, list<int> *children) const { // leaf? if (id >= 0) { return 0; } auto *b = get_bucket(id); if (IS_ERR(b)) { return -ENOENT; } for (unsigned n=0; n<b->size; n++) { children->push_back(b->items[n]); } return b->size; } int CrushWrapper::get_all_children(int id, set<int> *children) const { // leaf? if (id >= 0) { return 0; } auto *b = get_bucket(id); if (IS_ERR(b)) { return -ENOENT; } int c = 0; for (unsigned n = 0; n < b->size; n++) { children->insert(b->items[n]); c++; auto r = get_all_children(b->items[n], children); if (r < 0) return r; c += r; } return c; } void CrushWrapper::get_children_of_type(int id, int type, vector<int> *children, bool exclude_shadow) const { if (id >= 0) { if (type == 0) { // want leaf? 
children->push_back(id); } return; } auto b = get_bucket(id); if (IS_ERR(b)) { return; } if (b->type < type) { // give up return; } else if (b->type == type) { if (!is_shadow_item(b->id) || !exclude_shadow) { children->push_back(b->id); } return; } for (unsigned n = 0; n < b->size; n++) { get_children_of_type(b->items[n], type, children, exclude_shadow); } } int CrushWrapper::verify_upmap(CephContext *cct, int rule_id, int pool_size, const vector<int>& up) { auto rule = get_rule(rule_id); if (IS_ERR(rule) || !rule) { lderr(cct) << __func__ << " rule " << rule_id << " does not exist" << dendl; return -ENOENT; } int root_bucket = 0; int cursor = 0; std::map<int, int> type_stack; for (unsigned step = 0; step < rule->len; ++step) { auto curstep = &rule->steps[step]; ldout(cct, 10) << __func__ << " step " << step << dendl; switch (curstep->op) { case CRUSH_RULE_TAKE: { root_bucket = curstep->arg1; } break; case CRUSH_RULE_CHOOSELEAF_FIRSTN: case CRUSH_RULE_CHOOSELEAF_INDEP: { int numrep = curstep->arg1; int type = curstep->arg2; if (numrep <= 0) numrep += pool_size; type_stack.emplace(type, numrep); if (type == 0) // osd break; map<int, set<int>> osds_by_parent; // parent_of_desired_type -> osds for (auto osd : up) { auto parent = get_parent_of_type(osd, type, rule_id); if (parent < 0) { osds_by_parent[parent].insert(osd); } else { ldout(cct, 1) << __func__ << " unable to get parent of osd." << osd << ", skipping for now" << dendl; } } for (auto i : osds_by_parent) { if (i.second.size() > 1) { lderr(cct) << __func__ << " multiple osds " << i.second << " come from same failure domain " << i.first << dendl; return -EINVAL; } } } break; case CRUSH_RULE_CHOOSE_FIRSTN: case CRUSH_RULE_CHOOSE_INDEP: { int numrep = curstep->arg1; int type = curstep->arg2; if (numrep <= 0) numrep += pool_size; type_stack.emplace(type, numrep); if (type == 0) // osd break; set<int> parents_of_type; for (auto osd : up) { auto parent = get_parent_of_type(osd, type, rule_id); if (parent < 0) { parents_of_type.insert(parent); } else { ldout(cct, 1) << __func__ << " unable to get parent of osd." << osd << ", skipping for now" << dendl; } } if ((int)parents_of_type.size() > numrep) { lderr(cct) << __func__ << " number of buckets " << parents_of_type.size() << " exceeds desired " << numrep << dendl; return -EINVAL; } } break; case CRUSH_RULE_EMIT: { if (root_bucket < 0) { int num_osds = 1; for (auto &item : type_stack) { num_osds *= item.second; } // validate the osd's in subtree for (int c = 0; cursor < (int)up.size() && c < num_osds; ++cursor, ++c) { int osd = up[cursor]; if (!subtree_contains(root_bucket, osd)) { lderr(cct) << __func__ << " osd " << osd << " not in bucket " << root_bucket << dendl; return -EINVAL; } } } type_stack.clear(); root_bucket = 0; } break; default: // ignore break; } } return 0; } int CrushWrapper::_get_leaves(int id, list<int> *leaves) const { ceph_assert(leaves); // Already leaf? 
if (id >= 0) { leaves->push_back(id); return 0; } auto b = get_bucket(id); if (IS_ERR(b)) { return -ENOENT; } for (unsigned n = 0; n < b->size; n++) { if (b->items[n] >= 0) { leaves->push_back(b->items[n]); } else { // is a bucket, do recursive call int r = _get_leaves(b->items[n], leaves); if (r < 0) { return r; } } } return 0; // all is well } int CrushWrapper::get_leaves(const string &name, set<int> *leaves) const { ceph_assert(leaves); leaves->clear(); if (!name_exists(name)) { return -ENOENT; } int id = get_item_id(name); if (id >= 0) { // already leaf leaves->insert(id); return 0; } list<int> unordered; int r = _get_leaves(id, &unordered); if (r < 0) { return r; } for (auto &p : unordered) { leaves->insert(p); } return 0; } int CrushWrapper::insert_item( CephContext *cct, int item, float weight, string name, const map<string,string>& loc, // typename -> bucketname bool init_weight_sets) { ldout(cct, 5) << "insert_item item " << item << " weight " << weight << " name " << name << " loc " << loc << dendl; if (!is_valid_crush_name(name)) return -EINVAL; if (!is_valid_crush_loc(cct, loc)) return -EINVAL; int r = validate_weightf(weight); if (r < 0) { return r; } if (name_exists(name)) { if (get_item_id(name) != item) { ldout(cct, 10) << "device name '" << name << "' already exists as id " << get_item_id(name) << dendl; return -EEXIST; } } else { set_item_name(item, name); } int cur = item; // 1. create locations if locations don't exist // 2. add child in the location with 0 weight. // Check more detail of insert_item method declared in // CrushWrapper.h for (auto p = type_map.begin(); p != type_map.end(); ++p) { // ignore device type if (p->first == 0) continue; // skip types that are unspecified map<string,string>::const_iterator q = loc.find(p->second); if (q == loc.end()) { ldout(cct, 2) << "warning: did not specify location for '" << p->second << "' level (levels are " << type_map << ")" << dendl; continue; } if (!name_exists(q->second)) { ldout(cct, 5) << "insert_item creating bucket " << q->second << dendl; int zero_weight = 0, new_bucket_id; int r = add_bucket(0, 0, CRUSH_HASH_DEFAULT, p->first, 1, &cur, &zero_weight, &new_bucket_id); if (r < 0) { ldout(cct, 1) << "add_bucket failure error: " << cpp_strerror(r) << dendl; return r; } set_item_name(new_bucket_id, q->second); cur = new_bucket_id; continue; } // add to an existing bucket int id = get_item_id(q->second); if (!bucket_exists(id)) { ldout(cct, 1) << "insert_item doesn't have bucket " << id << dendl; return -EINVAL; } // check that we aren't creating a cycle. if (subtree_contains(id, cur)) { ldout(cct, 1) << "insert_item item " << cur << " already exists beneath " << id << dendl; return -EINVAL; } // we have done sanity check above crush_bucket *b = get_bucket(id); if (p->first != b->type) { ldout(cct, 1) << "insert_item existing bucket has type " << "'" << type_map[b->type] << "' != " << "'" << type_map[p->first] << "'" << dendl; return -EINVAL; } // are we forming a loop? 
if (subtree_contains(cur, b->id)) { ldout(cct, 1) << "insert_item " << cur << " already contains " << b->id << "; cannot form loop" << dendl; return -ELOOP; } ldout(cct, 5) << "insert_item adding " << cur << " weight " << weight << " to bucket " << id << dendl; [[maybe_unused]] int r = bucket_add_item(b, cur, 0); ceph_assert(!r); break; } // adjust the item's weight in location if (adjust_item_weightf_in_loc(cct, item, weight, loc, item >= 0 && init_weight_sets) > 0) { if (item >= crush->max_devices) { crush->max_devices = item + 1; ldout(cct, 5) << "insert_item max_devices now " << crush->max_devices << dendl; } r = rebuild_roots_with_classes(cct); if (r < 0) { ldout(cct, 0) << __func__ << " unable to rebuild roots with classes: " << cpp_strerror(r) << dendl; return r; } return 0; } ldout(cct, 1) << "error: didn't find anywhere to add item " << item << " in " << loc << dendl; return -EINVAL; } int CrushWrapper::move_bucket( CephContext *cct, int id, const map<string,string>& loc) { // sorry this only works for buckets if (id >= 0) return -EINVAL; if (!item_exists(id)) return -ENOENT; // get the name of the bucket we are trying to move for later string id_name = get_item_name(id); // detach the bucket int bucket_weight = detach_bucket(cct, id); // insert the bucket back into the hierarchy return insert_item(cct, id, bucket_weight / (float)0x10000, id_name, loc, false); } int CrushWrapper::detach_bucket(CephContext *cct, int item) { if (!crush) return (-EINVAL); if (item >= 0) return (-EINVAL); // check that the bucket that we want to detach exists ceph_assert(bucket_exists(item)); // get the bucket's weight crush_bucket *b = get_bucket(item); unsigned bucket_weight = b->weight; // get where the bucket is located pair<string, string> bucket_location = get_immediate_parent(item); // get the id of the parent bucket int parent_id = get_item_id(bucket_location.second); // get the parent bucket crush_bucket *parent_bucket = get_bucket(parent_id); if (!IS_ERR(parent_bucket)) { // zero out the bucket weight adjust_item_weight_in_bucket(cct, item, 0, parent_bucket->id, true); // remove the bucket from the parent bucket_remove_item(parent_bucket, item); } else if (PTR_ERR(parent_bucket) != -ENOENT) { return PTR_ERR(parent_bucket); } // check that we're happy int test_weight = 0; map<string,string> test_location; test_location[ bucket_location.first ] = (bucket_location.second); bool successful_detach = !(check_item_loc(cct, item, test_location, &test_weight)); ceph_assert(successful_detach); ceph_assert(test_weight == 0); return bucket_weight; } bool CrushWrapper::is_parent_of(int child, int p) const { int parent = 0; while (!get_immediate_parent_id(child, &parent)) { if (parent == p) { return true; } child = parent; } return false; } int CrushWrapper::swap_bucket(CephContext *cct, int src, int dst) { if (src >= 0 || dst >= 0) return -EINVAL; if (!item_exists(src) || !item_exists(dst)) return -EINVAL; crush_bucket *a = get_bucket(src); crush_bucket *b = get_bucket(dst); if (is_parent_of(a->id, b->id) || is_parent_of(b->id, a->id)) { return -EINVAL; } unsigned aw = a->weight; unsigned bw = b->weight; // swap weights adjust_item_weight(cct, a->id, bw); adjust_item_weight(cct, b->id, aw); // swap items map<int,unsigned> tmp; unsigned as = a->size; unsigned bs = b->size; for (unsigned i = 0; i < as; ++i) { int item = a->items[0]; int itemw = crush_get_bucket_item_weight(a, 0); tmp[item] = itemw; bucket_remove_item(a, item); } ceph_assert(a->size == 0); ceph_assert(b->size == bs); for (unsigned i = 0; i 
< bs; ++i) { int item = b->items[0]; int itemw = crush_get_bucket_item_weight(b, 0); bucket_remove_item(b, item); bucket_add_item(a, item, itemw); } ceph_assert(a->size == bs); ceph_assert(b->size == 0); for (auto t : tmp) { bucket_add_item(b, t.first, t.second); } ceph_assert(a->size == bs); ceph_assert(b->size == as); // swap names swap_names(src, dst); return rebuild_roots_with_classes(cct); } int CrushWrapper::link_bucket( CephContext *cct, int id, const map<string,string>& loc) { // sorry this only works for buckets if (id >= 0) return -EINVAL; if (!item_exists(id)) return -ENOENT; // get the name of the bucket we are trying to move for later string id_name = get_item_name(id); crush_bucket *b = get_bucket(id); unsigned bucket_weight = b->weight; return insert_item(cct, id, bucket_weight / (float)0x10000, id_name, loc); } int CrushWrapper::create_or_move_item( CephContext *cct, int item, float weight, string name, const map<string,string>& loc, // typename -> bucketname bool init_weight_sets) { int ret = 0; int old_iweight; if (!is_valid_crush_name(name)) return -EINVAL; if (check_item_loc(cct, item, loc, &old_iweight)) { ldout(cct, 5) << "create_or_move_item " << item << " already at " << loc << dendl; } else { if (_search_item_exists(item)) { weight = get_item_weightf(item); ldout(cct, 10) << "create_or_move_item " << item << " exists with weight " << weight << dendl; remove_item(cct, item, true); } ldout(cct, 5) << "create_or_move_item adding " << item << " weight " << weight << " at " << loc << dendl; ret = insert_item(cct, item, weight, name, loc, item >= 0 && init_weight_sets); if (ret == 0) ret = 1; // changed } return ret; } int CrushWrapper::update_item( CephContext *cct, int item, float weight, string name, const map<string,string>& loc) // typename -> bucketname { ldout(cct, 5) << "update_item item " << item << " weight " << weight << " name " << name << " loc " << loc << dendl; int ret = 0; if (!is_valid_crush_name(name)) return -EINVAL; if (!is_valid_crush_loc(cct, loc)) return -EINVAL; ret = validate_weightf(weight); if (ret < 0) { return ret; } // compare quantized (fixed-point integer) weights! 
int iweight = (int)(weight * (float)0x10000); int old_iweight; if (check_item_loc(cct, item, loc, &old_iweight)) { ldout(cct, 5) << "update_item " << item << " already at " << loc << dendl; if (old_iweight != iweight) { ldout(cct, 5) << "update_item " << item << " adjusting weight " << ((float)old_iweight/(float)0x10000) << " -> " << weight << dendl; adjust_item_weight_in_loc(cct, item, iweight, loc); ret = rebuild_roots_with_classes(cct); if (ret < 0) { ldout(cct, 0) << __func__ << " unable to rebuild roots with classes: " << cpp_strerror(ret) << dendl; return ret; } ret = 1; } if (get_item_name(item) != name) { ldout(cct, 5) << "update_item setting " << item << " name to " << name << dendl; set_item_name(item, name); ret = 1; } } else { if (item_exists(item)) { remove_item(cct, item, true); } ldout(cct, 5) << "update_item adding " << item << " weight " << weight << " at " << loc << dendl; ret = insert_item(cct, item, weight, name, loc); if (ret == 0) ret = 1; // changed } return ret; } int CrushWrapper::get_item_weight(int id) const { for (int bidx = 0; bidx < crush->max_buckets; bidx++) { crush_bucket *b = crush->buckets[bidx]; if (b == NULL) continue; if (b->id == id) return b->weight; for (unsigned i = 0; i < b->size; i++) if (b->items[i] == id) return crush_get_bucket_item_weight(b, i); } return -ENOENT; } int CrushWrapper::get_item_weight_in_loc(int id, const map<string,string> &loc) { for (map<string,string>::const_iterator l = loc.begin(); l != loc.end(); ++l) { int bid = get_item_id(l->second); if (!bucket_exists(bid)) continue; crush_bucket *b = get_bucket(bid); for (unsigned int i = 0; i < b->size; i++) { if (b->items[i] == id) { return crush_get_bucket_item_weight(b, i); } } } return -ENOENT; } int CrushWrapper::adjust_item_weight(CephContext *cct, int id, int weight, bool update_weight_sets) { ldout(cct, 5) << __func__ << " " << id << " weight " << weight << " update_weight_sets=" << (int)update_weight_sets << dendl; int changed = 0; for (int bidx = 0; bidx < crush->max_buckets; bidx++) { if (!crush->buckets[bidx]) { continue; } int r = adjust_item_weight_in_bucket(cct, id, weight, -1-bidx, update_weight_sets); if (r > 0) { ++changed; } } if (!changed) { return -ENOENT; } return changed; } int CrushWrapper::adjust_item_weight_in_bucket( CephContext *cct, int id, int weight, int bucket_id, bool update_weight_sets) { ldout(cct, 5) << __func__ << " " << id << " weight " << weight << " in bucket " << bucket_id << " update_weight_sets=" << (int)update_weight_sets << dendl; int changed = 0; if (!bucket_exists(bucket_id)) { return -ENOENT; } crush_bucket *b = get_bucket(bucket_id); for (unsigned int i = 0; i < b->size; i++) { if (b->items[i] == id) { int diff = bucket_adjust_item_weight(cct, b, id, weight, update_weight_sets); ldout(cct, 5) << __func__ << " " << id << " diff " << diff << " in bucket " << bucket_id << dendl; adjust_item_weight(cct, bucket_id, b->weight, false); changed++; } } // update weight-sets so they continue to sum for (auto& p : choose_args) { auto &cmap = p.second; if (!cmap.args) { continue; } crush_choose_arg *arg = &cmap.args[-1 - bucket_id]; if (!arg->weight_set) { continue; } ceph_assert(arg->weight_set_positions > 0); vector<int> w(arg->weight_set_positions); for (unsigned i = 0; i < b->size; ++i) { for (unsigned j = 0; j < arg->weight_set_positions; ++j) { crush_weight_set *weight_set = &arg->weight_set[j]; w[j] += weight_set->weights[i]; } } ldout(cct,5) << __func__ << " adjusting bucket " << bucket_id << " cmap " << p.first << " weights to " << w << 
dendl; ostringstream ss; choose_args_adjust_item_weight(cct, cmap, bucket_id, w, &ss); } if (!changed) { return -ENOENT; } return changed; } int CrushWrapper::adjust_item_weight_in_loc( CephContext *cct, int id, int weight, const map<string,string>& loc, bool update_weight_sets) { ldout(cct, 5) << "adjust_item_weight_in_loc " << id << " weight " << weight << " in " << loc << " update_weight_sets=" << (int)update_weight_sets << dendl; int changed = 0; for (auto l = loc.begin(); l != loc.end(); ++l) { int bid = get_item_id(l->second); if (!bucket_exists(bid)) continue; int r = adjust_item_weight_in_bucket(cct, id, weight, bid, update_weight_sets); if (r > 0) { ++changed; } } if (!changed) { return -ENOENT; } return changed; } int CrushWrapper::adjust_subtree_weight(CephContext *cct, int id, int weight, bool update_weight_sets) { ldout(cct, 5) << __func__ << " " << id << " weight " << weight << dendl; crush_bucket *b = get_bucket(id); if (IS_ERR(b)) return PTR_ERR(b); int changed = 0; list<crush_bucket*> q; q.push_back(b); while (!q.empty()) { b = q.front(); q.pop_front(); int local_changed = 0; for (unsigned i=0; i<b->size; ++i) { int n = b->items[i]; if (n >= 0) { adjust_item_weight_in_bucket(cct, n, weight, b->id, update_weight_sets); ++changed; ++local_changed; } else { crush_bucket *sub = get_bucket(n); if (IS_ERR(sub)) continue; q.push_back(sub); } } } int ret = rebuild_roots_with_classes(cct); if (ret < 0) { ldout(cct, 0) << __func__ << " unable to rebuild roots with classes: " << cpp_strerror(ret) << dendl; return ret; } return changed; } bool CrushWrapper::check_item_present(int id) const { bool found = false; for (int bidx = 0; bidx < crush->max_buckets; bidx++) { crush_bucket *b = crush->buckets[bidx]; if (b == 0) continue; for (unsigned i = 0; i < b->size; i++) if (b->items[i] == id) found = true; } return found; } pair<string,string> CrushWrapper::get_immediate_parent(int id, int *_ret) const { for (int bidx = 0; bidx < crush->max_buckets; bidx++) { crush_bucket *b = crush->buckets[bidx]; if (b == 0) continue; if (is_shadow_item(b->id)) continue; for (unsigned i = 0; i < b->size; i++) if (b->items[i] == id) { string parent_id = name_map.at(b->id); string parent_bucket_type = type_map.at(b->type); if (_ret) *_ret = 0; return make_pair(parent_bucket_type, parent_id); } } if (_ret) *_ret = -ENOENT; return pair<string, string>(); } int CrushWrapper::get_immediate_parent_id(int id, int *parent) const { for (int bidx = 0; bidx < crush->max_buckets; bidx++) { crush_bucket *b = crush->buckets[bidx]; if (b == 0) continue; if (is_shadow_item(b->id)) continue; for (unsigned i = 0; i < b->size; i++) { if (b->items[i] == id) { *parent = b->id; return 0; } } } return -ENOENT; } int CrushWrapper::get_parent_of_type(int item, int type, int rule) const { if (rule < 0) { // no rule specified do { int r = get_immediate_parent_id(item, &item); if (r < 0) { return 0; } } while (get_bucket_type(item) != type); return item; } set<int> roots; find_takes_by_rule(rule, &roots); for (auto root : roots) { vector<int> candidates; get_children_of_type(root, type, &candidates, false); for (auto candidate : candidates) { if (subtree_contains(candidate, item)) { // note that here we assure that no two different buckets // from a single crush rule will share a same device, // which should generally be true. 
return candidate; } } } return 0; // not found } void CrushWrapper::get_subtree_of_type(int type, vector<int> *subtrees) { set<int> roots; find_roots(&roots); for (auto r: roots) { crush_bucket *b = get_bucket(r); if (IS_ERR(b)) continue; get_children_of_type(b->id, type, subtrees); } } bool CrushWrapper::class_is_in_use(int class_id, ostream *ss) { list<unsigned> rules; for (unsigned i = 0; i < crush->max_rules; ++i) { crush_rule *r = crush->rules[i]; if (!r) continue; for (unsigned j = 0; j < r->len; ++j) { if (r->steps[j].op == CRUSH_RULE_TAKE) { int root = r->steps[j].arg1; for (auto &p : class_bucket) { auto& q = p.second; if (q.count(class_id) && q[class_id] == root) { rules.push_back(i); } } } } } if (rules.empty()) { return false; } if (ss) { ostringstream os; for (auto &p: rules) { os << "'" << get_rule_name(p) <<"',"; } string out(os.str()); out.resize(out.size() - 1); // drop last ',' *ss << "still referenced by crush_rule(s): " << out; } return true; } int CrushWrapper::rename_class(const string& srcname, const string& dstname) { auto i = class_rname.find(srcname); if (i == class_rname.end()) return -ENOENT; auto j = class_rname.find(dstname); if (j != class_rname.end()) return -EEXIST; int class_id = i->second; ceph_assert(class_name.count(class_id)); // rename any shadow buckets of old class name for (auto &it: class_map) { if (it.first < 0 && it.second == class_id) { string old_name = get_item_name(it.first); size_t pos = old_name.find("~"); ceph_assert(pos != string::npos); string name_no_class = old_name.substr(0, pos); string old_class_name = old_name.substr(pos + 1); ceph_assert(old_class_name == srcname); string new_name = name_no_class + "~" + dstname; // we do not use set_item_name // because the name is intentionally invalid name_map[it.first] = new_name; have_rmaps = false; } } // rename class class_rname.erase(srcname); class_name.erase(class_id); class_rname[dstname] = class_id; class_name[class_id] = dstname; return 0; } int CrushWrapper::populate_classes( const std::map<int32_t, map<int32_t, int32_t>>& old_class_bucket) { // build set of previous used shadow ids set<int32_t> used_ids; for (auto& p : old_class_bucket) { for (auto& q : p.second) { used_ids.insert(q.second); } } // accumulate weight values for each carg and bucket as we go. because it is // depth first, we will have the nested bucket weights we need when we // finish constructing the containing buckets. 
map<int,map<int,vector<int>>> cmap_item_weight; // cargs -> bno -> [bucket weight for each position] set<int> roots; find_nonshadow_roots(&roots); for (auto &r : roots) { assert(r < 0); for (auto &c : class_name) { int clone; int res = device_class_clone(r, c.first, old_class_bucket, used_ids, &clone, &cmap_item_weight); if (res < 0) return res; } } return 0; } int CrushWrapper::trim_roots_with_class(CephContext *cct) { set<int> roots; find_shadow_roots(&roots); for (auto &r : roots) { if (r >= 0) continue; int res = remove_root(cct, r); if (res) return res; } // there is no need to reweight because we only remove from the // root and down return 0; } int32_t CrushWrapper::_alloc_class_id() const { if (class_name.empty()) { return 0; } int32_t class_id = class_name.rbegin()->first + 1; if (class_id >= 0) { return class_id; } // wrapped, pick a random start and do exhaustive search uint32_t upperlimit = std::numeric_limits<int32_t>::max(); upperlimit++; class_id = rand() % upperlimit; const auto start = class_id; do { if (!class_name.count(class_id)) { return class_id; } else { class_id++; if (class_id < 0) { class_id = 0; } } } while (class_id != start); ceph_abort_msg("no available class id"); } int CrushWrapper::set_subtree_class( const string& subtree, const string& new_class) { if (!name_exists(subtree)) { return -ENOENT; } int new_class_id = get_or_create_class_id(new_class); int id = get_item_id(subtree); list<int> q = { id }; while (!q.empty()) { int id = q.front(); q.pop_front(); crush_bucket *b = get_bucket(id); if (IS_ERR(b)) { return PTR_ERR(b); } for (unsigned i = 0; i < b->size; ++i) { int item = b->items[i]; if (item >= 0) { class_map[item] = new_class_id; } else { q.push_back(item); } } } return 0; } int CrushWrapper::reclassify( CephContext *cct, ostream& out, const map<string,string>& classify_root, const map<string,pair<string,string>>& classify_bucket ) { map<int,string> reclassified_bucket; // orig_id -> class // classify_root for (auto& i : classify_root) { string root = i.first; if (!name_exists(root)) { out << "root " << root << " does not exist" << std::endl; return -EINVAL; } int root_id = get_item_id(root); string new_class = i.second; int new_class_id = get_or_create_class_id(new_class); out << "classify_root " << root << " (" << root_id << ") as " << new_class << std::endl; // validate rules for (unsigned j = 0; j < crush->max_rules; j++) { if (crush->rules[j]) { auto rule = crush->rules[j]; for (unsigned k = 0; k < rule->len; ++k) { if (rule->steps[k].op == CRUSH_RULE_TAKE) { int step_item = get_rule_arg1(j, k); int original_item; int c; int res = split_id_class(step_item, &original_item, &c); if (res < 0) return res; if (c >= 0) { if (original_item == root_id) { out << " rule " << j << " includes take on root " << root << " class " << c << std::endl; return -EINVAL; } } } } } } // rebuild new buckets for root //cout << "before class_bucket: " << class_bucket << std::endl; map<int,int> renumber; list<int> q; q.push_back(root_id); while (!q.empty()) { int id = q.front(); q.pop_front(); crush_bucket *bucket = get_bucket(id); if (IS_ERR(bucket)) { out << "cannot find bucket " << id << ": " << cpp_strerror(PTR_ERR(bucket)) << std::endl; return PTR_ERR(bucket); } // move bucket int new_id = get_new_bucket_id(); out << " renumbering bucket " << id << " -> " << new_id << std::endl; renumber[id] = new_id; crush->buckets[-1-new_id] = bucket; bucket->id = new_id; crush->buckets[-1-id] = crush_make_bucket(crush, bucket->alg, bucket->hash, bucket->type, 0, NULL, NULL); 
crush->buckets[-1-id]->id = id; for (auto& i : choose_args) { i.second.args[-1-new_id] = i.second.args[-1-id]; memset(&i.second.args[-1-id], 0, sizeof(i.second.args[0])); } class_bucket.erase(id); class_bucket[new_id][new_class_id] = id; name_map[new_id] = string(get_item_name(id)); name_map[id] = string(get_item_name(id)) + "~" + new_class; for (unsigned j = 0; j < bucket->size; ++j) { if (bucket->items[j] < 0) { q.push_front(bucket->items[j]); } else { // we don't reclassify the device here; if the users wants that, // they can pass --set-subtree-class separately. } } } //cout << "mid class_bucket: " << class_bucket << std::endl; for (int i = 0; i < crush->max_buckets; ++i) { crush_bucket *b = crush->buckets[i]; if (!b) { continue; } for (unsigned j = 0; j < b->size; ++j) { if (renumber.count(b->items[j])) { b->items[j] = renumber[b->items[j]]; } } } int r = rebuild_roots_with_classes(cct); if (r < 0) { out << "failed to rebuild_roots_with_classes: " << cpp_strerror(r) << std::endl; return r; } //cout << "final class_bucket: " << class_bucket << std::endl; } // classify_bucket map<int,int> send_to; // source bucket -> dest bucket map<int,map<int,int>> new_class_bucket; map<int,string> new_bucket_names; map<int,map<string,string>> new_buckets; map<string,int> new_bucket_by_name; for (auto& i : classify_bucket) { const string& match = i.first; // prefix% or %suffix const string& new_class = i.second.first; const string& default_parent = i.second.second; if (!name_exists(default_parent)) { out << "default parent " << default_parent << " does not exist" << std::endl; return -EINVAL; } int default_parent_id = get_item_id(default_parent); crush_bucket *default_parent_bucket = get_bucket(default_parent_id); assert(default_parent_bucket); string default_parent_type_name = get_type_name(default_parent_bucket->type); out << "classify_bucket " << match << " as " << new_class << " default bucket " << default_parent << " (" << default_parent_type_name << ")" << std::endl; int new_class_id = get_or_create_class_id(new_class); for (int j = 0; j < crush->max_buckets; ++j) { crush_bucket *b = crush->buckets[j]; if (!b || is_shadow_item(b->id)) { continue; } string name = get_item_name(b->id); if (name.length() < match.length()) { continue; } string basename; if (match[0] == '%') { if (match.substr(1) != name.substr(name.size() - match.size() + 1)) { continue; } basename = name.substr(0, name.size() - match.size() + 1); } else if (match[match.size() - 1] == '%') { if (match.substr(0, match.size() - 1) != name.substr(0, match.size() - 1)) { continue; } basename = name.substr(match.size() - 1); } else if (match == name) { basename = default_parent; } else { continue; } cout << "match " << match << " to " << name << " basename " << basename << std::endl; // look up or create basename bucket int base_id; if (name_exists(basename)) { base_id = get_item_id(basename); cout << " have base " << base_id << std::endl; } else if (new_bucket_by_name.count(basename)) { base_id = new_bucket_by_name[basename]; cout << " already creating base " << base_id << std::endl; } else { base_id = get_new_bucket_id(); crush->buckets[-1-base_id] = crush_make_bucket(crush, b->alg, b->hash, b->type, 0, NULL, NULL); crush->buckets[-1-base_id]->id = base_id; name_map[base_id] = basename; new_bucket_by_name[basename] = base_id; cout << " created base " << base_id << std::endl; new_buckets[base_id][default_parent_type_name] = default_parent; } send_to[b->id] = base_id; new_class_bucket[base_id][new_class_id] = b->id; 
new_bucket_names[b->id] = basename + "~" + get_class_name(new_class_id); // make sure devices are classified for (unsigned i = 0; i < b->size; ++i) { int item = b->items[i]; if (item >= 0) { class_map[item] = new_class_id; } } } } // no name_exists() works below, have_rmaps = false; // copy items around //cout << "send_to " << send_to << std::endl; set<int> roots; find_roots(&roots); for (auto& i : send_to) { crush_bucket *from = get_bucket(i.first); crush_bucket *to = get_bucket(i.second); cout << "moving items from " << from->id << " (" << get_item_name(from->id) << ") to " << to->id << " (" << get_item_name(to->id) << ")" << std::endl; for (unsigned j = 0; j < from->size; ++j) { int item = from->items[j]; int r; map<string,string> to_loc; to_loc[get_type_name(to->type)] = get_item_name(to->id); if (item >= 0) { if (subtree_contains(to->id, item)) { continue; } map<string,string> from_loc; from_loc[get_type_name(from->type)] = get_item_name(from->id); auto w = get_item_weightf_in_loc(item, from_loc); r = insert_item(cct, item, w, get_item_name(item), to_loc); } else { if (!send_to.count(item)) { lderr(cct) << "item " << item << " in bucket " << from->id << " is not also a reclassified bucket" << dendl; return -EINVAL; } int newitem = send_to[item]; if (subtree_contains(to->id, newitem)) { continue; } r = link_bucket(cct, newitem, to_loc); } if (r != 0) { cout << __func__ << " err from insert_item: " << cpp_strerror(r) << std::endl; return r; } } } // make sure new buckets have parents for (auto& i : new_buckets) { int parent; if (get_immediate_parent_id(i.first, &parent) < 0) { cout << "new bucket " << i.first << " missing parent, adding at " << i.second << std::endl; int r = link_bucket(cct, i.first, i.second); if (r != 0) { cout << __func__ << " err from insert_item: " << cpp_strerror(r) << std::endl; return r; } } } // set class mappings //cout << "pre class_bucket: " << class_bucket << std::endl; for (auto& i : new_class_bucket) { for (auto& j : i.second) { class_bucket[i.first][j.first] = j.second; } } //cout << "post class_bucket: " << class_bucket << std::endl; for (auto& i : new_bucket_names) { name_map[i.first] = i.second; } int r = rebuild_roots_with_classes(cct); if (r < 0) { out << "failed to rebuild_roots_with_classes: " << cpp_strerror(r) << std::endl; return r; } //cout << "final class_bucket: " << class_bucket << std::endl; return 0; } int CrushWrapper::get_new_bucket_id() { int id = -1; while (crush->buckets[-1-id] && -1-id < crush->max_buckets) { id--; } if (-1-id == crush->max_buckets) { ++crush->max_buckets; crush->buckets = (struct crush_bucket**)realloc( crush->buckets, sizeof(crush->buckets[0]) * crush->max_buckets); for (auto& i : choose_args) { assert(i.second.size == (__u32)crush->max_buckets - 1); ++i.second.size; i.second.args = (struct crush_choose_arg*)realloc( i.second.args, sizeof(i.second.args[0]) * i.second.size); } } return id; } void CrushWrapper::reweight(CephContext *cct) { set<int> roots; find_nonshadow_roots(&roots); for (auto id : roots) { if (id >= 0) continue; crush_bucket *b = get_bucket(id); ldout(cct, 5) << "reweight root bucket " << id << dendl; int r = crush_reweight_bucket(crush, b); ceph_assert(r == 0); for (auto& i : choose_args) { //cout << "carg " << i.first << std::endl; vector<uint32_t> w; // discard top-level weights reweight_bucket(b, i.second, &w); } } int r = rebuild_roots_with_classes(cct); ceph_assert(r == 0); } void CrushWrapper::reweight_bucket( crush_bucket *b, crush_choose_arg_map& arg_map, vector<uint32_t> *weightv) { int 
idx = -1 - b->id; unsigned npos = arg_map.args[idx].weight_set_positions; //cout << __func__ << " " << b->id << " npos " << npos << std::endl; weightv->resize(npos); for (unsigned i = 0; i < b->size; ++i) { int item = b->items[i]; if (item >= 0) { for (unsigned pos = 0; pos < npos; ++pos) { (*weightv)[pos] += arg_map.args[idx].weight_set->weights[i]; } } else { vector<uint32_t> subw(npos); crush_bucket *sub = get_bucket(item); assert(sub); reweight_bucket(sub, arg_map, &subw); for (unsigned pos = 0; pos < npos; ++pos) { (*weightv)[pos] += subw[pos]; // strash the real bucket weight as the weights for this reference arg_map.args[idx].weight_set->weights[i] = subw[pos]; } } } //cout << __func__ << " finish " << b->id << " " << *weightv << std::endl; } int CrushWrapper::add_simple_rule_at( string name, string root_name, string failure_domain_name, string device_class, string mode, int rule_type, int rno, ostream *err) { if (rule_exists(name)) { if (err) *err << "rule " << name << " exists"; return -EEXIST; } if (rno >= 0) { if (rule_exists(rno)) { if (err) *err << "rule with ruleno " << rno << " exists"; return -EEXIST; } } else { for (rno = 0; rno < get_max_rules(); rno++) { if (!rule_exists(rno)) break; } } if (!name_exists(root_name)) { if (err) *err << "root item " << root_name << " does not exist"; return -ENOENT; } int root = get_item_id(root_name); int type = 0; if (failure_domain_name.length()) { type = get_type_id(failure_domain_name); if (type < 0) { if (err) *err << "unknown type " << failure_domain_name; return -EINVAL; } } if (device_class.size()) { if (!class_exists(device_class)) { if (err) *err << "device class " << device_class << " does not exist"; return -EINVAL; } int c = get_class_id(device_class); if (class_bucket.count(root) == 0 || class_bucket[root].count(c) == 0) { if (err) *err << "root " << root_name << " has no devices with class " << device_class; return -EINVAL; } root = class_bucket[root][c]; } if (mode != "firstn" && mode != "indep") { if (err) *err << "unknown mode " << mode; return -EINVAL; } int steps = 3; if (mode == "indep") steps = 5; crush_rule *rule = crush_make_rule(steps, rule_type); ceph_assert(rule); int step = 0; if (mode == "indep") { crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSELEAF_TRIES, 5, 0); crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSE_TRIES, 100, 0); } crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, root, 0); if (type) crush_rule_set_step(rule, step++, mode == "firstn" ? CRUSH_RULE_CHOOSELEAF_FIRSTN : CRUSH_RULE_CHOOSELEAF_INDEP, CRUSH_CHOOSE_N, type); else crush_rule_set_step(rule, step++, mode == "firstn" ? 
CRUSH_RULE_CHOOSE_FIRSTN : CRUSH_RULE_CHOOSE_INDEP, CRUSH_CHOOSE_N, 0); crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0); int ret = crush_add_rule(crush, rule, rno); if(ret < 0) { *err << "failed to add rule " << rno << " because " << cpp_strerror(ret); return ret; } set_rule_name(rno, name); have_rmaps = false; return rno; } int CrushWrapper::add_simple_rule( string name, string root_name, string failure_domain_name, string device_class, string mode, int rule_type, ostream *err) { return add_simple_rule_at(name, root_name, failure_domain_name, device_class, mode, rule_type, -1, err); } float CrushWrapper::_get_take_weight_osd_map(int root, map<int,float> *pmap) const { float sum = 0.0; list<int> q; q.push_back(root); //breadth first iterate the OSD tree while (!q.empty()) { int bno = q.front(); q.pop_front(); crush_bucket *b = crush->buckets[-1-bno]; ceph_assert(b); for (unsigned j=0; j<b->size; ++j) { int item_id = b->items[j]; if (item_id >= 0) { //it's an OSD float w = crush_get_bucket_item_weight(b, j); (*pmap)[item_id] = w; sum += w; } else { //not an OSD, expand the child later q.push_back(item_id); } } } return sum; } void CrushWrapper::_normalize_weight_map(float sum, const map<int,float>& m, map<int,float> *pmap) const { for (auto& p : m) { map<int,float>::iterator q = pmap->find(p.first); if (q == pmap->end()) { (*pmap)[p.first] = p.second / sum; } else { q->second += p.second / sum; } } } int CrushWrapper::get_take_weight_osd_map(int root, map<int,float> *pmap) const { map<int,float> m; float sum = _get_take_weight_osd_map(root, &m); _normalize_weight_map(sum, m, pmap); return 0; } int CrushWrapper::get_rule_weight_osd_map(unsigned ruleno, map<int,float> *pmap) const { if (ruleno >= crush->max_rules) return -ENOENT; if (crush->rules[ruleno] == NULL) return -ENOENT; crush_rule *rule = crush->rules[ruleno]; // build a weight map for each TAKE in the rule, and then merge them // FIXME: if there are multiple takes that place a different number of // objects we do not take that into account. (Also, note that doing this // right is also a function of the pool, since the crush rule // might choose 2 + choose 2 but pool size may only be 3.) 
for (unsigned i=0; i<rule->len; ++i) { map<int,float> m; float sum = 0; if (rule->steps[i].op == CRUSH_RULE_TAKE) { int n = rule->steps[i].arg1; if (n >= 0) { m[n] = 1.0; sum = 1.0; } else { sum += _get_take_weight_osd_map(n, &m); } } _normalize_weight_map(sum, m, pmap); } return 0; } int CrushWrapper::remove_rule(int ruleno) { if (ruleno >= (int)crush->max_rules) return -ENOENT; if (crush->rules[ruleno] == NULL) return -ENOENT; crush_destroy_rule(crush->rules[ruleno]); crush->rules[ruleno] = NULL; rule_name_map.erase(ruleno); have_rmaps = false; return rebuild_roots_with_classes(nullptr); } int CrushWrapper::bucket_adjust_item_weight( CephContext *cct, crush_bucket *bucket, int item, int weight, bool adjust_weight_sets) { if (adjust_weight_sets) { unsigned position; for (position = 0; position < bucket->size; position++) if (bucket->items[position] == item) break; ceph_assert(position != bucket->size); for (auto &w : choose_args) { crush_choose_arg_map &arg_map = w.second; crush_choose_arg *arg = &arg_map.args[-1-bucket->id]; for (__u32 j = 0; j < arg->weight_set_positions; j++) { crush_weight_set *weight_set = &arg->weight_set[j]; weight_set->weights[position] = weight; } } } return crush_bucket_adjust_item_weight(crush, bucket, item, weight); } int CrushWrapper::add_bucket( int bucketno, int alg, int hash, int type, int size, int *items, int *weights, int *idout) { if (alg == 0) { alg = get_default_bucket_alg(); if (alg == 0) return -EINVAL; } crush_bucket *b = crush_make_bucket(crush, alg, hash, type, size, items, weights); ceph_assert(b); ceph_assert(idout); int r = crush_add_bucket(crush, bucketno, b, idout); int pos = -1 - *idout; for (auto& p : choose_args) { crush_choose_arg_map& cmap = p.second; unsigned new_size = crush->max_buckets; if (cmap.args) { if ((int)cmap.size < crush->max_buckets) { cmap.args = static_cast<crush_choose_arg*>(realloc( cmap.args, sizeof(crush_choose_arg) * new_size)); ceph_assert(cmap.args); memset(&cmap.args[cmap.size], 0, sizeof(crush_choose_arg) * (new_size - cmap.size)); cmap.size = new_size; } } else { cmap.args = static_cast<crush_choose_arg*>(calloc(sizeof(crush_choose_arg), new_size)); ceph_assert(cmap.args); cmap.size = new_size; } if (size > 0) { int positions = get_choose_args_positions(cmap); crush_choose_arg& carg = cmap.args[pos]; carg.weight_set = static_cast<crush_weight_set*>(calloc(sizeof(crush_weight_set), size)); carg.weight_set_positions = positions; for (int ppos = 0; ppos < positions; ++ppos) { carg.weight_set[ppos].weights = (__u32*)calloc(sizeof(__u32), size); carg.weight_set[ppos].size = size; for (int bpos = 0; bpos < size; ++bpos) { carg.weight_set[ppos].weights[bpos] = weights[bpos]; } } } assert(crush->max_buckets == (int)cmap.size); } return r; } int CrushWrapper::bucket_add_item(crush_bucket *bucket, int item, int weight) { __u32 new_size = bucket->size + 1; int r = crush_bucket_add_item(crush, bucket, item, weight); if (r < 0) { return r; } for (auto &w : choose_args) { crush_choose_arg_map &arg_map = w.second; crush_choose_arg *arg = &arg_map.args[-1-bucket->id]; for (__u32 j = 0; j < arg->weight_set_positions; j++) { crush_weight_set *weight_set = &arg->weight_set[j]; weight_set->weights = (__u32*)realloc(weight_set->weights, new_size * sizeof(__u32)); ceph_assert(weight_set->size + 1 == new_size); weight_set->weights[weight_set->size] = weight; weight_set->size = new_size; } if (arg->ids_size) { arg->ids = (__s32 *)realloc(arg->ids, new_size * sizeof(__s32)); ceph_assert(arg->ids_size + 1 == new_size); 
arg->ids[arg->ids_size] = item; arg->ids_size = new_size; } } return 0; } int CrushWrapper::bucket_remove_item(crush_bucket *bucket, int item) { __u32 new_size = bucket->size - 1; unsigned position; for (position = 0; position < bucket->size; position++) if (bucket->items[position] == item) break; ceph_assert(position != bucket->size); int r = crush_bucket_remove_item(crush, bucket, item); if (r < 0) { return r; } for (auto &w : choose_args) { crush_choose_arg_map &arg_map = w.second; crush_choose_arg *arg = &arg_map.args[-1-bucket->id]; for (__u32 j = 0; j < arg->weight_set_positions; j++) { crush_weight_set *weight_set = &arg->weight_set[j]; ceph_assert(weight_set->size - 1 == new_size); for (__u32 k = position; k < new_size; k++) weight_set->weights[k] = weight_set->weights[k+1]; if (new_size) { weight_set->weights = (__u32*)realloc(weight_set->weights, new_size * sizeof(__u32)); } else { free(weight_set->weights); weight_set->weights = NULL; } weight_set->size = new_size; } if (arg->ids_size) { ceph_assert(arg->ids_size - 1 == new_size); for (__u32 k = position; k < new_size; k++) arg->ids[k] = arg->ids[k+1]; if (new_size) { arg->ids = (__s32 *)realloc(arg->ids, new_size * sizeof(__s32)); } else { free(arg->ids); arg->ids = NULL; } arg->ids_size = new_size; } } return 0; } int CrushWrapper::bucket_set_alg(int bid, int alg) { crush_bucket *b = get_bucket(bid); if (!b) { return -ENOENT; } b->alg = alg; return 0; } int CrushWrapper::update_device_class(int id, const string& class_name, const string& name, ostream *ss) { ceph_assert(item_exists(id)); auto old_class_name = get_item_class(id); if (old_class_name && old_class_name != class_name) { *ss << "osd." << id << " has already bound to class '" << old_class_name << "', can not reset class to '" << class_name << "'; " << "use 'ceph osd crush rm-device-class <id>' to " << "remove old class first"; return -EBUSY; } int class_id = get_or_create_class_id(class_name); if (id < 0) { *ss << name << " id " << id << " is negative"; return -EINVAL; } if (class_map.count(id) != 0 && class_map[id] == class_id) { *ss << name << " already set to class " << class_name << ". "; return 0; } set_item_class(id, class_id); int r = rebuild_roots_with_classes(nullptr); if (r < 0) return r; return 1; } int CrushWrapper::remove_device_class(CephContext *cct, int id, ostream *ss) { ceph_assert(ss); const char *name = get_item_name(id); if (!name) { *ss << "osd." << id << " does not have a name"; return -ENOENT; } const char *class_name = get_item_class(id); if (!class_name) { *ss << "osd." << id << " has not been bound to a specific class yet"; return 0; } class_remove_item(id); int r = rebuild_roots_with_classes(cct); if (r < 0) { *ss << "unable to rebuild roots with class '" << class_name << "' " << "of osd." 
<< id << ": " << cpp_strerror(r); return r; } return 0; } int CrushWrapper::device_class_clone( int original_id, int device_class, const std::map<int32_t, map<int32_t, int32_t>>& old_class_bucket, const std::set<int32_t>& used_ids, int *clone, map<int,map<int,vector<int>>> *cmap_item_weight) { const char *item_name = get_item_name(original_id); if (item_name == NULL) return -ECHILD; const char *class_name = get_class_name(device_class); if (class_name == NULL) return -EBADF; string copy_name = item_name + string("~") + class_name; if (name_exists(copy_name)) { *clone = get_item_id(copy_name); return 0; } crush_bucket *original = get_bucket(original_id); ceph_assert(!IS_ERR(original)); crush_bucket *copy = crush_make_bucket(crush, original->alg, original->hash, original->type, 0, NULL, NULL); ceph_assert(copy); vector<unsigned> item_orig_pos; // new item pos -> orig item pos for (unsigned i = 0; i < original->size; i++) { int item = original->items[i]; int weight = crush_get_bucket_item_weight(original, i); if (item >= 0) { if (class_map.count(item) != 0 && class_map[item] == device_class) { int res = crush_bucket_add_item(crush, copy, item, weight); if (res) return res; } else { continue; } } else { int child_copy_id; int res = device_class_clone(item, device_class, old_class_bucket, used_ids, &child_copy_id, cmap_item_weight); if (res < 0) return res; crush_bucket *child_copy = get_bucket(child_copy_id); ceph_assert(!IS_ERR(child_copy)); res = crush_bucket_add_item(crush, copy, child_copy_id, child_copy->weight); if (res) return res; } item_orig_pos.push_back(i); } ceph_assert(item_orig_pos.size() == copy->size); int bno = 0; if (old_class_bucket.count(original_id) && old_class_bucket.at(original_id).count(device_class)) { bno = old_class_bucket.at(original_id).at(device_class); } else { // pick a new shadow bucket id that is not used by the current map // *or* any previous shadow buckets. bno = -1; while (((-1-bno) < crush->max_buckets && crush->buckets[-1-bno]) || used_ids.count(bno)) { --bno; } } int res = crush_add_bucket(crush, bno, copy, clone); if (res) return res; ceph_assert(!bno || bno == *clone); res = set_item_class(*clone, device_class); if (res < 0) return res; // we do not use set_item_name because the name is intentionally invalid name_map[*clone] = copy_name; if (have_rmaps) name_rmap[copy_name] = *clone; class_bucket[original_id][device_class] = *clone; // set up choose_args for the new bucket. 
for (auto& w : choose_args) { crush_choose_arg_map& cmap = w.second; if (crush->max_buckets > (int)cmap.size) { unsigned new_size = crush->max_buckets; cmap.args = static_cast<crush_choose_arg*>(realloc(cmap.args, new_size * sizeof(cmap.args[0]))); ceph_assert(cmap.args); memset(cmap.args + cmap.size, 0, (new_size - cmap.size) * sizeof(cmap.args[0])); cmap.size = new_size; } auto& o = cmap.args[-1-original_id]; auto& n = cmap.args[-1-bno]; n.ids_size = 0; // FIXME: implement me someday n.weight_set_positions = o.weight_set_positions; n.weight_set = static_cast<crush_weight_set*>(calloc( n.weight_set_positions, sizeof(crush_weight_set))); for (size_t s = 0; s < n.weight_set_positions; ++s) { n.weight_set[s].size = copy->size; n.weight_set[s].weights = (__u32*)calloc(copy->size, sizeof(__u32)); } for (size_t s = 0; s < n.weight_set_positions; ++s) { vector<int> bucket_weights(n.weight_set_positions); for (size_t i = 0; i < copy->size; ++i) { int item = copy->items[i]; if (item >= 0) { n.weight_set[s].weights[i] = o.weight_set[s].weights[item_orig_pos[i]]; } else if ((*cmap_item_weight)[w.first].count(item)) { n.weight_set[s].weights[i] = (*cmap_item_weight)[w.first][item][s]; } else { n.weight_set[s].weights[i] = 0; } bucket_weights[s] += n.weight_set[s].weights[i]; } (*cmap_item_weight)[w.first][bno] = bucket_weights; } } return 0; } int CrushWrapper::get_rules_by_class(const string &class_name, set<int> *rules) { ceph_assert(rules); rules->clear(); if (!class_exists(class_name)) { return -ENOENT; } int class_id = get_class_id(class_name); for (unsigned i = 0; i < crush->max_rules; ++i) { crush_rule *r = crush->rules[i]; if (!r) continue; for (unsigned j = 0; j < r->len; ++j) { if (r->steps[j].op == CRUSH_RULE_TAKE) { int step_item = r->steps[j].arg1; int original_item; int c; int res = split_id_class(step_item, &original_item, &c); if (res < 0) { return res; } if (c != -1 && c == class_id) { rules->insert(i); break; } } } } return 0; } // return rules that might reference the given osd int CrushWrapper::get_rules_by_osd(int osd, set<int> *rules) { ceph_assert(rules); rules->clear(); if (osd < 0) { return -EINVAL; } for (unsigned i = 0; i < crush->max_rules; ++i) { crush_rule *r = crush->rules[i]; if (!r) continue; for (unsigned j = 0; j < r->len; ++j) { if (r->steps[j].op == CRUSH_RULE_TAKE) { int step_item = r->steps[j].arg1; list<int> unordered; int rc = _get_leaves(step_item, &unordered); if (rc < 0) { return rc; // propagate fatal errors! 
} bool match = false; for (auto &o: unordered) { ceph_assert(o >= 0); if (o == osd) { match = true; break; } } if (match) { rules->insert(i); break; } } } } return 0; } bool CrushWrapper::_class_is_dead(int class_id) { for (auto &p: class_map) { if (p.first >= 0 && p.second == class_id) { return false; } } for (unsigned i = 0; i < crush->max_rules; ++i) { crush_rule *r = crush->rules[i]; if (!r) continue; for (unsigned j = 0; j < r->len; ++j) { if (r->steps[j].op == CRUSH_RULE_TAKE) { int root = r->steps[j].arg1; for (auto &p : class_bucket) { auto& q = p.second; if (q.count(class_id) && q[class_id] == root) { return false; } } } } } // no more referenced by any devices or crush rules return true; } void CrushWrapper::cleanup_dead_classes() { auto p = class_name.begin(); while (p != class_name.end()) { if (_class_is_dead(p->first)) { string n = p->second; ++p; remove_class_name(n); } else { ++p; } } } int CrushWrapper::rebuild_roots_with_classes(CephContext *cct) { std::map<int32_t, map<int32_t, int32_t> > old_class_bucket = class_bucket; cleanup_dead_classes(); int r = trim_roots_with_class(cct); if (r < 0) return r; class_bucket.clear(); return populate_classes(old_class_bucket); } void CrushWrapper::encode(bufferlist& bl, uint64_t features) const { using ceph::encode; ceph_assert(crush); __u32 magic = CRUSH_MAGIC; encode(magic, bl); encode(crush->max_buckets, bl); encode(crush->max_rules, bl); encode(crush->max_devices, bl); bool encode_compat_choose_args = false; crush_choose_arg_map arg_map; memset(&arg_map, '\0', sizeof(arg_map)); if (has_choose_args() && !HAVE_FEATURE(features, CRUSH_CHOOSE_ARGS)) { ceph_assert(!has_incompat_choose_args()); encode_compat_choose_args = true; arg_map = choose_args.begin()->second; } // buckets for (int i=0; i<crush->max_buckets; i++) { __u32 alg = 0; if (crush->buckets[i]) alg = crush->buckets[i]->alg; encode(alg, bl); if (!alg) continue; encode(crush->buckets[i]->id, bl); encode(crush->buckets[i]->type, bl); encode(crush->buckets[i]->alg, bl); encode(crush->buckets[i]->hash, bl); encode(crush->buckets[i]->weight, bl); encode(crush->buckets[i]->size, bl); for (unsigned j=0; j<crush->buckets[i]->size; j++) encode(crush->buckets[i]->items[j], bl); switch (crush->buckets[i]->alg) { case CRUSH_BUCKET_UNIFORM: encode((reinterpret_cast<crush_bucket_uniform*>(crush->buckets[i]))->item_weight, bl); break; case CRUSH_BUCKET_LIST: for (unsigned j=0; j<crush->buckets[i]->size; j++) { encode((reinterpret_cast<crush_bucket_list*>(crush->buckets[i]))->item_weights[j], bl); encode((reinterpret_cast<crush_bucket_list*>(crush->buckets[i]))->sum_weights[j], bl); } break; case CRUSH_BUCKET_TREE: encode((reinterpret_cast<crush_bucket_tree*>(crush->buckets[i]))->num_nodes, bl); for (unsigned j=0; j<(reinterpret_cast<crush_bucket_tree*>(crush->buckets[i]))->num_nodes; j++) encode((reinterpret_cast<crush_bucket_tree*>(crush->buckets[i]))->node_weights[j], bl); break; case CRUSH_BUCKET_STRAW: for (unsigned j=0; j<crush->buckets[i]->size; j++) { encode((reinterpret_cast<crush_bucket_straw*>(crush->buckets[i]))->item_weights[j], bl); encode((reinterpret_cast<crush_bucket_straw*>(crush->buckets[i]))->straws[j], bl); } break; case CRUSH_BUCKET_STRAW2: { __u32 *weights; if (encode_compat_choose_args && arg_map.args[i].weight_set_positions > 0) { weights = arg_map.args[i].weight_set[0].weights; } else { weights = (reinterpret_cast<crush_bucket_straw2*>(crush->buckets[i]))->item_weights; } for (unsigned j=0; j<crush->buckets[i]->size; j++) { encode(weights[j], bl); } } break; 
default: ceph_abort(); break; } } // rules for (unsigned i=0; i<crush->max_rules; i++) { __u32 yes = crush->rules[i] ? 1:0; encode(yes, bl); if (!yes) continue; encode(crush->rules[i]->len, bl); /* * legacy crush_rule_mask was * * struct crush_rule_mask { * __u8 ruleset; * __u8 type; * __u8 min_size; * __u8 max_size; * }; * * encode ruleset=ruleid, and min/max of 1/100 */ encode((__u8)i, bl); // ruleset == ruleid encode(crush->rules[i]->type, bl); if (HAVE_FEATURE(features, SERVER_QUINCY)) { encode((__u8)1, bl); // min_size = 1 encode((__u8)100, bl); // max_size = 100 } else { encode(crush->rules[i]->deprecated_min_size, bl); encode(crush->rules[i]->deprecated_max_size, bl); } for (unsigned j=0; j<crush->rules[i]->len; j++) encode(crush->rules[i]->steps[j], bl); } // name info encode(type_map, bl); encode(name_map, bl); encode(rule_name_map, bl); // tunables encode(crush->choose_local_tries, bl); encode(crush->choose_local_fallback_tries, bl); encode(crush->choose_total_tries, bl); encode(crush->chooseleaf_descend_once, bl); encode(crush->chooseleaf_vary_r, bl); encode(crush->straw_calc_version, bl); encode(crush->allowed_bucket_algs, bl); if (features & CEPH_FEATURE_CRUSH_TUNABLES5) { encode(crush->chooseleaf_stable, bl); } if (HAVE_FEATURE(features, SERVER_LUMINOUS)) { // device classes encode(class_map, bl); encode(class_name, bl); encode(class_bucket, bl); // choose args __u32 size = (__u32)choose_args.size(); encode(size, bl); for (auto c : choose_args) { encode(c.first, bl); crush_choose_arg_map arg_map = c.second; size = 0; for (__u32 i = 0; i < arg_map.size; i++) { crush_choose_arg *arg = &arg_map.args[i]; if (arg->weight_set_positions == 0 && arg->ids_size == 0) continue; size++; } encode(size, bl); for (__u32 i = 0; i < arg_map.size; i++) { crush_choose_arg *arg = &arg_map.args[i]; if (arg->weight_set_positions == 0 && arg->ids_size == 0) continue; encode(i, bl); encode(arg->weight_set_positions, bl); for (__u32 j = 0; j < arg->weight_set_positions; j++) { crush_weight_set *weight_set = &arg->weight_set[j]; encode(weight_set->size, bl); for (__u32 k = 0; k < weight_set->size; k++) encode(weight_set->weights[k], bl); } encode(arg->ids_size, bl); for (__u32 j = 0; j < arg->ids_size; j++) encode(arg->ids[j], bl); } } } } static void decode_32_or_64_string_map(map<int32_t,string>& m, bufferlist::const_iterator& blp) { m.clear(); __u32 n; decode(n, blp); while (n--) { __s32 key; decode(key, blp); __u32 strlen; decode(strlen, blp); if (strlen == 0) { // der, key was actually 64-bits! 
decode(strlen, blp); } decode_nohead(strlen, m[key], blp); } } void CrushWrapper::decode(bufferlist::const_iterator& blp) { using ceph::decode; create(); __u32 magic; decode(magic, blp); if (magic != CRUSH_MAGIC) throw ceph::buffer::malformed_input("bad magic number"); decode(crush->max_buckets, blp); decode(crush->max_rules, blp); decode(crush->max_devices, blp); // legacy tunables, unless we decode something newer set_tunables_legacy(); try { // buckets crush->buckets = (crush_bucket**)calloc(1, crush->max_buckets * sizeof(crush_bucket*)); for (int i=0; i<crush->max_buckets; i++) { decode_crush_bucket(&crush->buckets[i], blp); } // rules crush->rules = (crush_rule**)calloc(1, crush->max_rules * sizeof(crush_rule*)); for (unsigned i = 0; i < crush->max_rules; ++i) { __u32 yes; decode(yes, blp); if (!yes) { crush->rules[i] = NULL; continue; } __u32 len; decode(len, blp); crush->rules[i] = reinterpret_cast<crush_rule*>(calloc(1, crush_rule_size(len))); crush->rules[i]->len = len; __u8 ruleset; // ignore + discard decode(ruleset, blp); if (ruleset != i) { throw ::ceph::buffer::malformed_input("crush ruleset_id != rule_id; encoding is too old"); } decode(crush->rules[i]->type, blp); decode(crush->rules[i]->deprecated_min_size, blp); decode(crush->rules[i]->deprecated_max_size, blp); for (unsigned j=0; j<crush->rules[i]->len; j++) decode(crush->rules[i]->steps[j], blp); } // name info // NOTE: we had a bug where we were incoding int instead of int32, which means the // 'key' field for these maps may be either 32 or 64 bits, depending. tolerate // both by assuming the string is always non-empty. decode_32_or_64_string_map(type_map, blp); decode_32_or_64_string_map(name_map, blp); decode_32_or_64_string_map(rule_name_map, blp); // tunables if (!blp.end()) { decode(crush->choose_local_tries, blp); decode(crush->choose_local_fallback_tries, blp); decode(crush->choose_total_tries, blp); } if (!blp.end()) { decode(crush->chooseleaf_descend_once, blp); } if (!blp.end()) { decode(crush->chooseleaf_vary_r, blp); } if (!blp.end()) { decode(crush->straw_calc_version, blp); } if (!blp.end()) { decode(crush->allowed_bucket_algs, blp); } if (!blp.end()) { decode(crush->chooseleaf_stable, blp); } if (!blp.end()) { decode(class_map, blp); decode(class_name, blp); for (auto &c : class_name) class_rname[c.second] = c.first; decode(class_bucket, blp); } if (!blp.end()) { __u32 choose_args_size; decode(choose_args_size, blp); for (__u32 i = 0; i < choose_args_size; i++) { typename decltype(choose_args)::key_type choose_args_index; decode(choose_args_index, blp); crush_choose_arg_map arg_map; arg_map.size = crush->max_buckets; arg_map.args = static_cast<crush_choose_arg*>(calloc( arg_map.size, sizeof(crush_choose_arg))); __u32 size; decode(size, blp); for (__u32 j = 0; j < size; j++) { __u32 bucket_index; decode(bucket_index, blp); ceph_assert(bucket_index < arg_map.size); crush_choose_arg *arg = &arg_map.args[bucket_index]; decode(arg->weight_set_positions, blp); if (arg->weight_set_positions) { arg->weight_set = static_cast<crush_weight_set*>(calloc( arg->weight_set_positions, sizeof(crush_weight_set))); for (__u32 k = 0; k < arg->weight_set_positions; k++) { crush_weight_set *weight_set = &arg->weight_set[k]; decode(weight_set->size, blp); weight_set->weights = (__u32*)calloc( weight_set->size, sizeof(__u32)); for (__u32 l = 0; l < weight_set->size; l++) decode(weight_set->weights[l], blp); } } decode(arg->ids_size, blp); if (arg->ids_size) { ceph_assert(arg->ids_size == crush->buckets[bucket_index]->size); 
arg->ids = (__s32 *)calloc(arg->ids_size, sizeof(__s32)); for (__u32 k = 0; k < arg->ids_size; k++) decode(arg->ids[k], blp); } } choose_args[choose_args_index] = arg_map; } } update_choose_args(nullptr); // in case we decode a legacy "corrupted" map finalize(); } catch (...) { crush_destroy(crush); throw; } } void CrushWrapper::decode_crush_bucket(crush_bucket** bptr, bufferlist::const_iterator &blp) { using ceph::decode; __u32 alg; decode(alg, blp); if (!alg) { *bptr = NULL; return; } int size = 0; switch (alg) { case CRUSH_BUCKET_UNIFORM: size = sizeof(crush_bucket_uniform); break; case CRUSH_BUCKET_LIST: size = sizeof(crush_bucket_list); break; case CRUSH_BUCKET_TREE: size = sizeof(crush_bucket_tree); break; case CRUSH_BUCKET_STRAW: size = sizeof(crush_bucket_straw); break; case CRUSH_BUCKET_STRAW2: size = sizeof(crush_bucket_straw2); break; default: { char str[128]; snprintf(str, sizeof(str), "unsupported bucket algorithm: %d", alg); throw ceph::buffer::malformed_input(str); } } crush_bucket *bucket = reinterpret_cast<crush_bucket*>(calloc(1, size)); *bptr = bucket; decode(bucket->id, blp); decode(bucket->type, blp); decode(bucket->alg, blp); decode(bucket->hash, blp); decode(bucket->weight, blp); decode(bucket->size, blp); bucket->items = (__s32*)calloc(1, bucket->size * sizeof(__s32)); for (unsigned j = 0; j < bucket->size; ++j) { decode(bucket->items[j], blp); } switch (bucket->alg) { case CRUSH_BUCKET_UNIFORM: decode((reinterpret_cast<crush_bucket_uniform*>(bucket))->item_weight, blp); break; case CRUSH_BUCKET_LIST: { crush_bucket_list* cbl = reinterpret_cast<crush_bucket_list*>(bucket); cbl->item_weights = (__u32*)calloc(1, bucket->size * sizeof(__u32)); cbl->sum_weights = (__u32*)calloc(1, bucket->size * sizeof(__u32)); for (unsigned j = 0; j < bucket->size; ++j) { decode(cbl->item_weights[j], blp); decode(cbl->sum_weights[j], blp); } break; } case CRUSH_BUCKET_TREE: { crush_bucket_tree* cbt = reinterpret_cast<crush_bucket_tree*>(bucket); decode(cbt->num_nodes, blp); cbt->node_weights = (__u32*)calloc(1, cbt->num_nodes * sizeof(__u32)); for (unsigned j=0; j<cbt->num_nodes; j++) { decode(cbt->node_weights[j], blp); } break; } case CRUSH_BUCKET_STRAW: { crush_bucket_straw* cbs = reinterpret_cast<crush_bucket_straw*>(bucket); cbs->straws = (__u32*)calloc(1, bucket->size * sizeof(__u32)); cbs->item_weights = (__u32*)calloc(1, bucket->size * sizeof(__u32)); for (unsigned j = 0; j < bucket->size; ++j) { decode(cbs->item_weights[j], blp); decode(cbs->straws[j], blp); } break; } case CRUSH_BUCKET_STRAW2: { crush_bucket_straw2* cbs = reinterpret_cast<crush_bucket_straw2*>(bucket); cbs->item_weights = (__u32*)calloc(1, bucket->size * sizeof(__u32)); for (unsigned j = 0; j < bucket->size; ++j) { decode(cbs->item_weights[j], blp); } break; } default: // We should have handled this case in the first switch statement ceph_abort(); break; } } void CrushWrapper::dump(Formatter *f) const { f->open_array_section("devices"); for (int i=0; i<get_max_devices(); i++) { f->open_object_section("device"); f->dump_int("id", i); const char *n = get_item_name(i); if (n) { f->dump_string("name", n); } else { char name[20]; sprintf(name, "device%d", i); f->dump_string("name", name); } const char *device_class = get_item_class(i); if (device_class != NULL) f->dump_string("class", device_class); f->close_section(); } f->close_section(); f->open_array_section("types"); int n = get_num_type_names(); for (int i=0; n; i++) { const char *name = get_type_name(i); if (!name) { if (i == 0) { 
f->open_object_section("type"); f->dump_int("type_id", 0); f->dump_string("name", "device"); f->close_section(); } continue; } n--; f->open_object_section("type"); f->dump_int("type_id", i); f->dump_string("name", name); f->close_section(); } f->close_section(); f->open_array_section("buckets"); for (int bucket = -1; bucket > -1-get_max_buckets(); --bucket) { if (!bucket_exists(bucket)) continue; f->open_object_section("bucket"); f->dump_int("id", bucket); if (get_item_name(bucket)) f->dump_string("name", get_item_name(bucket)); f->dump_int("type_id", get_bucket_type(bucket)); if (get_type_name(get_bucket_type(bucket))) f->dump_string("type_name", get_type_name(get_bucket_type(bucket))); f->dump_int("weight", get_bucket_weight(bucket)); f->dump_string("alg", crush_bucket_alg_name(get_bucket_alg(bucket))); f->dump_string("hash", crush_hash_name(get_bucket_hash(bucket))); f->open_array_section("items"); for (int j=0; j<get_bucket_size(bucket); j++) { f->open_object_section("item"); f->dump_int("id", get_bucket_item(bucket, j)); f->dump_int("weight", get_bucket_item_weight(bucket, j)); f->dump_int("pos", j); f->close_section(); } f->close_section(); f->close_section(); } f->close_section(); f->open_array_section("rules"); dump_rules(f); f->close_section(); f->open_object_section("tunables"); dump_tunables(f); f->close_section(); dump_choose_args(f); } namespace { // depth first walker class TreeDumper { typedef CrushTreeDumper::Item Item; const CrushWrapper *crush; const CrushTreeDumper::name_map_t& weight_set_names; public: explicit TreeDumper(const CrushWrapper *crush, const CrushTreeDumper::name_map_t& wsnames) : crush(crush), weight_set_names(wsnames) {} void dump(Formatter *f) { set<int> roots; crush->find_roots(&roots); for (set<int>::iterator root = roots.begin(); root != roots.end(); ++root) { dump_item(Item(*root, 0, 0, crush->get_bucket_weightf(*root)), f); } } private: void dump_item(const Item& qi, Formatter* f) { if (qi.is_bucket()) { f->open_object_section("bucket"); CrushTreeDumper::dump_item_fields(crush, weight_set_names, qi, f); dump_bucket_children(qi, f); f->close_section(); } else { f->open_object_section("device"); CrushTreeDumper::dump_item_fields(crush, weight_set_names, qi, f); f->close_section(); } } void dump_bucket_children(const Item& parent, Formatter* f) { f->open_array_section("items"); const int max_pos = crush->get_bucket_size(parent.id); for (int pos = 0; pos < max_pos; pos++) { int id = crush->get_bucket_item(parent.id, pos); float weight = crush->get_bucket_item_weightf(parent.id, pos); dump_item(Item(id, parent.id, parent.depth + 1, weight), f); } f->close_section(); } }; } void CrushWrapper::dump_tree( Formatter *f, const CrushTreeDumper::name_map_t& weight_set_names) const { ceph_assert(f); TreeDumper(this, weight_set_names).dump(f); } void CrushWrapper::dump_tunables(Formatter *f) const { f->dump_int("choose_local_tries", get_choose_local_tries()); f->dump_int("choose_local_fallback_tries", get_choose_local_fallback_tries()); f->dump_int("choose_total_tries", get_choose_total_tries()); f->dump_int("chooseleaf_descend_once", get_chooseleaf_descend_once()); f->dump_int("chooseleaf_vary_r", get_chooseleaf_vary_r()); f->dump_int("chooseleaf_stable", get_chooseleaf_stable()); f->dump_int("straw_calc_version", get_straw_calc_version()); f->dump_int("allowed_bucket_algs", get_allowed_bucket_algs()); // be helpful about it if (has_jewel_tunables()) f->dump_string("profile", "jewel"); else if (has_hammer_tunables()) f->dump_string("profile", "hammer"); else if 
(has_firefly_tunables()) f->dump_string("profile", "firefly"); else if (has_bobtail_tunables()) f->dump_string("profile", "bobtail"); else if (has_argonaut_tunables()) f->dump_string("profile", "argonaut"); else f->dump_string("profile", "unknown"); f->dump_int("optimal_tunables", (int)has_optimal_tunables()); f->dump_int("legacy_tunables", (int)has_legacy_tunables()); // be helpful about minimum version required f->dump_string("minimum_required_version", get_min_required_version()); f->dump_int("require_feature_tunables", (int)has_nondefault_tunables()); f->dump_int("require_feature_tunables2", (int)has_nondefault_tunables2()); f->dump_int("has_v2_rules", (int)has_v2_rules()); f->dump_int("require_feature_tunables3", (int)has_nondefault_tunables3()); f->dump_int("has_v3_rules", (int)has_v3_rules()); f->dump_int("has_v4_buckets", (int)has_v4_buckets()); f->dump_int("require_feature_tunables5", (int)has_nondefault_tunables5()); f->dump_int("has_v5_rules", (int)has_v5_rules()); } void CrushWrapper::dump_choose_args(Formatter *f) const { f->open_object_section("choose_args"); for (auto c : choose_args) { crush_choose_arg_map arg_map = c.second; f->open_array_section(stringify(c.first).c_str()); for (__u32 i = 0; i < arg_map.size; i++) { crush_choose_arg *arg = &arg_map.args[i]; if (arg->weight_set_positions == 0 && arg->ids_size == 0) continue; f->open_object_section("choose_args"); int bucket_index = i; f->dump_int("bucket_id", -1-bucket_index); if (arg->weight_set_positions > 0) { f->open_array_section("weight_set"); for (__u32 j = 0; j < arg->weight_set_positions; j++) { f->open_array_section("weights"); __u32 *weights = arg->weight_set[j].weights; __u32 size = arg->weight_set[j].size; for (__u32 k = 0; k < size; k++) { f->dump_float("weight", (float)weights[k]/(float)0x10000); } f->close_section(); } f->close_section(); } if (arg->ids_size > 0) { f->open_array_section("ids"); for (__u32 j = 0; j < arg->ids_size; j++) f->dump_int("id", arg->ids[j]); f->close_section(); } f->close_section(); } f->close_section(); } f->close_section(); } void CrushWrapper::dump_rules(Formatter *f) const { for (int i=0; i<get_max_rules(); i++) { if (!rule_exists(i)) continue; dump_rule(i, f); } } void CrushWrapper::dump_rule(int rule_id, Formatter *f) const { f->open_object_section("rule"); f->dump_int("rule_id", rule_id); if (get_rule_name(rule_id)) f->dump_string("rule_name", get_rule_name(rule_id)); f->dump_int("type", get_rule_type(rule_id)); f->open_array_section("steps"); for (int j=0; j<get_rule_len(rule_id); j++) { f->open_object_section("step"); switch (get_rule_op(rule_id, j)) { case CRUSH_RULE_NOOP: f->dump_string("op", "noop"); break; case CRUSH_RULE_TAKE: f->dump_string("op", "take"); { int item = get_rule_arg1(rule_id, j); f->dump_int("item", item); const char *name = get_item_name(item); f->dump_string("item_name", name ? 
name : ""); } break; case CRUSH_RULE_EMIT: f->dump_string("op", "emit"); break; case CRUSH_RULE_CHOOSE_FIRSTN: f->dump_string("op", "choose_firstn"); f->dump_int("num", get_rule_arg1(rule_id, j)); f->dump_string("type", get_type_name(get_rule_arg2(rule_id, j))); break; case CRUSH_RULE_CHOOSE_INDEP: f->dump_string("op", "choose_indep"); f->dump_int("num", get_rule_arg1(rule_id, j)); f->dump_string("type", get_type_name(get_rule_arg2(rule_id, j))); break; case CRUSH_RULE_CHOOSELEAF_FIRSTN: f->dump_string("op", "chooseleaf_firstn"); f->dump_int("num", get_rule_arg1(rule_id, j)); f->dump_string("type", get_type_name(get_rule_arg2(rule_id, j))); break; case CRUSH_RULE_CHOOSELEAF_INDEP: f->dump_string("op", "chooseleaf_indep"); f->dump_int("num", get_rule_arg1(rule_id, j)); f->dump_string("type", get_type_name(get_rule_arg2(rule_id, j))); break; case CRUSH_RULE_SET_CHOOSE_TRIES: f->dump_string("op", "set_choose_tries"); f->dump_int("num", get_rule_arg1(rule_id, j)); break; case CRUSH_RULE_SET_CHOOSELEAF_TRIES: f->dump_string("op", "set_chooseleaf_tries"); f->dump_int("num", get_rule_arg1(rule_id, j)); break; default: f->dump_int("opcode", get_rule_op(rule_id, j)); f->dump_int("arg1", get_rule_arg1(rule_id, j)); f->dump_int("arg2", get_rule_arg2(rule_id, j)); } f->close_section(); } f->close_section(); f->close_section(); } void CrushWrapper::list_rules(Formatter *f) const { for (int rule = 0; rule < get_max_rules(); rule++) { if (!rule_exists(rule)) continue; f->dump_string("name", get_rule_name(rule)); } } void CrushWrapper::list_rules(ostream *ss) const { for (int rule = 0; rule < get_max_rules(); rule++) { if (!rule_exists(rule)) continue; *ss << get_rule_name(rule) << "\n"; } } class CrushTreePlainDumper : public CrushTreeDumper::Dumper<TextTable> { public: typedef CrushTreeDumper::Dumper<TextTable> Parent; explicit CrushTreePlainDumper(const CrushWrapper *crush, const CrushTreeDumper::name_map_t& wsnames) : Parent(crush, wsnames) {} explicit CrushTreePlainDumper(const CrushWrapper *crush, const CrushTreeDumper::name_map_t& wsnames, bool show_shadow) : Parent(crush, wsnames, show_shadow) {} void dump(TextTable *tbl) { tbl->define_column("ID", TextTable::LEFT, TextTable::RIGHT); tbl->define_column("CLASS", TextTable::LEFT, TextTable::RIGHT); tbl->define_column("WEIGHT", TextTable::LEFT, TextTable::RIGHT); for (auto& p : crush->choose_args) { if (p.first == CrushWrapper::DEFAULT_CHOOSE_ARGS) { tbl->define_column("(compat)", TextTable::LEFT, TextTable::RIGHT); } else { string name; auto q = weight_set_names.find(p.first); name = q != weight_set_names.end() ? 
q->second : stringify(p.first); tbl->define_column(name.c_str(), TextTable::LEFT, TextTable::RIGHT); } } tbl->define_column("TYPE NAME", TextTable::LEFT, TextTable::LEFT); Parent::dump(tbl); } protected: void dump_item(const CrushTreeDumper::Item &qi, TextTable *tbl) override { const char *c = crush->get_item_class(qi.id); if (!c) c = ""; *tbl << qi.id << c << weightf_t(qi.weight); for (auto& p : crush->choose_args) { if (qi.parent < 0) { const crush_choose_arg_map cmap = crush->choose_args_get(p.first); int bidx = -1 - qi.parent; const crush_bucket *b = crush->get_bucket(qi.parent); if (b && bidx < (int)cmap.size && cmap.args[bidx].weight_set && cmap.args[bidx].weight_set_positions >= 1) { int pos; for (pos = 0; pos < (int)cmap.args[bidx].weight_set[0].size && b->items[pos] != qi.id; ++pos) ; *tbl << weightf_t((float)cmap.args[bidx].weight_set[0].weights[pos] / (float)0x10000); continue; } } *tbl << ""; } ostringstream ss; for (int k=0; k < qi.depth; k++) { ss << " "; } if (qi.is_bucket()) { ss << crush->get_type_name(crush->get_bucket_type(qi.id)) << " " << crush->get_item_name(qi.id); } else { ss << "osd." << qi.id; } *tbl << ss.str(); *tbl << TextTable::endrow; } }; class CrushTreeFormattingDumper : public CrushTreeDumper::FormattingDumper { public: typedef CrushTreeDumper::FormattingDumper Parent; explicit CrushTreeFormattingDumper( const CrushWrapper *crush, const CrushTreeDumper::name_map_t& wsnames) : Parent(crush, wsnames) {} explicit CrushTreeFormattingDumper( const CrushWrapper *crush, const CrushTreeDumper::name_map_t& wsnames, bool show_shadow) : Parent(crush, wsnames, show_shadow) {} void dump(Formatter *f) { f->open_array_section("nodes"); Parent::dump(f); f->close_section(); // There is no stray bucket whose id is a negative number, so just get // the max_id and iterate from 0 to max_id to dump stray osds. f->open_array_section("stray"); int32_t max_id = -1; if (!crush->name_map.empty()) { max_id = crush->name_map.rbegin()->first; } for (int32_t i = 0; i <= max_id; i++) { if (crush->item_exists(i) && !is_touched(i) && should_dump(i)) { dump_item(CrushTreeDumper::Item(i, 0, 0, 0), f); } } f->close_section(); } }; void CrushWrapper::dump_tree( ostream *out, Formatter *f, const CrushTreeDumper::name_map_t& weight_set_names, bool show_shadow) const { if (out) { TextTable tbl; CrushTreePlainDumper(this, weight_set_names, show_shadow).dump(&tbl); *out << tbl; } if (f) { CrushTreeFormattingDumper(this, weight_set_names, show_shadow).dump(f); } } void CrushWrapper::generate_test_instances(list<CrushWrapper*>& o) { o.push_back(new CrushWrapper); // fixme } /** * Determine the default CRUSH rule ID to be used with * newly created replicated pools. 
* * @returns a rule ID (>=0) or -1 if no suitable rule found */ int CrushWrapper::get_osd_pool_default_crush_replicated_rule( CephContext *cct) { int crush_rule = cct->_conf.get_val<int64_t>("osd_pool_default_crush_rule"); if (crush_rule < 0) { crush_rule = find_first_rule(pg_pool_t::TYPE_REPLICATED); } else if (!rule_exists(crush_rule)) { crush_rule = -1; // match find_first_rule() retval } return crush_rule; } bool CrushWrapper::is_valid_crush_name(const string& s) { if (s.empty()) return false; for (string::const_iterator p = s.begin(); p != s.end(); ++p) { if (!(*p == '-') && !(*p == '_') && !(*p == '.') && !(*p >= '0' && *p <= '9') && !(*p >= 'A' && *p <= 'Z') && !(*p >= 'a' && *p <= 'z')) return false; } return true; } bool CrushWrapper::is_valid_crush_loc(CephContext *cct, const map<string,string>& loc) { for (map<string,string>::const_iterator l = loc.begin(); l != loc.end(); ++l) { if (!is_valid_crush_name(l->first) || !is_valid_crush_name(l->second)) { ldout(cct, 1) << "loc[" << l->first << "] = '" << l->second << "' not a valid crush name ([A-Za-z0-9_-.]+)" << dendl; return false; } } return true; } int CrushWrapper::_choose_type_stack( CephContext *cct, const vector<pair<int,int>>& stack, const set<int>& overfull, const vector<int>& underfull, const vector<int>& more_underfull, const vector<int>& orig, vector<int>::const_iterator& i, set<int>& used, vector<int> *pw, int root_bucket, int rule) const { vector<int> w = *pw; vector<int> o; ldout(cct, 10) << __func__ << " stack " << stack << " orig " << orig << " at " << *i << " pw " << *pw << dendl; ceph_assert(root_bucket < 0); vector<int> cumulative_fanout(stack.size()); int f = 1; for (int j = (int)stack.size() - 1; j >= 0; --j) { cumulative_fanout[j] = f; f *= stack[j].second; } ldout(cct, 10) << __func__ << " cumulative_fanout " << cumulative_fanout << dendl; // identify underfull targets for each intermediate level. // this serves two purposes: // 1. we can tell when we are selecting a bucket that does not have any underfull // devices beneath it. that means that if the current input includes an overfull // device, we won't be able to find an underfull device with this parent to // swap for it. // 2. when we decide we should reject a bucket due to the above, this list gives us // a list of peers to consider that *do* have underfull devices available.. (we // are careful to pick one that has the same parent.) 
vector<set<int>> underfull_buckets; // level -> set of buckets with >0 underfull item(s) underfull_buckets.resize(stack.size() - 1); for (auto osd : underfull) { int item = osd; for (int j = (int)stack.size() - 2; j >= 0; --j) { int type = stack[j].first; item = get_parent_of_type(item, type, rule); ldout(cct, 10) << __func__ << " underfull " << osd << " type " << type << " is " << item << dendl; if (!subtree_contains(root_bucket, item)) { ldout(cct, 20) << __func__ << " not in root subtree " << root_bucket << dendl; continue; } underfull_buckets[j].insert(item); } } ldout(cct, 20) << __func__ << " underfull_buckets " << underfull_buckets << dendl; for (unsigned j = 0; j < stack.size(); ++j) { int type = stack[j].first; int fanout = stack[j].second; int cum_fanout = cumulative_fanout[j]; ldout(cct, 10) << " level " << j << ": type " << type << " fanout " << fanout << " cumulative " << cum_fanout << " w " << w << dendl; vector<int> o; auto tmpi = i; if (i == orig.end()) { ldout(cct, 10) << __func__ << " end of orig, break 0" << dendl; break; } for (auto from : w) { ldout(cct, 10) << " from " << from << dendl; // identify leaves under each choice. we use this to check whether any of these // leaves are overfull. (if so, we need to make sure there are underfull candidates // to swap for them.) vector<set<int>> leaves; leaves.resize(fanout); for (int pos = 0; pos < fanout; ++pos) { if (type > 0) { // non-leaf int item = get_parent_of_type(*tmpi, type, rule); o.push_back(item); int n = cum_fanout; while (n-- && tmpi != orig.end()) { leaves[pos].insert(*tmpi++); } ldout(cct, 10) << __func__ << " from " << *tmpi << " got " << item << " of type " << type << " over leaves " << leaves[pos] << dendl; } else { // leaf bool replaced = false; if (overfull.count(*i)) { for (auto item : underfull) { ldout(cct, 10) << __func__ << " pos " << pos << " was " << *i << " considering " << item << dendl; if (used.count(item)) { ldout(cct, 20) << __func__ << " in used " << used << dendl; continue; } if (!subtree_contains(from, item)) { ldout(cct, 20) << __func__ << " not in subtree " << from << dendl; continue; } if (std::find(orig.begin(), orig.end(), item) != orig.end()) { ldout(cct, 20) << __func__ << " in orig " << orig << dendl; continue; } o.push_back(item); used.insert(item); ldout(cct, 10) << __func__ << " pos " << pos << " replace " << *i << " -> " << item << dendl; replaced = true; ceph_assert(i != orig.end()); ++i; break; } if (!replaced) { for (auto item : more_underfull) { ldout(cct, 10) << __func__ << " more underfull pos " << pos << " was " << *i << " considering " << item << dendl; if (used.count(item)) { ldout(cct, 20) << __func__ << " in used " << used << dendl; continue; } if (!subtree_contains(from, item)) { ldout(cct, 20) << __func__ << " not in subtree " << from << dendl; continue; } if (std::find(orig.begin(), orig.end(), item) != orig.end()) { ldout(cct, 20) << __func__ << " in orig " << orig << dendl; continue; } o.push_back(item); used.insert(item); ldout(cct, 10) << __func__ << " pos " << pos << " replace " << *i << " -> " << item << dendl; replaced = true; assert(i != orig.end()); ++i; break; } } } if (!replaced) { ldout(cct, 10) << __func__ << " pos " << pos << " keep " << *i << dendl; ceph_assert(i != orig.end()); o.push_back(*i); ++i; } if (i == orig.end()) { ldout(cct, 10) << __func__ << " end of orig, break 1" << dendl; break; } } } if (j + 1 < stack.size()) { // check if any buckets have overfull leaves but no underfull candidates for (int pos = 0; pos < fanout; ++pos) { if 
(underfull_buckets[j].count(o[pos]) == 0) { // are any leaves overfull? bool any_overfull = false; for (auto osd : leaves[pos]) { if (overfull.count(osd)) { any_overfull = true; break; } } if (any_overfull) { ldout(cct, 10) << " bucket " << o[pos] << " has no underfull targets and " << ">0 leaves " << leaves[pos] << " is overfull; alts " << underfull_buckets[j] << dendl; for (auto alt : underfull_buckets[j]) { if (std::find(o.begin(), o.end(), alt) == o.end()) { // see if alt has the same parent if (j == 0 || get_parent_of_type(o[pos], stack[j-1].first, rule) == get_parent_of_type(alt, stack[j-1].first, rule)) { if (j) ldout(cct, 10) << " replacing " << o[pos] << " (which has no underfull leaves) with " << alt << " (same parent " << get_parent_of_type(alt, stack[j-1].first, rule) << " type " << type << ")" << dendl; else ldout(cct, 10) << " replacing " << o[pos] << " (which has no underfull leaves) with " << alt << " (first level)" << dendl; o[pos] = alt; break; } else { ldout(cct, 30) << " alt " << alt << " for " << o[pos] << " has different parent, skipping" << dendl; } } } } } } } if (i == orig.end()) { ldout(cct, 10) << __func__ << " end of orig, break 2" << dendl; break; } } ldout(cct, 10) << __func__ << " w <- " << o << " was " << w << dendl; w.swap(o); } *pw = w; return 0; } int CrushWrapper::try_remap_rule( CephContext *cct, int ruleno, int maxout, const set<int>& overfull, const vector<int>& underfull, const vector<int>& more_underfull, const vector<int>& orig, vector<int> *out) const { const crush_map *map = crush; const crush_rule *rule = get_rule(ruleno); ceph_assert(rule); ldout(cct, 10) << __func__ << " ruleno " << ruleno << " numrep " << maxout << " overfull " << overfull << " underfull " << underfull << " more_underfull " << more_underfull << " orig " << orig << dendl; vector<int> w; // working set out->clear(); auto i = orig.begin(); set<int> used; vector<pair<int,int>> type_stack; // (type, fan-out) int root_bucket = 0; for (unsigned step = 0; step < rule->len; ++step) { const crush_rule_step *curstep = &rule->steps[step]; ldout(cct, 10) << __func__ << " step " << step << " w " << w << dendl; switch (curstep->op) { case CRUSH_RULE_TAKE: if ((curstep->arg1 >= 0 && curstep->arg1 < map->max_devices) || (-1-curstep->arg1 >= 0 && -1-curstep->arg1 < map->max_buckets && map->buckets[-1-curstep->arg1])) { w.clear(); w.push_back(curstep->arg1); root_bucket = curstep->arg1; ldout(cct, 10) << __func__ << " take " << w << dendl; } else { ldout(cct, 1) << " bad take value " << curstep->arg1 << dendl; } break; case CRUSH_RULE_CHOOSELEAF_FIRSTN: case CRUSH_RULE_CHOOSELEAF_INDEP: { int numrep = curstep->arg1; int type = curstep->arg2; if (numrep <= 0) numrep += maxout; type_stack.push_back(make_pair(type, numrep)); if (type > 0) type_stack.push_back(make_pair(0, 1)); int r = _choose_type_stack(cct, type_stack, overfull, underfull, more_underfull, orig, i, used, &w, root_bucket, ruleno); if (r < 0) return r; type_stack.clear(); } break; case CRUSH_RULE_CHOOSE_FIRSTN: case CRUSH_RULE_CHOOSE_INDEP: { int numrep = curstep->arg1; int type = curstep->arg2; if (numrep <= 0) numrep += maxout; type_stack.push_back(make_pair(type, numrep)); } break; case CRUSH_RULE_EMIT: ldout(cct, 10) << " emit " << w << dendl; if (!type_stack.empty()) { int r = _choose_type_stack(cct, type_stack, overfull, underfull, more_underfull, orig, i, used, &w, root_bucket, ruleno); if (r < 0) return r; type_stack.clear(); } for (auto item : w) { out->push_back(item); } w.clear(); break; default: // ignore break; } } 
return 0; } int CrushWrapper::_choose_args_adjust_item_weight_in_bucket( CephContext *cct, crush_choose_arg_map cmap, int bucketid, int id, const vector<int>& weight, ostream *ss) { int changed = 0; int bidx = -1 - bucketid; crush_bucket *b = crush->buckets[bidx]; if (bidx >= (int)cmap.size) { if (ss) *ss << "no weight-set for bucket " << b->id; ldout(cct, 10) << __func__ << " no crush_choose_arg for bucket " << b->id << dendl; return 0; } crush_choose_arg *carg = &cmap.args[bidx]; if (carg->weight_set == NULL) { // create a weight-set for this bucket and populate it with the // bucket weights unsigned positions = get_choose_args_positions(cmap); carg->weight_set_positions = positions; carg->weight_set = static_cast<crush_weight_set*>( calloc(sizeof(crush_weight_set), positions)); for (unsigned p = 0; p < positions; ++p) { carg->weight_set[p].size = b->size; carg->weight_set[p].weights = (__u32*)calloc(b->size, sizeof(__u32)); for (unsigned i = 0; i < b->size; ++i) { carg->weight_set[p].weights[i] = crush_get_bucket_item_weight(b, i); } } changed++; } if (carg->weight_set_positions != weight.size()) { if (ss) *ss << "weight_set_positions != " << weight.size() << " for bucket " << b->id; ldout(cct, 10) << __func__ << " weight_set_positions != " << weight.size() << " for bucket " << b->id << dendl; return 0; } for (unsigned i = 0; i < b->size; i++) { if (b->items[i] == id) { for (unsigned j = 0; j < weight.size(); ++j) { carg->weight_set[j].weights[i] = weight[j]; } ldout(cct, 5) << __func__ << " set " << id << " to " << weight << " in bucket " << b->id << dendl; changed++; } } if (changed) { vector<int> bucket_weight(weight.size(), 0); for (unsigned i = 0; i < b->size; i++) { for (unsigned j = 0; j < weight.size(); ++j) { bucket_weight[j] += carg->weight_set[j].weights[i]; } } choose_args_adjust_item_weight(cct, cmap, b->id, bucket_weight, nullptr); } return changed; } int CrushWrapper::choose_args_adjust_item_weight( CephContext *cct, crush_choose_arg_map cmap, int id, const vector<int>& weight, ostream *ss) { ldout(cct, 5) << __func__ << " " << id << " weight " << weight << dendl; int changed = 0; for (int bidx = 0; bidx < crush->max_buckets; bidx++) { crush_bucket *b = crush->buckets[bidx]; if (b == nullptr) { continue; } changed += _choose_args_adjust_item_weight_in_bucket( cct, cmap, b->id, id, weight, ss); } if (!changed) { if (ss) *ss << "item " << id << " not found in crush map"; return -ENOENT; } return changed; }
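// Illustrative sketch, not part of the upstream file: one way a balancer-style
// caller might use try_remap_rule() to swap overfull OSDs in an existing PG
// mapping for underfull candidates.  The rule id, the OSD ids and the already
// populated CrushWrapper/CephContext are assumptions made only for this example.
static int example_remap_one_pg(CephContext *cct, const CrushWrapper& crush)
{
  std::set<int> overfull = {3};              // OSDs above their target utilization
  std::vector<int> underfull = {7, 9};       // preferred replacement candidates
  std::vector<int> more_underfull = {11};    // acceptable fallbacks
  std::vector<int> orig = {3, 5, 6};         // current mapping for one PG
  std::vector<int> remapped;
  int r = crush.try_remap_rule(cct, /*ruleno=*/0, /*maxout=*/3,
                               overfull, underfull, more_underfull,
                               orig, &remapped);
  if (r < 0)
    return r;
  // On success `remapped` keeps the original items where possible; overfull
  // leaves (and buckets whose leaves are overfull with no underfull leaves of
  // their own) are replaced by candidates that sit under the same CRUSH
  // parent, which is what _choose_type_stack() above enforces.
  return 0;
}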
119,130
27.070452
103
cc
null
ceph-main/src/crush/CrushWrapper.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_CRUSH_WRAPPER_H #define CEPH_CRUSH_WRAPPER_H #include <stdlib.h> #include <map> #include <set> #include <string> #include <iosfwd> #include "include/types.h" extern "C" { #include "crush.h" #include "hash.h" #include "mapper.h" #include "builder.h" } #include "include/ceph_assert.h" #include "include/err.h" #include "include/encoding.h" #include "include/mempool.h" namespace ceph { class Formatter; } namespace CrushTreeDumper { typedef mempool::osdmap::map<int64_t,std::string> name_map_t; } inline void encode(const crush_rule_step &s, ceph::buffer::list &bl) { using ceph::encode; encode(s.op, bl); encode(s.arg1, bl); encode(s.arg2, bl); } inline void decode(crush_rule_step &s, ceph::buffer::list::const_iterator &p) { using ceph::decode; decode(s.op, p); decode(s.arg1, p); decode(s.arg2, p); } class CrushWrapper { public: // magic value used by OSDMap for a "default" fallback choose_args, used if // the choose_arg_map passed to do_rule does not exist. if this also // doesn't exist, fall back to canonical weights. enum { DEFAULT_CHOOSE_ARGS = -1 }; std::map<int32_t, std::string> type_map; // item(bucket/device) type id ==> item type name std::map<int32_t, std::string> name_map; // item id ==> item name std::map<int32_t, std::string> rule_name_map; std::map<int32_t, int32_t> class_map; /* item id -> class id */ std::map<int32_t, std::string> class_name; /* class id -> class name */ std::map<std::string, int32_t> class_rname; /* class name -> class id */ std::map<int32_t, std::map<int32_t, int32_t> > class_bucket; /* bucket[id][class] == id */ std::map<int64_t, crush_choose_arg_map> choose_args; private: struct crush_map *crush = nullptr; /* reverse maps */ mutable bool have_rmaps = false; mutable std::map<std::string, int> type_rmap, name_rmap, rule_name_rmap; void build_rmaps() const { if (have_rmaps) return; build_rmap(type_map, type_rmap); build_rmap(name_map, name_rmap); build_rmap(rule_name_map, rule_name_rmap); have_rmaps = true; } void build_rmap(const std::map<int, std::string> &f, std::map<std::string, int> &r) const { r.clear(); for (auto p = f.begin(); p != f.end(); ++p) r[p->second] = p->first; } public: CrushWrapper(const CrushWrapper& other); const CrushWrapper& operator=(const CrushWrapper& other); CrushWrapper() { create(); } ~CrushWrapper() { if (crush) crush_destroy(crush); choose_args_clear(); } crush_map *get_crush_map() { return crush; } /* building */ void create() { if (crush) crush_destroy(crush); crush = crush_create(); choose_args_clear(); ceph_assert(crush); have_rmaps = false; set_tunables_default(); } /// true if any buckets that aren't straw2 bool has_non_straw2_buckets() const; // tunables void set_tunables_argonaut() { crush->choose_local_tries = 2; crush->choose_local_fallback_tries = 5; crush->choose_total_tries = 19; crush->chooseleaf_descend_once = 0; crush->chooseleaf_vary_r = 0; crush->chooseleaf_stable = 0; crush->allowed_bucket_algs = CRUSH_LEGACY_ALLOWED_BUCKET_ALGS; } void set_tunables_bobtail() { crush->choose_local_tries = 0; crush->choose_local_fallback_tries = 0; crush->choose_total_tries = 50; crush->chooseleaf_descend_once = 1; crush->chooseleaf_vary_r = 0; crush->chooseleaf_stable = 0; crush->allowed_bucket_algs = CRUSH_LEGACY_ALLOWED_BUCKET_ALGS; } void set_tunables_firefly() { crush->choose_local_tries = 0; crush->choose_local_fallback_tries = 0; crush->choose_total_tries = 50; crush->chooseleaf_descend_once = 1; 
crush->chooseleaf_vary_r = 1; crush->chooseleaf_stable = 0; crush->allowed_bucket_algs = CRUSH_LEGACY_ALLOWED_BUCKET_ALGS; } void set_tunables_hammer() { crush->choose_local_tries = 0; crush->choose_local_fallback_tries = 0; crush->choose_total_tries = 50; crush->chooseleaf_descend_once = 1; crush->chooseleaf_vary_r = 1; crush->chooseleaf_stable = 0; crush->allowed_bucket_algs = (1 << CRUSH_BUCKET_UNIFORM) | (1 << CRUSH_BUCKET_LIST) | (1 << CRUSH_BUCKET_STRAW) | (1 << CRUSH_BUCKET_STRAW2); } void set_tunables_jewel() { crush->choose_local_tries = 0; crush->choose_local_fallback_tries = 0; crush->choose_total_tries = 50; crush->chooseleaf_descend_once = 1; crush->chooseleaf_vary_r = 1; crush->chooseleaf_stable = 1; crush->allowed_bucket_algs = (1 << CRUSH_BUCKET_UNIFORM) | (1 << CRUSH_BUCKET_LIST) | (1 << CRUSH_BUCKET_STRAW) | (1 << CRUSH_BUCKET_STRAW2); } void set_tunables_legacy() { set_tunables_argonaut(); crush->straw_calc_version = 0; } void set_tunables_optimal() { set_tunables_jewel(); crush->straw_calc_version = 1; } void set_tunables_default() { set_tunables_jewel(); crush->straw_calc_version = 1; } int get_choose_local_tries() const { return crush->choose_local_tries; } void set_choose_local_tries(int n) { crush->choose_local_tries = n; } int get_choose_local_fallback_tries() const { return crush->choose_local_fallback_tries; } void set_choose_local_fallback_tries(int n) { crush->choose_local_fallback_tries = n; } int get_choose_total_tries() const { return crush->choose_total_tries; } void set_choose_total_tries(int n) { crush->choose_total_tries = n; } int get_chooseleaf_descend_once() const { return crush->chooseleaf_descend_once; } void set_chooseleaf_descend_once(int n) { crush->chooseleaf_descend_once = !!n; } int get_chooseleaf_vary_r() const { return crush->chooseleaf_vary_r; } void set_chooseleaf_vary_r(int n) { crush->chooseleaf_vary_r = n; } int get_chooseleaf_stable() const { return crush->chooseleaf_stable; } void set_chooseleaf_stable(int n) { crush->chooseleaf_stable = n; } int get_straw_calc_version() const { return crush->straw_calc_version; } void set_straw_calc_version(int n) { crush->straw_calc_version = n; } unsigned get_allowed_bucket_algs() const { return crush->allowed_bucket_algs; } void set_allowed_bucket_algs(unsigned n) { crush->allowed_bucket_algs = n; } bool has_argonaut_tunables() const { return crush->choose_local_tries == 2 && crush->choose_local_fallback_tries == 5 && crush->choose_total_tries == 19 && crush->chooseleaf_descend_once == 0 && crush->chooseleaf_vary_r == 0 && crush->chooseleaf_stable == 0 && crush->allowed_bucket_algs == CRUSH_LEGACY_ALLOWED_BUCKET_ALGS; } bool has_bobtail_tunables() const { return crush->choose_local_tries == 0 && crush->choose_local_fallback_tries == 0 && crush->choose_total_tries == 50 && crush->chooseleaf_descend_once == 1 && crush->chooseleaf_vary_r == 0 && crush->chooseleaf_stable == 0 && crush->allowed_bucket_algs == CRUSH_LEGACY_ALLOWED_BUCKET_ALGS; } bool has_firefly_tunables() const { return crush->choose_local_tries == 0 && crush->choose_local_fallback_tries == 0 && crush->choose_total_tries == 50 && crush->chooseleaf_descend_once == 1 && crush->chooseleaf_vary_r == 1 && crush->chooseleaf_stable == 0 && crush->allowed_bucket_algs == CRUSH_LEGACY_ALLOWED_BUCKET_ALGS; } bool has_hammer_tunables() const { return crush->choose_local_tries == 0 && crush->choose_local_fallback_tries == 0 && crush->choose_total_tries == 50 && crush->chooseleaf_descend_once == 1 && crush->chooseleaf_vary_r == 1 && 
crush->chooseleaf_stable == 0 && crush->allowed_bucket_algs == ((1 << CRUSH_BUCKET_UNIFORM) | (1 << CRUSH_BUCKET_LIST) | (1 << CRUSH_BUCKET_STRAW) | (1 << CRUSH_BUCKET_STRAW2)); } bool has_jewel_tunables() const { return crush->choose_local_tries == 0 && crush->choose_local_fallback_tries == 0 && crush->choose_total_tries == 50 && crush->chooseleaf_descend_once == 1 && crush->chooseleaf_vary_r == 1 && crush->chooseleaf_stable == 1 && crush->allowed_bucket_algs == ((1 << CRUSH_BUCKET_UNIFORM) | (1 << CRUSH_BUCKET_LIST) | (1 << CRUSH_BUCKET_STRAW) | (1 << CRUSH_BUCKET_STRAW2)); } bool has_optimal_tunables() const { return has_jewel_tunables(); } bool has_legacy_tunables() const { return has_argonaut_tunables(); } bool has_nondefault_tunables() const { return (crush->choose_local_tries != 2 || crush->choose_local_fallback_tries != 5 || crush->choose_total_tries != 19); } bool has_nondefault_tunables2() const { return crush->chooseleaf_descend_once != 0; } bool has_nondefault_tunables3() const { return crush->chooseleaf_vary_r != 0; } bool has_nondefault_tunables5() const { return crush->chooseleaf_stable != 0; } bool has_v2_rules() const; bool has_v3_rules() const; bool has_v4_buckets() const; bool has_v5_rules() const; bool has_choose_args() const; // any choose_args bool has_incompat_choose_args() const; // choose_args that can't be made compat bool is_v2_rule(unsigned ruleid) const; bool is_v3_rule(unsigned ruleid) const; bool is_v5_rule(unsigned ruleid) const; std::string get_min_required_version() const { if (has_v5_rules() || has_nondefault_tunables5()) return "jewel"; else if (has_v4_buckets()) return "hammer"; else if (has_nondefault_tunables3()) return "firefly"; else if (has_nondefault_tunables2() || has_nondefault_tunables()) return "bobtail"; else return "argonaut"; } // default bucket types unsigned get_default_bucket_alg() const { // in order of preference if (crush->allowed_bucket_algs & (1 << CRUSH_BUCKET_STRAW2)) return CRUSH_BUCKET_STRAW2; if (crush->allowed_bucket_algs & (1 << CRUSH_BUCKET_STRAW)) return CRUSH_BUCKET_STRAW; if (crush->allowed_bucket_algs & (1 << CRUSH_BUCKET_TREE)) return CRUSH_BUCKET_TREE; if (crush->allowed_bucket_algs & (1 << CRUSH_BUCKET_LIST)) return CRUSH_BUCKET_LIST; if (crush->allowed_bucket_algs & (1 << CRUSH_BUCKET_UNIFORM)) return CRUSH_BUCKET_UNIFORM; return 0; } // bucket types int get_num_type_names() const { return type_map.size(); } int get_max_type_id() const { if (type_map.empty()) return 0; return type_map.rbegin()->first; } int get_type_id(const std::string& name) const { build_rmaps(); auto found = type_rmap.find(name); if (found != type_rmap.end()) { return found->second; } else { return -1; } } std::optional<int> get_validated_type_id(const std::string& name) const { int retval = get_type_id(name); if (retval == -1 && !type_rmap.count(name)) { return {}; } else { return retval; } } const char *get_type_name(int t) const { auto p = type_map.find(t); if (p != type_map.end()) return p->second.c_str(); return 0; } void set_type_name(int i, const std::string& name) { type_map[i] = name; if (have_rmaps) type_rmap[name] = i; } // item/bucket names bool name_exists(const std::string& name) const { build_rmaps(); return name_rmap.count(name); } bool item_exists(int i) const { return name_map.count(i); } int get_item_id(const std::string& name) const { build_rmaps(); if (name_rmap.count(name)) return name_rmap[name]; return 0; /* hrm */ } const char *get_item_name(int t) const { std::map<int,std::string>::const_iterator p = name_map.find(t); if 
(p != name_map.end()) return p->second.c_str(); return 0; } int set_item_name(int i, const std::string& name) { if (!is_valid_crush_name(name)) return -EINVAL; name_map[i] = name; if (have_rmaps) name_rmap[name] = i; return 0; } void swap_names(int a, int b) { std::string an = name_map[a]; std::string bn = name_map[b]; name_map[a] = bn; name_map[b] = an; if (have_rmaps) { name_rmap[an] = b; name_rmap[bn] = a; } } int split_id_class(int i, int *idout, int *classout) const; bool class_exists(const std::string& name) const { return class_rname.count(name); } const char *get_class_name(int i) const { auto p = class_name.find(i); if (p != class_name.end()) return p->second.c_str(); return 0; } int get_class_id(const std::string& name) const { auto p = class_rname.find(name); if (p != class_rname.end()) return p->second; else return -EINVAL; } int remove_class_name(const std::string& name) { auto p = class_rname.find(name); if (p == class_rname.end()) return -ENOENT; int class_id = p->second; auto q = class_name.find(class_id); if (q == class_name.end()) return -ENOENT; class_rname.erase(name); class_name.erase(class_id); return 0; } int32_t _alloc_class_id() const; int get_or_create_class_id(const std::string& name) { int c = get_class_id(name); if (c < 0) { int i = _alloc_class_id(); class_name[i] = name; class_rname[name] = i; return i; } else { return c; } } const char *get_item_class(int t) const { std::map<int,int>::const_iterator p = class_map.find(t); if (p == class_map.end()) return 0; return get_class_name(p->second); } int get_item_class_id(int t) const { auto p = class_map.find(t); if (p == class_map.end()) return -ENOENT; return p->second; } int set_item_class(int i, const std::string& name) { if (!is_valid_crush_name(name)) return -EINVAL; class_map[i] = get_or_create_class_id(name); return 0; } int set_item_class(int i, int c) { class_map[i] = c; return c; } void get_devices_by_class(const std::string &name, std::set<int> *devices) const { ceph_assert(devices); devices->clear(); if (!class_exists(name)) { return; } auto cid = get_class_id(name); for (auto& p : class_map) { if (p.first >= 0 && p.second == cid) { devices->insert(p.first); } } } void class_remove_item(int i) { auto it = class_map.find(i); if (it == class_map.end()) { return; } class_map.erase(it); } int can_rename_item(const std::string& srcname, const std::string& dstname, std::ostream *ss) const; int rename_item(const std::string& srcname, const std::string& dstname, std::ostream *ss); int can_rename_bucket(const std::string& srcname, const std::string& dstname, std::ostream *ss) const; int rename_bucket(const std::string& srcname, const std::string& dstname, std::ostream *ss); // rule names int rename_rule(const std::string& srcname, const std::string& dstname, std::ostream *ss); bool rule_exists(std::string name) const { build_rmaps(); return rule_name_rmap.count(name); } int get_rule_id(std::string name) const { build_rmaps(); if (rule_name_rmap.count(name)) return rule_name_rmap[name]; return -ENOENT; } const char *get_rule_name(int t) const { auto p = rule_name_map.find(t); if (p != rule_name_map.end()) return p->second.c_str(); return 0; } void set_rule_name(int i, const std::string& name) { rule_name_map[i] = name; if (have_rmaps) rule_name_rmap[name] = i; } bool is_shadow_item(int id) const { const char *name = get_item_name(id); return name && !is_valid_crush_name(name); } /** * find tree nodes referenced by rules by a 'take' command * * Note that these may not be parentless roots. 
*/ void find_takes(std::set<int> *roots) const; void find_takes_by_rule(int rule, std::set<int> *roots) const; /** * find tree roots * * These are parentless nodes in the map. */ void find_roots(std::set<int> *roots) const; /** * find tree roots that contain shadow (device class) items only */ void find_shadow_roots(std::set<int> *roots) const { std::set<int> all; find_roots(&all); for (auto& p: all) { if (is_shadow_item(p)) { roots->insert(p); } } } /** * find tree roots that are not shadow (device class) items * * These are parentless nodes in the map that are not shadow * items for device classes. */ void find_nonshadow_roots(std::set<int> *roots) const { std::set<int> all; find_roots(&all); for (auto& p: all) { if (!is_shadow_item(p)) { roots->insert(p); } } } /** * see if an item is contained within a subtree * * @param root haystack * @param item needle * @return true if the item is located beneath the given node */ bool subtree_contains(int root, int item) const; private: /** * search for an item in any bucket * * @param i item * @return true if present */ bool _search_item_exists(int i) const; bool is_parent_of(int child, int p) const; public: /** * see if item is located where we think it is * * This verifies that the given item is located at a particular * location in the hierarchy. However, that check is imprecise; we * are actually verifying that the most specific location key/value * is correct. For example, if loc specifies that rack=foo and * host=bar, it will verify that host=bar is correct; any placement * above that level in the hierarchy is ignored. This matches the * semantics for insert_item(). * * @param cct cct * @param item item id * @param loc location to check (map of type to bucket names) * @param weight optional pointer to weight of item at that location * @return true if item is at specified location */ bool check_item_loc(CephContext *cct, int item, const std::map<std::string,std::string>& loc, int *iweight); bool check_item_loc(CephContext *cct, int item, const std::map<std::string,std::string>& loc, float *weight) { int iweight; bool ret = check_item_loc(cct, item, loc, &iweight); if (weight) *weight = (float)iweight / (float)0x10000; return ret; } /** * returns the (type, name) of the parent bucket of id * * FIXME: ambiguous for items that occur multiple times in the map */ std::pair<std::string,std::string> get_immediate_parent(int id, int *ret = NULL) const; int get_immediate_parent_id(int id, int *parent) const; /** * return ancestor of the given type, or 0 if none * can pass in a specific crush **rule** to return ancestor from that rule only * (parent is always a bucket and thus <0) */ int get_parent_of_type(int id, int type, int rule = -1) const; /** * get the fully qualified location of a device by successively finding * parents beginning at ID and ending at highest type number specified in * the CRUSH map which assumes that if device foo is under device bar, the * type_id of foo < bar where type_id is the integer specified in the CRUSH map * * returns the location in the form of (type=foo) where type is a type of bucket * specified in the CRUSH map and foo is a name specified in the CRUSH map */ std::map<std::string, std::string> get_full_location(int id) const; /** * return location map for a item, by name */ int get_full_location( const std::string& name, std::map<std::string,std::string> *ploc); /* * identical to get_full_location(int id) although it returns the type/name * pairs in the order they occur in the hierarchy. 
* * returns -ENOENT if id is not found. */ int get_full_location_ordered( int id, std::vector<std::pair<std::string, std::string> >& path) const; /* * identical to get_full_location_ordered(int id, vector<pair<string, string> >& path), * although it returns a concatenated string with the type/name pairs in descending * hierarchical order with format key1=val1,key2=val2. * * returns the location in descending hierarchy as a string. */ std::string get_full_location_ordered_string(int id) const; /** * returns (type_id, type) of all parent buckets between id and * default, can be used to check for anomalous CRUSH maps */ std::map<int, std::string> get_parent_hierarchy(int id) const; /** * enumerate immediate children of given node * * @param id parent bucket or device id * @return number of items, or error */ int get_children(int id, std::list<int> *children) const; /** * enumerate all children of given node * * @param id parent bucket or device id * @return number of items, or error */ int get_all_children(int id, std::set<int> *children) const; void get_children_of_type(int id, int type, std::vector<int> *children, bool exclude_shadow = true) const; /** * enumerate all subtrees by type */ void get_subtree_of_type(int type, std::vector<int> *subtrees); /** * verify upmapping results. * return 0 on success or a negative errno on error. */ int verify_upmap(CephContext *cct, int rule_id, int pool_size, const std::vector<int>& up); /** * enumerate leaves(devices) of given node * * @param name parent bucket name * @return 0 on success or a negative errno on error. */ int get_leaves(const std::string &name, std::set<int> *leaves) const; private: int _get_leaves(int id, std::list<int> *leaves) const; // worker public: /** * insert an item into the map at a specific position * * Add an item at a specific location of the hierarchy. * Specifically, we look for the most specific location constraint * for which a bucket already exists, and then create intervening * buckets beneath that in order to place the item. * * Note that any location specifiers *above* the most specific match * are ignored. For example, if we specify that osd.12 goes in * host=foo, rack=bar, and row=baz, and rack=bar is the most * specific match, we will create host=foo beneath that point and * put osd.12 inside it. However, we will not verify that rack=bar * is beneath row=baz or move it. * * In short, we will build out a hierarchy, and move leaves around, * but not adjust the hierarchy's internal structure. Yet. * * If the item is already present in the map, we will return EEXIST. * If the location key/value pairs are nonsensical * (rack=nameofdevice), or location specifies that do not attach us * to any existing part of the hierarchy, we will return EINVAL. * * @param cct cct * @param id item id * @param weight item weight * @param name item name * @param loc location (map of type to bucket names) * @param init_weight_sets initialize weight-set weights to weight (vs 0) * @return 0 for success, negative on error */ int insert_item(CephContext *cct, int id, float weight, std::string name, const std::map<std::string,std::string>& loc, bool init_weight_sets=true); /** * move a bucket in the hierarchy to the given location * * This has the same location and ancestor creation behavior as * insert_item(), but will relocate the specified existing bucket. 
* * @param cct cct * @param id bucket id * @param loc location (map of type to bucket names) * @return 0 for success, negative on error */ int move_bucket(CephContext *cct, int id, const std::map<std::string,std::string>& loc); /** * swap bucket contents of two buckets without touching bucket ids * * @param cct cct * @param src bucket a * @param dst bucket b * @return 0 for success, negative on error */ int swap_bucket(CephContext *cct, int src, int dst); /** * add a link to an existing bucket in the hierarchy to the new location * * This has the same location and ancestor creation behavior as * insert_item(), but will add a new link to the specified existing * bucket. * * @param cct cct * @param id bucket id * @param loc location (map of type to bucket names) * @return 0 for success, negative on error */ int link_bucket(CephContext *cct, int id, const std::map<std::string,std::string>& loc); /** * add or update an item's position in the map * * This is analogous to insert_item, except we will move an item if * it is already present. * * @param cct cct * @param id item id * @param weight item weight * @param name item name * @param loc location (map of type to bucket names) * @return 0 for no change, 1 for successful change, negative on error */ int update_item(CephContext *cct, int id, float weight, std::string name, const std::map<std::string, std::string>& loc); /** * create or move an item, but do not adjust its weight if it already exists * * @param cct cct * @param item item id * @param weight initial item weight (if we need to create it) * @param name item name * @param loc location (map of type to bucket names) * @param init_weight_sets initialize weight-set values to weight (vs 0) * @return 0 for no change, 1 for successful change, negative on error */ int create_or_move_item(CephContext *cct, int item, float weight, std::string name, const std::map<std::string,std::string>& loc, bool init_weight_sets=true); /** * remove all instances of an item from the map * * @param cct cct * @param id item id to remove * @param unlink_only unlink but do not remove bucket (useful if multiple links or not empty) * @return 0 on success, negative on error */ int remove_item(CephContext *cct, int id, bool unlink_only); /** * recursively remove buckets starting at item and stop removing * when a bucket is in use. * * @param item id to remove * @return 0 on success, negative on error */ int remove_root(CephContext *cct, int item); /** * remove all instances of an item nested beneath a certain point from the map * * @param cct cct * @param id item id to remove * @param ancestor ancestor item id under which to search for id * @param unlink_only unlink but do not remove bucket (useful if bucket has multiple links or is not empty) * @return 0 on success, negative on error */ private: bool _maybe_remove_last_instance(CephContext *cct, int id, bool unlink_only); int _remove_item_under(CephContext *cct, int id, int ancestor, bool unlink_only); bool _bucket_is_in_use(int id); public: int remove_item_under(CephContext *cct, int id, int ancestor, bool unlink_only); /** * calculate the locality/distance from a given id to a crush location map * * Specifically, we look for the lowest-valued type for which the * location of id matches that described in loc. 
* * @param cct cct * @param id the existing id in the map * @param loc a set of key=value pairs describing a location in the hierarchy */ int get_common_ancestor_distance(CephContext *cct, int id, const std::multimap<std::string,std::string>& loc) const; /** * parse a set of key/value pairs out of a string vector * * These are used to describe a location in the CRUSH hierarchy. * * @param args list of strings (each key= or key=value) * @param ploc pointer to a resulting location map or multimap */ static int parse_loc_map(const std::vector<std::string>& args, std::map<std::string,std::string> *ploc); static int parse_loc_multimap(const std::vector<std::string>& args, std::multimap<std::string,std::string> *ploc); /** * get an item's weight * * Will return the weight for the first instance it finds. * * @param id item id to check * @return weight of item */ int get_item_weight(int id) const; float get_item_weightf(int id) const { return (float)get_item_weight(id) / (float)0x10000; } int get_item_weight_in_loc(int id, const std::map<std::string, std::string> &loc); float get_item_weightf_in_loc(int id, const std::map<std::string, std::string> &loc) { return (float)get_item_weight_in_loc(id, loc) / (float)0x10000; } int validate_weightf(float weight) { uint64_t iweight = weight * 0x10000; if (iweight > static_cast<uint64_t>(std::numeric_limits<int>::max())) { return -EOVERFLOW; } return 0; } int adjust_item_weight(CephContext *cct, int id, int weight, bool update_weight_sets=true); int adjust_item_weightf(CephContext *cct, int id, float weight, bool update_weight_sets=true) { int r = validate_weightf(weight); if (r < 0) { return r; } return adjust_item_weight(cct, id, (int)(weight * (float)0x10000), update_weight_sets); } int adjust_item_weight_in_bucket(CephContext *cct, int id, int weight, int bucket_id, bool update_weight_sets); int adjust_item_weight_in_loc(CephContext *cct, int id, int weight, const std::map<std::string,std::string>& loc, bool update_weight_sets=true); int adjust_item_weightf_in_loc(CephContext *cct, int id, float weight, const std::map<std::string,std::string>& loc, bool update_weight_sets=true) { int r = validate_weightf(weight); if (r < 0) { return r; } return adjust_item_weight_in_loc(cct, id, (int)(weight * (float)0x10000), loc, update_weight_sets); } void reweight(CephContext *cct); void reweight_bucket(crush_bucket *b, crush_choose_arg_map& arg_map, std::vector<uint32_t> *weightv); int adjust_subtree_weight(CephContext *cct, int id, int weight, bool update_weight_sets=true); int adjust_subtree_weightf(CephContext *cct, int id, float weight, bool update_weight_sets=true) { int r = validate_weightf(weight); if (r < 0) { return r; } return adjust_subtree_weight(cct, id, (int)(weight * (float)0x10000), update_weight_sets); } /// check if item id is present in the map hierarchy bool check_item_present(int id) const; /*** devices ***/ int get_max_devices() const { if (!crush) return 0; return crush->max_devices; } /*** rules ***/ private: crush_rule *get_rule(unsigned ruleno) const { if (!crush) return (crush_rule *)(-ENOENT); if (ruleno >= crush->max_rules) return 0; return crush->rules[ruleno]; } crush_rule_step *get_rule_step(unsigned ruleno, unsigned step) const { crush_rule *n = get_rule(ruleno); if (IS_ERR(n)) return (crush_rule_step *)(-EINVAL); if (step >= n->len) return (crush_rule_step *)(-EINVAL); return &n->steps[step]; } public: /* accessors */ int get_max_rules() const { if (!crush) return 0; return crush->max_rules; } bool rule_exists(unsigned ruleno) 
const { if (!crush) return false; if (ruleno < crush->max_rules && crush->rules[ruleno] != NULL) return true; return false; } bool rule_has_take(unsigned ruleno, int take) const { if (!crush) return false; crush_rule *rule = get_rule(ruleno); for (unsigned i = 0; i < rule->len; ++i) { if (rule->steps[i].op == CRUSH_RULE_TAKE && rule->steps[i].arg1 == take) { return true; } } return false; } int get_rule_len(unsigned ruleno) const { crush_rule *r = get_rule(ruleno); if (IS_ERR(r)) return PTR_ERR(r); return r->len; } int get_rule_type(unsigned ruleno) const { crush_rule *r = get_rule(ruleno); if (IS_ERR(r)) return -1; return r->type; } int get_rule_op(unsigned ruleno, unsigned step) const { crush_rule_step *s = get_rule_step(ruleno, step); if (IS_ERR(s)) return PTR_ERR(s); return s->op; } int get_rule_arg1(unsigned ruleno, unsigned step) const { crush_rule_step *s = get_rule_step(ruleno, step); if (IS_ERR(s)) return PTR_ERR(s); return s->arg1; } int get_rule_arg2(unsigned ruleno, unsigned step) const { crush_rule_step *s = get_rule_step(ruleno, step); if (IS_ERR(s)) return PTR_ERR(s); return s->arg2; } private: float _get_take_weight_osd_map(int root, std::map<int,float> *pmap) const; void _normalize_weight_map(float sum, const std::map<int,float>& m, std::map<int,float> *pmap) const; public: /** * calculate a map of osds to weights for a given rule * * Generate a map of which OSDs get how much relative weight for a * given rule. * * @param ruleno [in] rule id * @param pmap [out] map of osd to weight * @return 0 for success, or negative error code */ int get_rule_weight_osd_map(unsigned ruleno, std::map<int,float> *pmap) const; /** * calculate a map of osds to weights for a given starting root * * Generate a map of which OSDs get how much relative weight for a * given starting root * * @param root node * @param pmap [out] map of osd to weight * @return 0 for success, or negative error code */ int get_take_weight_osd_map(int root, std::map<int,float> *pmap) const; /* modifiers */ int add_rule(int ruleno, int len, int type) { if (!crush) return -ENOENT; crush_rule *n = crush_make_rule(len, type); ceph_assert(n); ruleno = crush_add_rule(crush, n, ruleno); return ruleno; } int set_rule_step(unsigned ruleno, unsigned step, int op, int arg1, int arg2) { if (!crush) return -ENOENT; crush_rule *n = get_rule(ruleno); if (!n) return -1; crush_rule_set_step(n, step, op, arg1, arg2); return 0; } int set_rule_step_take(unsigned ruleno, unsigned step, int val) { return set_rule_step(ruleno, step, CRUSH_RULE_TAKE, val, 0); } int set_rule_step_set_choose_tries(unsigned ruleno, unsigned step, int val) { return set_rule_step(ruleno, step, CRUSH_RULE_SET_CHOOSE_TRIES, val, 0); } int set_rule_step_set_choose_local_tries(unsigned ruleno, unsigned step, int val) { return set_rule_step(ruleno, step, CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES, val, 0); } int set_rule_step_set_choose_local_fallback_tries(unsigned ruleno, unsigned step, int val) { return set_rule_step(ruleno, step, CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES, val, 0); } int set_rule_step_set_chooseleaf_tries(unsigned ruleno, unsigned step, int val) { return set_rule_step(ruleno, step, CRUSH_RULE_SET_CHOOSELEAF_TRIES, val, 0); } int set_rule_step_set_chooseleaf_vary_r(unsigned ruleno, unsigned step, int val) { return set_rule_step(ruleno, step, CRUSH_RULE_SET_CHOOSELEAF_VARY_R, val, 0); } int set_rule_step_set_chooseleaf_stable(unsigned ruleno, unsigned step, int val) { return set_rule_step(ruleno, step, CRUSH_RULE_SET_CHOOSELEAF_STABLE, val, 0); } int 
set_rule_step_choose_firstn(unsigned ruleno, unsigned step, int val, int type) { return set_rule_step(ruleno, step, CRUSH_RULE_CHOOSE_FIRSTN, val, type); } int set_rule_step_choose_indep(unsigned ruleno, unsigned step, int val, int type) { return set_rule_step(ruleno, step, CRUSH_RULE_CHOOSE_INDEP, val, type); } int set_rule_step_choose_leaf_firstn(unsigned ruleno, unsigned step, int val, int type) { return set_rule_step(ruleno, step, CRUSH_RULE_CHOOSELEAF_FIRSTN, val, type); } int set_rule_step_choose_leaf_indep(unsigned ruleno, unsigned step, int val, int type) { return set_rule_step(ruleno, step, CRUSH_RULE_CHOOSELEAF_INDEP, val, type); } int set_rule_step_emit(unsigned ruleno, unsigned step) { return set_rule_step(ruleno, step, CRUSH_RULE_EMIT, 0, 0); } int add_simple_rule( std::string name, std::string root_name, std::string failure_domain_type, std::string device_class, std::string mode, int rule_type, std::ostream *err = 0); /** * @param rno rule[set] id to use, -1 to pick the lowest available */ int add_simple_rule_at( std::string name, std::string root_name, std::string failure_domain_type, std::string device_class, std::string mode, int rule_type, int rno, std::ostream *err = 0); int remove_rule(int ruleno); /** buckets **/ const crush_bucket *get_bucket(int id) const { if (!crush) return (crush_bucket *)(-EINVAL); unsigned int pos = (unsigned int)(-1 - id); unsigned int max_buckets = crush->max_buckets; if (pos >= max_buckets) return (crush_bucket *)(-ENOENT); crush_bucket *ret = crush->buckets[pos]; if (ret == NULL) return (crush_bucket *)(-ENOENT); return ret; } private: crush_bucket *get_bucket(int id) { if (!crush) return (crush_bucket *)(-EINVAL); unsigned int pos = (unsigned int)(-1 - id); unsigned int max_buckets = crush->max_buckets; if (pos >= max_buckets) return (crush_bucket *)(-ENOENT); crush_bucket *ret = crush->buckets[pos]; if (ret == NULL) return (crush_bucket *)(-ENOENT); return ret; } /** * detach a bucket from its parent and adjust the parent weight * * returns the weight of the detached bucket **/ int detach_bucket(CephContext *cct, int item); int get_new_bucket_id(); public: int get_max_buckets() const { if (!crush) return -EINVAL; return crush->max_buckets; } int get_next_bucket_id() const { if (!crush) return -EINVAL; return crush_get_next_bucket_id(crush); } bool bucket_exists(int id) const { const crush_bucket *b = get_bucket(id); if (IS_ERR(b)) return false; return true; } int get_bucket_weight(int id) const { const crush_bucket *b = get_bucket(id); if (IS_ERR(b)) return PTR_ERR(b); return b->weight; } float get_bucket_weightf(int id) const { const crush_bucket *b = get_bucket(id); if (IS_ERR(b)) return 0; return b->weight / (float)0x10000; } int get_bucket_type(int id) const { const crush_bucket *b = get_bucket(id); if (IS_ERR(b)) return PTR_ERR(b); return b->type; } int get_bucket_alg(int id) const { const crush_bucket *b = get_bucket(id); if (IS_ERR(b)) return PTR_ERR(b); return b->alg; } int get_bucket_hash(int id) const { const crush_bucket *b = get_bucket(id); if (IS_ERR(b)) return PTR_ERR(b); return b->hash; } int get_bucket_size(int id) const { const crush_bucket *b = get_bucket(id); if (IS_ERR(b)) return PTR_ERR(b); return b->size; } int get_bucket_item(int id, int pos) const { const crush_bucket *b = get_bucket(id); if (IS_ERR(b)) return PTR_ERR(b); if ((__u32)pos >= b->size) return PTR_ERR(b); return b->items[pos]; } int get_bucket_item_weight(int id, int pos) const { const crush_bucket *b = get_bucket(id); if (IS_ERR(b)) return PTR_ERR(b); 
return crush_get_bucket_item_weight(b, pos); } float get_bucket_item_weightf(int id, int pos) const { const crush_bucket *b = get_bucket(id); if (IS_ERR(b)) return 0; return (float)crush_get_bucket_item_weight(b, pos) / (float)0x10000; } /* modifiers */ int add_bucket(int bucketno, int alg, int hash, int type, int size, int *items, int *weights, int *idout); int bucket_add_item(crush_bucket *bucket, int item, int weight); int bucket_remove_item(struct crush_bucket *bucket, int item); int bucket_adjust_item_weight( CephContext *cct, struct crush_bucket *bucket, int item, int weight, bool adjust_weight_sets); void finalize() { ceph_assert(crush); crush_finalize(crush); if (!name_map.empty() && name_map.rbegin()->first >= crush->max_devices) { crush->max_devices = name_map.rbegin()->first + 1; } build_rmaps(); } int bucket_set_alg(int id, int alg); int update_device_class(int id, const std::string& class_name, const std::string& name, std::ostream *ss); int remove_device_class(CephContext *cct, int id, std::ostream *ss); int device_class_clone( int original, int device_class, const std::map<int32_t, std::map<int32_t, int32_t>>& old_class_bucket, const std::set<int32_t>& used_ids, int *clone, std::map<int, std::map<int,std::vector<int>>> *cmap_item_weight); bool class_is_in_use(int class_id, std::ostream *ss = nullptr); int rename_class(const std::string& srcname, const std::string& dstname); int populate_classes( const std::map<int32_t, std::map<int32_t, int32_t>>& old_class_bucket); int get_rules_by_class(const std::string &class_name, std::set<int> *rules); int get_rules_by_osd(int osd, std::set<int> *rules); bool _class_is_dead(int class_id); void cleanup_dead_classes(); int rebuild_roots_with_classes(CephContext *cct); /* remove unused roots generated for class devices */ int trim_roots_with_class(CephContext *cct); int reclassify( CephContext *cct, std::ostream& out, const std::map<std::string,std::string>& classify_root, const std::map<std::string,std::pair<std::string,std::string>>& classify_bucket ); int set_subtree_class(const std::string& name, const std::string& class_name); void start_choose_profile() { free(crush->choose_tries); /* * the original choose_total_tries value was off by one (it * counted "retries" and not "tries"). add one to alloc. */ crush->choose_tries = (__u32 *)calloc(sizeof(*crush->choose_tries), (crush->choose_total_tries + 1)); memset(crush->choose_tries, 0, sizeof(*crush->choose_tries) * (crush->choose_total_tries + 1)); } void stop_choose_profile() { free(crush->choose_tries); crush->choose_tries = 0; } int get_choose_profile(__u32 **vec) { if (crush->choose_tries) { *vec = crush->choose_tries; return crush->choose_total_tries; } return 0; } void set_max_devices(int m) { crush->max_devices = m; } /** * Return the lowest numbered rule of type `type` * * @returns a rule ID, or -1 if no matching rules found. 
*/ int find_first_rule(int type) const { for (size_t i = 0; i < crush->max_rules; ++i) { if (crush->rules[i] && crush->rules[i]->type == type) { return i; } } return -1; } bool have_choose_args(int64_t choose_args_index) const { return choose_args.count(choose_args_index); } crush_choose_arg_map choose_args_get_with_fallback( int64_t choose_args_index) const { auto i = choose_args.find(choose_args_index); if (i == choose_args.end()) { i = choose_args.find(DEFAULT_CHOOSE_ARGS); } if (i == choose_args.end()) { crush_choose_arg_map arg_map; arg_map.args = NULL; arg_map.size = 0; return arg_map; } else { return i->second; } } crush_choose_arg_map choose_args_get(int64_t choose_args_index) const { auto i = choose_args.find(choose_args_index); if (i == choose_args.end()) { crush_choose_arg_map arg_map; arg_map.args = NULL; arg_map.size = 0; return arg_map; } else { return i->second; } } void destroy_choose_args(crush_choose_arg_map arg_map) { for (__u32 i = 0; i < arg_map.size; i++) { crush_choose_arg *arg = &arg_map.args[i]; for (__u32 j = 0; j < arg->weight_set_positions; j++) { crush_weight_set *weight_set = &arg->weight_set[j]; free(weight_set->weights); } if (arg->weight_set) free(arg->weight_set); if (arg->ids) free(arg->ids); } free(arg_map.args); } bool create_choose_args(int64_t id, int positions) { if (choose_args.count(id)) return false; ceph_assert(positions); auto &cmap = choose_args[id]; cmap.args = static_cast<crush_choose_arg*>(calloc(sizeof(crush_choose_arg), crush->max_buckets)); cmap.size = crush->max_buckets; for (int bidx=0; bidx < crush->max_buckets; ++bidx) { crush_bucket *b = crush->buckets[bidx]; auto &carg = cmap.args[bidx]; carg.ids = NULL; carg.ids_size = 0; if (b && b->alg == CRUSH_BUCKET_STRAW2) { crush_bucket_straw2 *sb = reinterpret_cast<crush_bucket_straw2*>(b); carg.weight_set_positions = positions; carg.weight_set = static_cast<crush_weight_set*>(calloc(sizeof(crush_weight_set), carg.weight_set_positions)); // initialize with canonical weights for (int pos = 0; pos < positions; ++pos) { carg.weight_set[pos].size = b->size; carg.weight_set[pos].weights = (__u32*)calloc(4, b->size); for (unsigned i = 0; i < b->size; ++i) { carg.weight_set[pos].weights[i] = sb->item_weights[i]; } } } else { carg.weight_set = NULL; carg.weight_set_positions = 0; } } return true; } void rm_choose_args(int64_t id) { auto p = choose_args.find(id); if (p != choose_args.end()) { destroy_choose_args(p->second); choose_args.erase(p); } } void choose_args_clear() { for (auto w : choose_args) destroy_choose_args(w.second); choose_args.clear(); } // remove choose_args for buckets that no longer exist, create them for new buckets void update_choose_args(CephContext *cct); // adjust choose_args_map weight, preserving the hierarchical summation // property. used by callers optimizing layouts by tweaking weights. 
int _choose_args_adjust_item_weight_in_bucket( CephContext *cct, crush_choose_arg_map cmap, int bucketid, int id, const std::vector<int>& weight, std::ostream *ss); int choose_args_adjust_item_weight( CephContext *cct, crush_choose_arg_map cmap, int id, const std::vector<int>& weight, std::ostream *ss); int choose_args_adjust_item_weightf( CephContext *cct, crush_choose_arg_map cmap, int id, const std::vector<double>& weightf, std::ostream *ss) { std::vector<int> weight(weightf.size()); for (unsigned i = 0; i < weightf.size(); ++i) { weight[i] = (int)(weightf[i] * (double)0x10000); } return choose_args_adjust_item_weight(cct, cmap, id, weight, ss); } int get_choose_args_positions(crush_choose_arg_map cmap) { // infer positions from other buckets for (unsigned j = 0; j < cmap.size; ++j) { if (cmap.args[j].weight_set_positions) { return cmap.args[j].weight_set_positions; } } return 1; } template<typename WeightVector> void do_rule(int rule, int x, std::vector<int>& out, int maxout, const WeightVector& weight, uint64_t choose_args_index) const { int rawout[maxout]; char work[crush_work_size(crush, maxout)]; crush_init_workspace(crush, work); crush_choose_arg_map arg_map = choose_args_get_with_fallback( choose_args_index); int numrep = crush_do_rule(crush, rule, x, rawout, maxout, std::data(weight), std::size(weight), work, arg_map.args); if (numrep < 0) numrep = 0; out.resize(numrep); for (int i=0; i<numrep; i++) out[i] = rawout[i]; } int _choose_type_stack( CephContext *cct, const std::vector<std::pair<int,int>>& stack, const std::set<int>& overfull, const std::vector<int>& underfull, const std::vector<int>& more_underfull, const std::vector<int>& orig, std::vector<int>::const_iterator& i, std::set<int>& used, std::vector<int> *pw, int root_bucket, int rule) const; int try_remap_rule( CephContext *cct, int rule, int maxout, const std::set<int>& overfull, const std::vector<int>& underfull, const std::vector<int>& more_underfull, const std::vector<int>& orig, std::vector<int> *out) const; void encode(ceph::buffer::list &bl, uint64_t features) const; void decode(ceph::buffer::list::const_iterator &blp); void decode_crush_bucket(crush_bucket** bptr, ceph::buffer::list::const_iterator &blp); void dump(ceph::Formatter *f) const; void dump_rules(ceph::Formatter *f) const; void dump_rule(int rule, ceph::Formatter *f) const; void dump_tunables(ceph::Formatter *f) const; void dump_choose_args(ceph::Formatter *f) const; void list_rules(ceph::Formatter *f) const; void list_rules(std::ostream *ss) const; void dump_tree(std::ostream *out, ceph::Formatter *f, const CrushTreeDumper::name_map_t& ws, bool show_shadow = false) const; void dump_tree(std::ostream *out, ceph::Formatter *f) { dump_tree(out, f, CrushTreeDumper::name_map_t()); } void dump_tree(ceph::Formatter *f, const CrushTreeDumper::name_map_t& ws) const; static void generate_test_instances(std::list<CrushWrapper*>& o); int get_osd_pool_default_crush_replicated_rule(CephContext *cct); static bool is_valid_crush_name(const std::string& s); static bool is_valid_crush_loc(CephContext *cct, const std::map<std::string,std::string>& loc); }; WRITE_CLASS_ENCODER_FEATURES(CrushWrapper) #endif
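// Illustrative sketch, not part of the upstream header: building a tiny
// hierarchy with insert_item() and mapping an input through do_rule().  The
// type ids (0=osd, 1=host, 2=root), the item names and weights, the rule_type
// value 1 (replicated) and the CephContext* are assumptions made only for
// this example.
inline int example_tiny_crush_map(CephContext *cct)
{
  CrushWrapper crush;                      // constructor calls create()
  crush.set_type_name(0, "osd");
  crush.set_type_name(1, "host");
  crush.set_type_name(2, "root");

  // insert_item() builds out the missing host/root buckets beneath the most
  // specific location constraint that already exists.
  std::map<std::string, std::string> loc = {{"host", "host-a"}, {"root", "default"}};
  int r = crush.insert_item(cct, 0, 1.0, "osd.0", loc);
  if (r < 0)
    return r;
  loc["host"] = "host-b";
  r = crush.insert_item(cct, 1, 1.0, "osd.1", loc);
  if (r < 0)
    return r;

  // one replica per host, picked with the "firstn" mode
  int ruleno = crush.add_simple_rule("data", "default", "host", "",
                                     "firstn", 1 /* replicated */);
  if (ruleno < 0)
    return ruleno;
  crush.finalize();

  // map input x=1234 to two OSDs, every device carrying full weight (0x10000)
  std::vector<__u32> weights(crush.get_max_devices(), 0x10000);
  std::vector<int> out;
  crush.do_rule(ruleno, 1234, out, 2, weights, CrushWrapper::DEFAULT_CHOOSE_ARGS);
  return static_cast<int>(out.size());     // expected: one OSD from each host
}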
49,090
29.991793
109
h
null
ceph-main/src/crush/builder.h
#ifndef CEPH_CRUSH_BUILDER_H #define CEPH_CRUSH_BUILDER_H #include "include/int_types.h" struct crush_bucket; struct crush_choose_arg; struct crush_map; struct crush_rule; /** @ingroup API * * Allocate a crush_map with __malloc(3)__ and initialize it. The * caller is responsible for deallocating the crush_map with * crush_destroy(). * * The content of the allocated crush_map is set with * set_optimal_crush_map(). The caller is responsible for setting each * tunable in the __crush_map__ for backward compatibility or mapping * stability. * * @returns a pointer to the newly created crush_map or NULL */ extern struct crush_map *crush_create(); /** @ingroup API * * Analyze the content of __map__ and set the internal values required * before it can be used to map values with crush_do_rule(). The caller * must make sure it is run before crush_do_rule() and after any * function that modifies the __map__ (crush_add_bucket(), etc.). * * @param map the crush_map */ extern void crush_finalize(struct crush_map *map); /* rules */ /** @ingroup API * * Allocate an empty crush_rule structure large enough to store __len__ steps. * Steps can be added to a rule via crush_rule_set_step(). * * The caller is responsible for deallocating the returned pointer via * crush_destroy_rule(). * * If __malloc(3)__ fails, return NULL. * * @param len number of steps in the rule * @param type user defined value * * @returns a pointer to the newly created rule or NULL */ extern struct crush_rule *crush_make_rule(int len, int type); /** @ingroup API * * Set the __pos__ step of the __rule__ to an operand and up to two arguments. * The value of the operand __op__ determines if the arguments are used and how: * * - __CRUSH_RULE_NOOP__ do nothing. * - __CRUSH_RULE_TAKE__ select the __arg1__ item * - __CRUSH_RULE_EMIT__ append the selection to the results and clear * the selection * * - __CRUSH_RULE_CHOOSE_FIRSTN__ and __CRUSH_RULE_CHOOSE_INDEP__ * recursively explore each bucket currently selected, looking for * __arg1__ items of type __arg2__ and select them. * - __CRUSH_RULE_CHOOSELEAF_FIRSTN__ and __CRUSH_RULE_CHOOSELEAF_INDEP__ * recursively explore each bucket currently selected, looking for * __arg1__ leaves within all the buckets of type __arg2__ and * select them. * * In all __CHOOSE__ steps, if __arg1__ is less than or equal to zero, * the number of items to select is equal to the __max_result__ argument * of crush_do_rule() minus __arg1__. It is common to set __arg1__ to zero * to select as many items as requested by __max_result__. * * - __CRUSH_RULE_SET_CHOOSE_TRIES__ and __CRUSH_RULE_SET_CHOOSELEAF_TRIES__ * * The CHOOSE_FIRSTN and CHOOSE_INDEP rule step look for buckets of * a given type, randomly selecting them. If they are unlucky and * find the same bucket twice, they will try N+1 times (N being the * value of the choose_total_tries tunable). If there is a previous * SET_CHOOSE_TRIES step in the same rule, it will try C times * instead (C being the value of the argument of the * SET_CHOOSE_TRIES step). * * Note: the __choose_total_tries__ tunable defined in crush_map is * the number of retry, not the number of tries. The number of tries * is the number of retry+1. The SET_CHOOSE_TRIES rule step sets the * number of tries and does not need the + 1. This confusing * difference is inherited from an off-by-one bug from years ago. * * The CHOOSELEAF_FIRSTN and CHOOSELEAF_INDEP rule step do the same * as CHOOSE_FIRSTN and CHOOSE_INDEP but also recursively explore * each bucket found, looking for a single device. 
The same device * may be found in two different buckets because the crush map is * not a strict hierarchy, it is a DAG. When such a collision * happens, they will try again. The number of times they try to * find a non colliding device is: * * - If FIRSTN and there is no previous SET_CHOOSELEAF_TRIES rule * step: try N + 1 times (N being the value of the * __choose_total_tries__ tunable defined in crush_map) * * - If FIRSTN and there is a previous SET_CHOOSELEAF_TRIES rule * step: try P times (P being the value of the argument of the * SET_CHOOSELEAF_TRIES rule step) * * - If INDEP and there is no previous SET_CHOOSELEAF_TRIES rule * step: try 1 time. * * - If INDEP and there is a previous SET_CHOOSELEAF_TRIES rule step: try * P times (P being the value of the argument of the SET_CHOOSELEAF_TRIES * rule step) * * @param rule the rule in which the step is inserted * @param pos the zero based step index * @param op one of __CRUSH_RULE_NOOP__, __CRUSH_RULE_TAKE__, __CRUSH_RULE_CHOOSE_FIRSTN__, __CRUSH_RULE_CHOOSE_INDEP__, __CRUSH_RULE_CHOOSELEAF_FIRSTN__, __CRUSH_RULE_CHOOSELEAF_INDEP__, __CRUSH_RULE_SET_CHOOSE_TRIES__, __CRUSH_RULE_SET_CHOOSELEAF_TRIES__ or __CRUSH_RULE_EMIT__ * @param arg1 first argument for __op__ * @param arg2 second argument for __op__ */ extern void crush_rule_set_step(struct crush_rule *rule, int pos, int op, int arg1, int arg2); /** @ingroup API * * Add the __rule__ into the crush __map__ and assign it the * __ruleno__ unique identifier. If __ruleno__ is -1, the function will * assign the lowest available identifier. The __ruleno__ value must be * a positive integer lower than __CRUSH_MAX_RULES__. * * - return -ENOSPC if the rule identifier is >= __CRUSH_MAX_RULES__ * - return -ENOMEM if __realloc(3)__ fails to expand the array of * rules in the __map__ * * @param map the crush_map * @param rule the rule to add to the __map__ * @param ruleno a positive integer < __CRUSH_MAX_RULES__ or -1 * * @returns the rule unique identifier on success, < 0 on error */ extern int crush_add_rule(struct crush_map *map, struct crush_rule *rule, int ruleno); /* buckets */ extern int crush_get_next_bucket_id(struct crush_map *map); /** @ingroup API * * Add __bucket__ into the crush __map__ and assign it the * __bucketno__ unique identifier. If __bucketno__ is 0, the function * will assign the lowest available identifier. The bucket identifier * must be a negative integer. The bucket identifier is returned via * __idout__. * * - return -ENOMEM if __realloc(3)__ fails to expand the array of * buckets in the __map__ * - return -EEXIST if the __bucketno__ identifier is already assigned * to another bucket. * * @param[in] map the crush_map * @param[in] bucketno the bucket unique identifier or 0 * @param[in] bucket the bucket to add to the __map__ * @param[out] idout a pointer to the bucket identifier * * @returns 0 on success, < 0 on error */ extern int crush_add_bucket(struct crush_map *map, int bucketno, struct crush_bucket *bucket, int *idout); /** @ingroup API * * Allocate a crush_bucket with __malloc(3)__ and initialize it. The * content of the bucket is filled with __size__ items from * __items__. The item selection is set to use __alg__ which is one of * ::CRUSH_BUCKET_UNIFORM , ::CRUSH_BUCKET_LIST or * ::CRUSH_BUCKET_STRAW2. The initial __items__ are assigned a * weight from the __weights__ array, depending on the value of * __alg__. 
If __alg__ is ::CRUSH_BUCKET_UNIFORM, all items are set * to have a weight equal to __weights[0]__, otherwise the weight of * __items[x]__ is set to be the value of __weights[x]__. * * The caller is responsible for deallocating the returned pointer via * crush_destroy_bucket(). * * @param map __unused__ * @param alg algorithm for item selection * @param hash always set to CRUSH_HASH_RJENKINS1 * @param type user defined bucket type * @param size of the __items__ array * @param items array of __size__ items * @param weights the weight of each item in __items__, depending on __alg__ * * @returns a pointer to the newly created bucket or NULL */ struct crush_bucket *crush_make_bucket(struct crush_map *map, int alg, int hash, int type, int size, int *items, int *weights); extern struct crush_choose_arg *crush_make_choose_args(struct crush_map *map, int num_positions); extern void crush_destroy_choose_args(struct crush_choose_arg *args); /** @ingroup API * * Add __item__ to __bucket__ with __weight__. The weight of the new * item is added to the weight of the bucket so that it reflects * the total weight of all items. * * If __bucket->alg__ is ::CRUSH_BUCKET_UNIFORM, the value of __weight__ must be equal to * __(struct crush_bucket_uniform *)bucket->item_weight__. * * - return -ENOMEM if the __bucket__ cannot be resized with __realloc(3)__. * - return -ERANGE if adding __weight__ to the weight of the bucket overflows. * - return -EINVAL if __bucket->alg__ is ::CRUSH_BUCKET_UNIFORM and * the __weight__ is not equal to __(struct crush_bucket_uniform *)bucket->item_weight__. * - return -1 if the value of __bucket->alg__ is unknown. * * @returns 0 on success, < 0 on error */ extern int crush_bucket_add_item(struct crush_map *map, struct crush_bucket *bucket, int item, int weight); /** @ingroup API * * If __bucket->alg__ is ::CRUSH_BUCKET_UNIFORM, * __(struct crush_bucket_uniform *)bucket->item_weight__ is set to __weight__ and the * weight of the bucket is set to be the number of items in the bucket times the weight. * The return value is the difference between the new bucket weight and the former * bucket weight. The __item__ argument is ignored. * * If __bucket->alg__ is different from ::CRUSH_BUCKET_UNIFORM, * set the __weight__ of __item__ in __bucket__. The former weight of the * item is subtracted from the weight of the bucket and the new weight is added. * The return value is the difference between the new item weight and the former * item weight. * * @returns the difference between the new weight and the former weight */ extern int crush_bucket_adjust_item_weight(struct crush_map *map, struct crush_bucket *bucket, int item, int weight); /** @ingroup API * * Recursively update the weight of __bucket__ and its children, deep * first. The __bucket__ weight is set to the sum of the weight of the * items it contains. * * - return -ERANGE if the sum of the weight of the items in __bucket__ overflows. * - return -1 if the value of __bucket->alg__ is unknown. * * @param map a crush_map containing __bucket__ * @param bucket the root of the tree to reweight * @returns 0 on success, < 0 on error */ extern int crush_reweight_bucket(struct crush_map *map, struct crush_bucket *bucket); /** @ingroup API * * Remove __bucket__ from __map__ and deallocate it via crush_destroy_bucket(). * __assert(3)__ that __bucket__ is in __map__. The caller is responsible for * making sure the bucket is not the child of any other bucket in the __map__. 
* * @param map a crush_map containing __bucket__ * @param bucket the bucket to remove from __map__ * @returns 0 */ extern int crush_remove_bucket(struct crush_map *map, struct crush_bucket *bucket); /** @ingroup API * * Remove __item__ from __bucket__ and subtract the item weight from * the bucket weight. If the weight of the item is greater than the * weight of the bucket, silently set the bucket weight to zero. * * - return -ENOMEM if the __bucket__ cannot be sized down with __realloc(3)__. * - return -1 if the value of __bucket->alg__ is unknown. * * @param map __unused__ * @param bucket the bucket from which __item__ is removed * @param item the item to remove from __bucket__ * @returns 0 on success, < 0 on error */ extern int crush_bucket_remove_item(struct crush_map *map, struct crush_bucket *bucket, int item); struct crush_bucket_uniform * crush_make_uniform_bucket(int hash, int type, int size, int *items, int item_weight); struct crush_bucket_list* crush_make_list_bucket(int hash, int type, int size, int *items, int *weights); struct crush_bucket_tree* crush_make_tree_bucket(int hash, int type, int size, int *items, /* in leaf order */ int *weights); struct crush_bucket_straw * crush_make_straw_bucket(struct crush_map *map, int hash, int type, int size, int *items, int *weights); extern int crush_addition_is_unsafe(__u32 a, __u32 b); extern int crush_multiplication_is_unsafe(__u32 a, __u32 b); /** @ingroup API * * Set the __map__ tunables to implement the most ancient behavior, * for backward compatibility purposes only. * * - choose_local_tries == 2 * - choose_local_fallback_tries == 5 * - choose_total_tries == 19 * - chooseleaf_descend_once == 0 * - chooseleaf_vary_r == 0 * - straw_calc_version == 0 * - chooseleaf_stable = 0 * * See the __crush_map__ documentation for more information about * each tunable. * * @param map a crush_map */ extern void set_legacy_crush_map(struct crush_map *map); /** @ingroup API * * Set the __map__ tunables to implement the optimal behavior. These * are the values set by crush_create(). It does not guarantee a * stable mapping after an upgrade. * * For instance when a bug is fixed it may significantly change the * mapping. In that case a new tunable (say tunable_new) is added so * the caller can control when the bug fix is activated. The * set_optimal_crush_map() function will always set all tunables, * including tunable_new, to fix all bugs even if it means changing * the mapping. If the caller needs fine grained control on the * tunables to upgrade to a new version without changing the mapping, * it needs to set the __crush_map__ tunables individually. * * See the __crush_map__ documentation for more information about * each tunable. * * @param map a crush_map */ extern void set_optimal_crush_map(struct crush_map *map); #endif
13,701
40.147147
279
h
null
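A hypothetical sketch (not one of the files in this dump) of how the builder API documented above might be driven end to end. It assumes crush_create() from the earlier part of builder.h takes no arguments and that rule type 1 means "replicated"; the rule is sized by hand with the crush_rule_size() macro from crush.h, and all names and weights are invented for illustration.

#include <cstdlib>
#include "crush/crush.h"
#include "crush/builder.h"
#include "crush/hash.h"

// Build a map holding one straw2 "host" bucket (type 1) that contains
// devices 0 and 1, plus a rule that picks two of those devices.
struct crush_map *build_tiny_map()
{
  struct crush_map *map = crush_create();   // assumption: no-arg helper from builder.h

  int items[] = {0, 1};                     // device ids
  int weights[] = {0x10000, 0x10000};       // 16.16 fixed point, i.e. weight 1.0 each

  struct crush_bucket *host = crush_make_bucket(
      map, CRUSH_BUCKET_STRAW2, CRUSH_HASH_RJENKINS1,
      /* type = */ 1, /* size = */ 2, items, weights);
  int host_id = 0;
  crush_add_bucket(map, 0, host, &host_id); // 0 asks for the lowest free bucket id

  // Three steps: take the host, choose two devices (type 0), emit the result.
  struct crush_rule *rule =
      static_cast<struct crush_rule *>(std::calloc(1, crush_rule_size(3)));
  rule->len = 3;
  rule->type = 1;                           // assumption: 1 == replicated rule type
  crush_rule_set_step(rule, 0, CRUSH_RULE_TAKE, host_id, 0);
  crush_rule_set_step(rule, 1, CRUSH_RULE_CHOOSE_FIRSTN, 2, /* type = */ 0);
  crush_rule_set_step(rule, 2, CRUSH_RULE_EMIT, 0, 0);
  crush_add_rule(map, rule, -1);            // -1 picks the lowest free rule id
  return map;
}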
ceph-main/src/crush/crush.h
#ifndef CEPH_CRUSH_CRUSH_H #define CEPH_CRUSH_CRUSH_H #ifdef __KERNEL__ # include <linux/types.h> #else # include "crush_compat.h" #endif /* * CRUSH is a pseudo-random data distribution algorithm that * efficiently distributes input values (typically, data objects) * across a heterogeneous, structured storage cluster. * * The algorithm was originally described in detail in this paper * (although the algorithm has evolved somewhat since then): * * http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf * * LGPL-2.1 or LGPL-3.0 */ #define CRUSH_MAGIC 0x00010000ul /* for detecting algorithm revisions */ #define CRUSH_MAX_DEPTH 10 /* max crush hierarchy depth */ #define CRUSH_MAX_RULES (1<<8) /* max crush rule id */ #define CRUSH_MAX_DEVICE_WEIGHT (100u * 0x10000u) #define CRUSH_MAX_BUCKET_WEIGHT (65535u * 0x10000u) #define CRUSH_ITEM_UNDEF 0x7ffffffe /* undefined result (internal use only) */ /** @ingroup API * The equivalent of NULL for an item, i.e. the absence of an item. */ #define CRUSH_ITEM_NONE 0x7fffffff /* * CRUSH uses user-defined "rules" to describe how inputs should be * mapped to devices. A rule consists of sequence of steps to perform * to generate the set of output devices. */ struct crush_rule_step { __u32 op; __s32 arg1; __s32 arg2; }; /** @ingroup API */ enum crush_opcodes { /*! do nothing */ CRUSH_RULE_NOOP = 0, CRUSH_RULE_TAKE = 1, /* arg1 = value to start with */ CRUSH_RULE_CHOOSE_FIRSTN = 2, /* arg1 = num items to pick */ /* arg2 = type */ CRUSH_RULE_CHOOSE_INDEP = 3, /* same */ CRUSH_RULE_EMIT = 4, /* no args */ CRUSH_RULE_CHOOSELEAF_FIRSTN = 6, CRUSH_RULE_CHOOSELEAF_INDEP = 7, CRUSH_RULE_SET_CHOOSE_TRIES = 8, /* override choose_total_tries */ CRUSH_RULE_SET_CHOOSELEAF_TRIES = 9, /* override chooseleaf_descend_once */ CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES = 10, CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES = 11, CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12, CRUSH_RULE_SET_CHOOSELEAF_STABLE = 13 }; /* * for specifying choose num (arg1) relative to the max parameter * passed to do_rule */ #define CRUSH_CHOOSE_N 0 #define CRUSH_CHOOSE_N_MINUS(x) (-(x)) struct crush_rule { __u32 len; __u8 __unused_was_rule_mask_ruleset; __u8 type; __u8 deprecated_min_size; __u8 deprecated_max_size; struct crush_rule_step steps[0]; }; #define crush_rule_size(len) (sizeof(struct crush_rule) + \ (len)*sizeof(struct crush_rule_step)) /* * A bucket is a named container of other items (either devices or * other buckets). */ /** @ingroup API * * Items within a bucket are chosen with crush_do_rule() using one of * three algorithms representing a tradeoff between performance and * reorganization efficiency. If you are unsure of which bucket type * to use, we recommend using ::CRUSH_BUCKET_STRAW2. * * The table summarizes how the speed of each option measures up * against mapping stability when items are added or removed. * * Bucket Alg Speed Additions Removals * ------------------------------------------------ * uniform O(1) poor poor * list O(n) optimal poor * straw2 O(n) optimal optimal */ enum crush_algorithm { /*! * Devices are rarely added individually in a large system. * Instead, new storage is typically deployed in blocks of identical * devices, often as an additional shelf in a server rack or perhaps * an entire cabinet. Devices reaching their end of life are often * similarly decommissioned as a set (individual failures aside), * making it natural to treat them as a unit. CRUSH uniform buckets * are used to represent an identical set of devices in such * circumstances. 
The key advantage in doing so is performance * related: CRUSH can map replicas into uniform buckets in constant * time. In cases where the uniformity restrictions are not * appropriate, other bucket types can be used. If the size of a * uniform bucket changes, there is a complete reshuffling of data * between devices, much like conventional hash-based distribution * strategies. */ CRUSH_BUCKET_UNIFORM = 1, /*! * List buckets structure their contents as a linked list, and * can contain items with arbitrary weights. To place a * replica, CRUSH begins at the head of the list with the most * recently added item and compares its weight to the sum of * all remaining items' weights. Depending on the value of * hash( x , r , item), either the current item is chosen with * the appropriate probability, or the process continues * recursively down the list. This is a natural and intuitive * choice for an expanding cluster: either an object is * relocated to the newest device with some appropriate * probability, or it remains on the older devices as before. * The result is optimal data migration when items are added * to the bucket. Items removed from the middle or tail of the * list, however, can result in a significant amount of * unnecessary movement, making list buckets most suitable for * circumstances in which they never (or very rarely) shrink. */ CRUSH_BUCKET_LIST = 2, /*! @cond INTERNAL */ CRUSH_BUCKET_TREE = 3, CRUSH_BUCKET_STRAW = 4, /*! @endcond */ /*! * List and tree buckets are structured such that a limited * number of hash values need to be calculated and compared to * weights in order to select a bucket item. In doing so, * they divide and conquer in a way that either gives certain * items precedence (e. g., those at the beginning of a list) * or obviates the need to consider entire subtrees of items * at all. That improves the performance of the replica * placement process, but can also introduce suboptimal * reorganization behavior when the contents of a bucket * change due an addition, removal, or re-weighting of an * item. * * The straw2 bucket type allows all items to fairly "compete" * against each other for replica placement through a process * analogous to a draw of straws. To place a replica, a straw * of random length is drawn for each item in the bucket. The * item with the longest straw wins. The length of each straw * is initially a value in a fixed range. Each straw length * is scaled by a factor based on the item's weight so that * heavily weighted items are more likely to win the draw. * Although this process is almost twice as slow (on average) * than a list bucket and even slower than a tree bucket * (which scales logarithmically), straw2 buckets result in * optimal data movement between nested items when modified. */ CRUSH_BUCKET_STRAW2 = 5, }; extern const char *crush_bucket_alg_name(int alg); /* * although tree was a legacy algorithm, it has been buggy, so * exclude it. */ #define CRUSH_LEGACY_ALLOWED_BUCKET_ALGS ( \ (1 << CRUSH_BUCKET_UNIFORM) | \ (1 << CRUSH_BUCKET_LIST) | \ (1 << CRUSH_BUCKET_STRAW)) /** @ingroup API * * A bucket contains __size__ __items__ which are either positive * numbers or negative numbers that reference other buckets and is * uniquely identified with __id__ which is a negative number. The * __weight__ of a bucket is the cumulative weight of all its * children. A bucket is assigned a ::crush_algorithm that is used by * crush_do_rule() to draw an item depending on its weight. 
A bucket * can be assigned a strictly positive (> 0) __type__ defined by the * caller. The __type__ can be used by crush_do_rule(), when it is * given as an argument of a rule step. * * A pointer to crush_bucket can safely be cast into the following * structure, depending on the value of __alg__: * * - __alg__ == ::CRUSH_BUCKET_UNIFORM cast to crush_bucket_uniform * - __alg__ == ::CRUSH_BUCKET_LIST cast to crush_bucket_list * - __alg__ == ::CRUSH_BUCKET_STRAW2 cast to crush_bucket_straw2 * * The weight of each item depends on the algorithm and the * information about it is available in the corresponding structure * (crush_bucket_uniform, crush_bucket_list or crush_bucket_straw2). * * See crush_map for more information on how __id__ is used * to reference the bucket. */ struct crush_bucket { __s32 id; /*!< bucket identifier, < 0 and unique within a crush_map */ __u16 type; /*!< > 0 bucket type, defined by the caller */ __u8 alg; /*!< the item selection ::crush_algorithm */ /*! @cond INTERNAL */ __u8 hash; /* which hash function to use, CRUSH_HASH_* */ /*! @endcond */ __u32 weight; /*!< 16.16 fixed point cumulated children weight */ __u32 size; /*!< size of the __items__ array */ __s32 *items; /*!< array of children: < 0 are buckets, >= 0 items */ }; /** @ingroup API * * Replacement weights for each item in a bucket. The size of the * array must be exactly the size of the straw2 bucket, just as the * item_weights array. * */ struct crush_weight_set { __u32 *weights; /*!< 16.16 fixed point weights in the same order as items */ __u32 size; /*!< size of the __weights__ array */ }; /** @ingroup API * * Replacement weights and ids for a given straw2 bucket, for * placement purposes. * * When crush_do_rule() chooses the Nth item from a straw2 bucket, the * replacement weights found at __weight_set[N]__ are used instead of * the weights from __item_weights__. If __N__ is greater than * __weight_set_positions__, the weights found at __weight_set_positions-1__ are * used instead. For instance if __weight_set__ is: * * [ [ 0x10000, 0x20000 ], // position 0 * [ 0x20000, 0x40000 ] ] // position 1 * * choosing the 0th item will use position 0 weights [ 0x10000, 0x20000 ] * choosing the 1th item will use position 1 weights [ 0x20000, 0x40000 ] * choosing the 2th item will use position 1 weights [ 0x20000, 0x40000 ] * etc. * */ struct crush_choose_arg { __s32 *ids; /*!< values to use instead of items */ __u32 ids_size; /*!< size of the __ids__ array */ struct crush_weight_set *weight_set; /*!< weight replacements for a given position */ __u32 weight_set_positions; /*!< size of the __weight_set__ array */ }; /** @ingroup API * * Replacement weights and ids for each bucket in the crushmap. The * __size__ of the __args__ array must be exactly the same as the * __map->max_buckets__. * * The __crush_choose_arg__ at index N will be used when choosing * an item from the bucket __map->buckets[N]__ bucket, provided it * is a straw2 bucket. * */ struct crush_choose_arg_map { struct crush_choose_arg *args; /*!< replacement for each bucket in the crushmap */ __u32 size; /*!< size of the __args__ array */ }; /** @ingroup API * The weight of each item in the bucket when * __h.alg__ == ::CRUSH_BUCKET_UNIFORM. */ struct crush_bucket_uniform { struct crush_bucket h; /*!< generic bucket information */ __u32 item_weight; /*!< 16.16 fixed point weight for each item */ }; /** @ingroup API * The weight of each item in the bucket when * __h.alg__ == ::CRUSH_BUCKET_LIST. 
* * The weight of __h.items[i]__ is __item_weights[i]__ for i in * [0,__h.size__[. The __sum_weight__[i] is the sum of the __item_weights[j]__ * for j in [0,i[. * */ struct crush_bucket_list { struct crush_bucket h; /*!< generic bucket information */ __u32 *item_weights; /*!< 16.16 fixed point weight for each item */ __u32 *sum_weights; /*!< 16.16 fixed point sum of the weights */ }; struct crush_bucket_tree { struct crush_bucket h; /* note: h.size is _tree_ size, not number of actual items */ __u8 num_nodes; __u32 *node_weights; }; struct crush_bucket_straw { struct crush_bucket h; __u32 *item_weights; /* 16-bit fixed point */ __u32 *straws; /* 16-bit fixed point */ }; /** @ingroup API * The weight of each item in the bucket when * __h.alg__ == ::CRUSH_BUCKET_STRAW2. * * The weight of __h.items[i]__ is __item_weights[i]__ for i in * [0,__h.size__]. */ struct crush_bucket_straw2 { struct crush_bucket h; /*!< generic bucket information */ __u32 *item_weights; /*!< 16.16 fixed point weight for each item */ }; /** @ingroup API * * A crush map define a hierarchy of crush_bucket that end with leaves * (buckets and leaves are called items) and a set of crush_rule to * map an integer to items with the crush_do_rule() function. * */ struct crush_map { /*! An array of crush_bucket pointers of size __max_buckets__. * An element of the array may be NULL if the bucket was removed with * crush_remove_bucket(). The buckets must be added with crush_add_bucket(). * The bucket found at __buckets[i]__ must have a crush_bucket.id == -1-i. */ struct crush_bucket **buckets; /*! An array of crush_rule pointers of size __max_rules__. * An element of the array may be NULL if the rule was removed (there is * no API to do so but there may be one in the future). The rules must be added * with crush_add_rule(). */ struct crush_rule **rules; __s32 max_buckets; /*!< the size of __buckets__ */ __u32 max_rules; /*!< the size of __rules__ */ /*! The value of the highest item stored in the crush_map + 1 */ __s32 max_devices; /*! Backward compatibility tunable. It implements a bad solution * and must always be set to 0 except for backward compatibility * purposes */ __u32 choose_local_tries; /*! Backward compatibility tunable. It implements a bad solution * and must always be set to 0 except for backward compatibility * purposes */ __u32 choose_local_fallback_tries; /*! Tunable. The default value when the CHOOSE_TRIES or * CHOOSELEAF_TRIES steps are omitted in a rule. See the * documentation for crush_rule_set_step() for more * information */ __u32 choose_total_tries; /*! Backward compatibility tunable. It should always be set * to 1 except for backward compatibility. Implemented in 2012 * it was generalized late 2013 and is mostly unused except * in one border case, reason why it must be set to 1. * * Attempt chooseleaf inner descent once for firstn mode; on * reject retry outer descent. Note that this does *not* * apply to a collision: in that case we will retry as we * used to. */ __u32 chooseleaf_descend_once; /*! Backward compatibility tunable. It is a fix for bad * mappings implemented in 2014 at * https://github.com/ceph/ceph/pull/1185. It should always * be set to 1 except for backward compatibility. * * If non-zero, feed r into chooseleaf, bit-shifted right by * (r-1) bits. a value of 1 is best for new clusters. for * legacy clusters that want to limit reshuffling, a value of * 3 or 4 will make the mappings line up a bit better with * previous mappings. */ __u8 chooseleaf_vary_r; /*! Backward compatibility tunable. 
It is an improvement that * avoids unnecessary mapping changes, implemented at * https://github.com/ceph/ceph/pull/6572 and explained in * this post: "chooseleaf may cause some unnecessary pg * migrations" in October 2015 * https://www.mail-archive.com/[email protected]/msg26075.html * It should always be set to 1 except for backward compatibility. */ __u8 chooseleaf_stable; /*! @cond INTERNAL */ /* This value is calculated after decode or construction by the builder. It is exposed here (rather than having a 'build CRUSH working space' function) so that callers can reserve a static buffer, allocate space on the stack, or otherwise avoid calling into the heap allocator if they want to. The size of the working space depends on the map, while the size of the scratch vector passed to the mapper depends on the size of the desired result set. Nothing stops the caller from allocating both in one swell foop and passing in two points, though. */ size_t working_size; #ifndef __KERNEL__ /*! @endcond */ /*! Backward compatibility tunable. It is a fix for the straw * scaler values for the straw algorithm which is deprecated * (straw2 replaces it) implemented at * https://github.com/ceph/ceph/pull/3057. It should always * be set to 1 except for backward compatibility. * */ __u8 straw_calc_version; /*! @cond INTERNAL */ /* * allowed bucket algs is a bitmask, here the bit positions * are CRUSH_BUCKET_*. note that these are *bits* and * CRUSH_BUCKET_* values are not, so we need to or together (1 * << CRUSH_BUCKET_WHATEVER). The 0th bit is not used to * minimize confusion (bucket type values start at 1). */ __u32 allowed_bucket_algs; __u32 *choose_tries; #endif /*! @endcond */ }; /* crush.c */ /** @ingroup API * * Return the 16.16 fixed point weight of the item at __pos__ (zero * based index) within the bucket __b__. If __pos__ is negative or * greater or equal to the number of items in the bucket, return 0. * * @param b the bucket containing items * @param pos the zero based index of the item * * @returns the 16.16 fixed point item weight */ extern int crush_get_bucket_item_weight(const struct crush_bucket *b, int pos); extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b); extern void crush_destroy_bucket_list(struct crush_bucket_list *b); extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b); extern void crush_destroy_bucket_straw(struct crush_bucket_straw *b); extern void crush_destroy_bucket_straw2(struct crush_bucket_straw2 *b); /** @ingroup API * * Deallocate a bucket created via crush_add_bucket(). * * @param b the bucket to deallocate */ extern void crush_destroy_bucket(struct crush_bucket *b); /** @ingroup API * * Deallocate a rule created via crush_add_rule(). * * @param r the rule to deallocate */ extern void crush_destroy_rule(struct crush_rule *r); /** @ingroup API * * Deallocate the __map__, previously allocated with crush_create. 
* * @param map the crush map */ extern void crush_destroy(struct crush_map *map); static inline int crush_calc_tree_node(int i) { return ((i+1) << 1)-1; } static inline const char *crush_alg_name(int alg) { switch (alg) { case CRUSH_BUCKET_UNIFORM: return "uniform"; case CRUSH_BUCKET_LIST: return "list"; case CRUSH_BUCKET_TREE: return "tree"; case CRUSH_BUCKET_STRAW: return "straw"; case CRUSH_BUCKET_STRAW2: return "straw2"; default: return "unknown"; } } /* --------------------------------------------------------------------- Private --------------------------------------------------------------------- */ /* These data structures are private to the CRUSH implementation. They are exposed in this header file because builder needs their definitions to calculate the total working size. Moving this out of the crush map allow us to treat the CRUSH map as immutable within the mapper and removes the requirement for a CRUSH map lock. */ struct crush_work_bucket { __u32 perm_x; /* @x for which *perm is defined */ __u32 perm_n; /* num elements of *perm that are permuted/defined */ __u32 *perm; /* Permutation of the bucket's items */ } __attribute__ ((packed)); struct crush_work { struct crush_work_bucket **work; /* Per-bucket working store */ }; #endif
20,023
36.081481
88
h
null
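A small hypothetical helper (not part of the dump) showing the 16.16 fixed-point convention used throughout the structures above; crush_get_bucket_item_weight() already dispatches on the bucket algorithm and returns 0 for out-of-range positions, so the conversion is the only work left.

#include "crush/crush.h"

// Convert the 16.16 fixed-point weight of the item at position `pos` of
// bucket `b` into a plain double (1.0 == 0x10000).
static double bucket_item_weight(const struct crush_bucket *b, int pos)
{
  return crush_get_bucket_item_weight(b, pos) / 65536.0;
}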
ceph-main/src/crush/crush_compat.h
#ifndef CEPH_CRUSH_COMPAT_H #define CEPH_CRUSH_COMPAT_H #include "include/int_types.h" #include <assert.h> #include <stdio.h> #include <stdlib.h> #include <string.h> /* asm-generic/bug.h */ #define BUG_ON(x) assert(!(x)) /* linux/kernel.h */ #define U8_MAX ((__u8)~0U) #define S8_MAX ((__s8)(U8_MAX>>1)) #define S8_MIN ((__s8)(-S8_MAX - 1)) #define U16_MAX ((__u16)~0U) #define S16_MAX ((__s16)(U16_MAX>>1)) #define S16_MIN ((__s16)(-S16_MAX - 1)) #define U32_MAX ((__u32)~0U) #define S32_MAX ((__s32)(U32_MAX>>1)) #define S32_MIN ((__s32)(-S32_MAX - 1)) #define U64_MAX ((__u64)~0ULL) #define S64_MAX ((__s64)(U64_MAX>>1)) #define S64_MIN ((__s64)(-S64_MAX - 1)) /* linux/math64.h */ #define div64_s64(dividend, divisor) ((dividend) / (divisor)) /* linux/slab.h */ #define kmalloc(size, flags) malloc(size) #define kfree(x) do { if (x) free(x); } while (0) #endif /* CEPH_CRUSH_COMPAT_H */
914
21.875
61
h
null
ceph-main/src/crush/crush_ln_table.h
/* * Ceph - scalable distributed file system * * Copyright (C) 2015 Intel Corporation All Rights Reserved * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #ifndef CEPH_CRUSH_LN_H #define CEPH_CRUSH_LN_H #ifdef __KERNEL__ # include <linux/types.h> #else # include "crush_compat.h" #endif /* * RH_LH_tbl[2*k] = 2^48/(1.0+k/128.0) * RH_LH_tbl[2*k+1] = 2^48*log2(1.0+k/128.0) */ static __s64 __RH_LH_tbl[128*2+2] = { 0x0001000000000000ll, 0x0000000000000000ll, 0x0000fe03f80fe040ll, 0x000002dfca16dde1ll, 0x0000fc0fc0fc0fc1ll, 0x000005b9e5a170b4ll, 0x0000fa232cf25214ll, 0x0000088e68ea899all, 0x0000f83e0f83e0f9ll, 0x00000b5d69bac77ell, 0x0000f6603d980f67ll, 0x00000e26fd5c8555ll, 0x0000f4898d5f85bcll, 0x000010eb389fa29fll, 0x0000f2b9d6480f2cll, 0x000013aa2fdd27f1ll, 0x0000f0f0f0f0f0f1ll, 0x00001663f6fac913ll, 0x0000ef2eb71fc435ll, 0x00001918a16e4633ll, 0x0000ed7303b5cc0fll, 0x00001bc84240adabll, 0x0000ebbdb2a5c162ll, 0x00001e72ec117fa5ll, 0x0000ea0ea0ea0ea1ll, 0x00002118b119b4f3ll, 0x0000e865ac7b7604ll, 0x000023b9a32eaa56ll, 0x0000e6c2b4481cd9ll, 0x00002655d3c4f15cll, 0x0000e525982af70dll, 0x000028ed53f307eell, 0x0000e38e38e38e39ll, 0x00002b803473f7adll, 0x0000e1fc780e1fc8ll, 0x00002e0e85a9de04ll, 0x0000e070381c0e08ll, 0x0000309857a05e07ll, 0x0000dee95c4ca038ll, 0x0000331dba0efce1ll, 0x0000dd67c8a60dd7ll, 0x0000359ebc5b69d9ll, 0x0000dbeb61eed19dll, 0x0000381b6d9bb29bll, 0x0000da740da740dbll, 0x00003a93dc9864b2ll, 0x0000d901b2036407ll, 0x00003d0817ce9cd4ll, 0x0000d79435e50d7all, 0x00003f782d7204d0ll, 0x0000d62b80d62b81ll, 0x000041e42b6ec0c0ll, 0x0000d4c77b03531ell, 0x0000444c1f6b4c2dll, 0x0000d3680d3680d4ll, 0x000046b016ca47c1ll, 0x0000d20d20d20d21ll, 0x000049101eac381cll, 0x0000d0b69fcbd259ll, 0x00004b6c43f1366all, 0x0000cf6474a8819fll, 0x00004dc4933a9337ll, 0x0000ce168a772509ll, 0x0000501918ec6c11ll, 0x0000cccccccccccdll, 0x00005269e12f346ell, 0x0000cb8727c065c4ll, 0x000054b6f7f1325all, 0x0000ca4587e6b750ll, 0x0000570068e7ef5all, 0x0000c907da4e8712ll, 0x000059463f919deell, 0x0000c7ce0c7ce0c8ll, 0x00005b8887367433ll, 0x0000c6980c6980c7ll, 0x00005dc74ae9fbecll, 0x0000c565c87b5f9ell, 0x00006002958c5871ll, 0x0000c4372f855d83ll, 0x0000623a71cb82c8ll, 0x0000c30c30c30c31ll, 0x0000646eea247c5cll, 0x0000c1e4bbd595f7ll, 0x000066a008e4788cll, 0x0000c0c0c0c0c0c1ll, 0x000068cdd829fd81ll, 0x0000bfa02fe80bfbll, 0x00006af861e5fc7dll, 0x0000be82fa0be830ll, 0x00006d1fafdce20all, 0x0000bd6910470767ll, 0x00006f43cba79e40ll, 0x0000bc52640bc527ll, 0x00007164beb4a56dll, 0x0000bb3ee721a54ell, 0x000073829248e961ll, 0x0000ba2e8ba2e8bbll, 0x0000759d4f80cba8ll, 0x0000b92143fa36f6ll, 0x000077b4ff5108d9ll, 0x0000b81702e05c0cll, 0x000079c9aa879d53ll, 0x0000b70fbb5a19bfll, 0x00007bdb59cca388ll, 0x0000b60b60b60b61ll, 0x00007dea15a32c1bll, 0x0000b509e68a9b95ll, 0x00007ff5e66a0ffell, 0x0000b40b40b40b41ll, 0x000081fed45cbccbll, 0x0000b30f63528918ll, 0x00008404e793fb81ll, 0x0000b21642c8590cll, 0x000086082806b1d5ll, 0x0000b11fd3b80b12ll, 0x000088089d8a9e47ll, 0x0000b02c0b02c0b1ll, 0x00008a064fd50f2all, 0x0000af3addc680b0ll, 0x00008c01467b94bbll, 0x0000ae4c415c9883ll, 0x00008df988f4ae80ll, 0x0000ad602b580ad7ll, 0x00008fef1e987409ll, 0x0000ac7691840ac8ll, 0x000091e20ea1393ell, 0x0000ab8f69e2835all, 0x000093d2602c2e5fll, 0x0000aaaaaaaaaaabll, 0x000095c01a39fbd6ll, 0x0000a9c84a47a080ll, 0x000097ab43af59f9ll, 0x0000a8e83f5717c1ll, 0x00009993e355a4e5ll, 0x0000a80a80a80a81ll, 
0x00009b79ffdb6c8bll, 0x0000a72f0539782all, 0x00009d5d9fd5010bll, 0x0000a655c4392d7cll, 0x00009f3ec9bcfb80ll, 0x0000a57eb50295fbll, 0x0000a11d83f4c355ll, 0x0000a4a9cf1d9684ll, 0x0000a2f9d4c51039ll, 0x0000a3d70a3d70a4ll, 0x0000a4d3c25e68dcll, 0x0000a3065e3fae7dll, 0x0000a6ab52d99e76ll, 0x0000a237c32b16d0ll, 0x0000a8808c384547ll, 0x0000a16b312ea8fdll, 0x0000aa5374652a1cll, 0x0000a0a0a0a0a0a1ll, 0x0000ac241134c4e9ll, 0x00009fd809fd80a0ll, 0x0000adf26865a8a1ll, 0x00009f1165e72549ll, 0x0000afbe7fa0f04dll, 0x00009e4cad23dd60ll, 0x0000b1885c7aa982ll, 0x00009d89d89d89d9ll, 0x0000b35004723c46ll, 0x00009cc8e160c3fcll, 0x0000b5157cf2d078ll, 0x00009c09c09c09c1ll, 0x0000b6d8cb53b0call, 0x00009b4c6f9ef03bll, 0x0000b899f4d8ab63ll, 0x00009a90e7d95bc7ll, 0x0000ba58feb2703all, 0x000099d722dabde6ll, 0x0000bc15edfeed32ll, 0x0000991f1a515886ll, 0x0000bdd0c7c9a817ll, 0x00009868c809868dll, 0x0000bf89910c1678ll, 0x000097b425ed097cll, 0x0000c1404eadf383ll, 0x000097012e025c05ll, 0x0000c2f5058593d9ll, 0x0000964fda6c0965ll, 0x0000c4a7ba58377cll, 0x000095a02568095bll, 0x0000c65871da59ddll, 0x000094f2094f2095ll, 0x0000c80730b00016ll, 0x0000944580944581ll, 0x0000c9b3fb6d0559ll, 0x0000939a85c4093all, 0x0000cb5ed69565afll, 0x000092f113840498ll, 0x0000cd07c69d8702ll, 0x0000924924924925ll, 0x0000ceaecfea8085ll, 0x000091a2b3c4d5e7ll, 0x0000d053f6d26089ll, 0x000090fdbc090fdcll, 0x0000d1f73f9c70c0ll, 0x0000905a38633e07ll, 0x0000d398ae817906ll, 0x00008fb823ee08fcll, 0x0000d53847ac00a6ll, 0x00008f1779d9fdc4ll, 0x0000d6d60f388e41ll, 0x00008e78356d1409ll, 0x0000d8720935e643ll, 0x00008dda5202376all, 0x0000da0c39a54804ll, 0x00008d3dcb08d3ddll, 0x0000dba4a47aa996ll, 0x00008ca29c046515ll, 0x0000dd3b4d9cf24bll, 0x00008c08c08c08c1ll, 0x0000ded038e633f3ll, 0x00008b70344a139cll, 0x0000e0636a23e2eell, 0x00008ad8f2fba939ll, 0x0000e1f4e5170d02ll, 0x00008a42f870566all, 0x0000e384ad748f0ell, 0x000089ae4089ae41ll, 0x0000e512c6e54998ll, 0x0000891ac73ae982ll, 0x0000e69f35065448ll, 0x0000888888888889ll, 0x0000e829fb693044ll, 0x000087f78087f781ll, 0x0000e9b31d93f98ell, 0x00008767ab5f34e5ll, 0x0000eb3a9f019750ll, 0x000086d905447a35ll, 0x0000ecc08321eb30ll, 0x0000864b8a7de6d2ll, 0x0000ee44cd59ffabll, 0x000085bf37612cefll, 0x0000efc781043579ll, 0x0000853408534086ll, 0x0000f148a170700all, 0x000084a9f9c8084bll, 0x0000f2c831e44116ll, 0x0000842108421085ll, 0x0000f446359b1353ll, 0x0000839930523fbfll, 0x0000f5c2afc65447ll, 0x000083126e978d50ll, 0x0000f73da38d9d4all, 0x0000828cbfbeb9a1ll, 0x0000f8b7140edbb1ll, 0x0000820820820821ll, 0x0000fa2f045e7832ll, 0x000081848da8faf1ll, 0x0000fba577877d7dll, 0x0000810204081021ll, 0x0000fd1a708bbe11ll, 0x0000808080808081ll, 0x0000fe8df263f957ll, 0x0000800000000000ll, 0x0000ffff00000000ll, }; /* * LL_tbl[k] = 2^48*log2(1.0+k/2^15) */ static __s64 __LL_tbl[256] = { 0x0000000000000000ull, 0x00000002e2a60a00ull, 0x000000070cb64ec5ull, 0x00000009ef50ce67ull, 0x0000000cd1e588fdull, 0x0000000fb4747e9cull, 0x0000001296fdaf5eull, 0x0000001579811b58ull, 0x000000185bfec2a1ull, 0x0000001b3e76a552ull, 0x0000001e20e8c380ull, 0x0000002103551d43ull, 0x00000023e5bbb2b2ull, 0x00000026c81c83e4ull, 0x00000029aa7790f0ull, 0x0000002c8cccd9edull, 0x0000002f6f1c5ef2ull, 0x0000003251662017ull, 0x0000003533aa1d71ull, 0x0000003815e8571aull, 0x0000003af820cd26ull, 0x0000003dda537faeull, 0x00000040bc806ec8ull, 0x000000439ea79a8cull, 0x0000004680c90310ull, 0x0000004962e4a86cull, 0x0000004c44fa8ab6ull, 0x0000004f270aaa06ull, 0x0000005209150672ull, 0x00000054eb19a013ull, 0x00000057cd1876fdull, 0x0000005aaf118b4aull, 0x0000005d9104dd0full, 
0x0000006072f26c64ull, 0x0000006354da3960ull, 0x0000006636bc441aull, 0x0000006918988ca8ull, 0x0000006bfa6f1322ull, 0x0000006edc3fd79full, 0x00000071be0ada35ull, 0x000000749fd01afdull, 0x00000077818f9a0cull, 0x0000007a6349577aull, 0x0000007d44fd535eull, 0x0000008026ab8dceull, 0x00000083085406e3ull, 0x00000085e9f6beb2ull, 0x00000088cb93b552ull, 0x0000008bad2aeadcull, 0x0000008e8ebc5f65ull, 0x0000009170481305ull, 0x0000009451ce05d3ull, 0x00000097334e37e5ull, 0x0000009a14c8a953ull, 0x0000009cf63d5a33ull, 0x0000009fd7ac4a9dull, 0x000000a2b07f3458ull, 0x000000a59a78ea6aull, 0x000000a87bd699fbull, 0x000000ab5d2e8970ull, 0x000000ae3e80b8e3ull, 0x000000b11fcd2869ull, 0x000000b40113d818ull, 0x000000b6e254c80aull, 0x000000b9c38ff853ull, 0x000000bca4c5690cull, 0x000000bf85f51a4aull, 0x000000c2671f0c26ull, 0x000000c548433eb6ull, 0x000000c82961b211ull, 0x000000cb0a7a664dull, 0x000000cdeb8d5b82ull, 0x000000d0cc9a91c8ull, 0x000000d3ada20933ull, 0x000000d68ea3c1ddull, 0x000000d96f9fbbdbull, 0x000000dc5095f744ull, 0x000000df31867430ull, 0x000000e2127132b5ull, 0x000000e4f35632eaull, 0x000000e7d43574e6ull, 0x000000eab50ef8c1ull, 0x000000ed95e2be90ull, 0x000000f076b0c66cull, 0x000000f35779106aull, 0x000000f6383b9ca2ull, 0x000000f918f86b2aull, 0x000000fbf9af7c1aull, 0x000000feda60cf88ull, 0x00000101bb0c658cull, 0x000001049bb23e3cull, 0x000001077c5259afull, 0x0000010a5cecb7fcull, 0x0000010d3d81593aull, 0x000001101e103d7full, 0x00000112fe9964e4ull, 0x00000115df1ccf7eull, 0x00000118bf9a7d64ull, 0x0000011ba0126eadull, 0x0000011e8084a371ull, 0x0000012160f11bc6ull, 0x000001244157d7c3ull, 0x0000012721b8d77full, 0x0000012a02141b10ull, 0x0000012ce269a28eull, 0x0000012fc2b96e0full, 0x00000132a3037daaull, 0x000001358347d177ull, 0x000001386386698cull, 0x0000013b43bf45ffull, 0x0000013e23f266e9ull, 0x00000141041fcc5eull, 0x00000143e4477678ull, 0x00000146c469654bull, 0x00000149a48598f0ull, 0x0000014c849c117cull, 0x0000014f64accf08ull, 0x0000015244b7d1a9ull, 0x0000015524bd1976ull, 0x0000015804bca687ull, 0x0000015ae4b678f2ull, 0x0000015dc4aa90ceull, 0x00000160a498ee31ull, 0x0000016384819134ull, 0x00000166646479ecull, 0x000001694441a870ull, 0x0000016c24191cd7ull, 0x0000016df6ca19bdull, 0x00000171e3b6d7aaull, 0x00000174c37d1e44ull, 0x00000177a33dab1cull, 0x0000017a82f87e49ull, 0x0000017d62ad97e2ull, 0x00000180425cf7feull, 0x00000182b07f3458ull, 0x0000018601aa8c19ull, 0x00000188e148c046ull, 0x0000018bc0e13b52ull, 0x0000018ea073fd52ull, 0x000001918001065dull, 0x000001945f88568bull, 0x000001973f09edf2ull, 0x0000019a1e85ccaaull, 0x0000019cfdfbf2c8ull, 0x0000019fdd6c6063ull, 0x000001a2bcd71593ull, 0x000001a59c3c126eull, 0x000001a87b9b570bull, 0x000001ab5af4e380ull, 0x000001ae3a48b7e5ull, 0x000001b11996d450ull, 0x000001b3f8df38d9ull, 0x000001b6d821e595ull, 0x000001b9b75eda9bull, 0x000001bc96961803ull, 0x000001bf75c79de3ull, 0x000001c254f36c51ull, 0x000001c534198365ull, 0x000001c81339e336ull, 0x000001caf2548bd9ull, 0x000001cdd1697d67ull, 0x000001d0b078b7f5ull, 0x000001d38f823b9aull, 0x000001d66e86086dull, 0x000001d94d841e86ull, 0x000001dc2c7c7df9ull, 0x000001df0b6f26dfull, 0x000001e1ea5c194eull, 0x000001e4c943555dull, 0x000001e7a824db23ull, 0x000001ea8700aab5ull, 0x000001ed65d6c42bull, 0x000001f044a7279dull, 0x000001f32371d51full, 0x000001f60236cccaull, 0x000001f8e0f60eb3ull, 0x000001fbbfaf9af3ull, 0x000001fe9e63719eull, 0x000002017d1192ccull, 0x000002045bb9fe94ull, 0x000002073a5cb50dull, 0x00000209c06e6212ull, 0x0000020cf791026aull, 0x0000020fd622997cull, 0x00000212b07f3458ull, 0x000002159334a8d8ull, 0x0000021871b52150ull, 
0x0000021b502fe517ull, 0x0000021d6a73a78full, 0x000002210d144eeeull, 0x00000223eb7df52cull, 0x00000226c9e1e713ull, 0x00000229a84024bbull, 0x0000022c23679b4eull, 0x0000022f64eb83a8ull, 0x000002324338a51bull, 0x00000235218012a9ull, 0x00000237ffc1cc69ull, 0x0000023a2c3b0ea4ull, 0x0000023d13ee805bull, 0x0000024035e9221full, 0x00000243788faf25ull, 0x0000024656b4e735ull, 0x00000247ed646bfeull, 0x0000024c12ee3d98ull, 0x0000024ef1025c1aull, 0x00000251cf10c799ull, 0x0000025492644d65ull, 0x000002578b1c85eeull, 0x0000025a6919d8f0ull, 0x0000025d13ee805bull, 0x0000026025036716ull, 0x0000026296453882ull, 0x00000265e0d62b53ull, 0x00000268beb701f3ull, 0x0000026b9c92265eull, 0x0000026d32f798a9ull, 0x00000271583758ebull, 0x000002743601673bull, 0x0000027713c5c3b0ull, 0x00000279f1846e5full, 0x0000027ccf3d6761ull, 0x0000027e6580aecbull, 0x000002828a9e44b3ull, 0x0000028568462932ull, 0x00000287bdbf5255ull, 0x0000028b2384de4aull, 0x0000028d13ee805bull, 0x0000029035e9221full, 0x0000029296453882ull, 0x0000029699bdfb61ull, 0x0000029902a37aabull, 0x0000029c54b864c9ull, 0x0000029deabd1083ull, 0x000002a20f9c0bb5ull, 0x000002a4c7605d61ull, 0x000002a7bdbf5255ull, 0x000002a96056dafcull, 0x000002ac3daf14efull, 0x000002af1b019ecaull, 0x000002b296453882ull, 0x000002b5d022d80full, 0x000002b8fa471cb3ull, 0x000002ba9012e713ull, 0x000002bd6d4901ccull, 0x000002c04a796cf6ull, 0x000002c327a428a6ull, 0x000002c61a5e8f4cull, 0x000002c8e1e891f6ull, 0x000002cbbf023fc2ull, 0x000002ce9c163e6eull, 0x000002d179248e13ull, 0x000002d4562d2ec6ull, 0x000002d73330209dull, 0x000002da102d63b0ull, 0x000002dced24f814ull, }; #endif
12,506
74.8
93
h
null
ceph-main/src/crush/grammar.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2004-2008 Sage Weil <[email protected]> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #ifndef CEPH_CRUSH_GRAMMAR_H #define CEPH_CRUSH_GRAMMAR_H //#define BOOST_SPIRIT_DEBUG #ifdef USE_BOOST_SPIRIT_OLD_HDR #include <boost/spirit/core.hpp> #include <boost/spirit/tree/ast.hpp> #include <boost/spirit/tree/tree_to_xml.hpp> #else #define BOOST_SPIRIT_USE_OLD_NAMESPACE #include <boost/spirit/include/classic_core.hpp> #include <boost/spirit/include/classic_ast.hpp> #include <boost/spirit/include/classic_tree_to_xml.hpp> #endif using namespace boost::spirit; struct crush_grammar : public boost::spirit::grammar<crush_grammar> { enum { _int = 1, _posint, _negint, _name, _device, _bucket_type, _bucket_id, _bucket_alg, _bucket_hash, _bucket_item, _bucket, _step_take, _step_set_chooseleaf_tries, _step_set_chooseleaf_vary_r, _step_set_chooseleaf_stable, _step_set_choose_tries, _step_set_choose_local_tries, _step_set_choose_local_fallback_tries, _step_choose, _step_chooseleaf, _step_emit, _step, _crushrule, _weight_set_weights, _weight_set, _choose_arg_ids, _choose_arg, _choose_args, _crushmap, _tunable, }; template <typename ScannerT> struct definition { boost::spirit::rule<ScannerT, boost::spirit::parser_context<>,boost::spirit::parser_tag<_int> > integer; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_posint> > posint; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_negint> > negint; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_name> > name; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_tunable> > tunable; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_device> > device; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_bucket_type> > bucket_type; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_bucket_id> > bucket_id; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_bucket_alg> > bucket_alg; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_bucket_hash> > bucket_hash; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_bucket_item> > bucket_item; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_bucket> > bucket; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_step_take> > step_take; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_step_set_choose_tries> > step_set_choose_tries; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_step_set_choose_local_tries> > step_set_choose_local_tries; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_step_set_choose_local_fallback_tries> > step_set_choose_local_fallback_tries; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_step_set_chooseleaf_tries> > step_set_chooseleaf_tries; 
boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_step_set_chooseleaf_vary_r> > step_set_chooseleaf_vary_r; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_step_set_chooseleaf_stable> > step_set_chooseleaf_stable; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_step_choose> > step_choose; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_step_chooseleaf> > step_chooseleaf; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_step_emit> > step_emit; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_step> > step; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_crushrule> > crushrule; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_weight_set_weights> > weight_set_weights; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_weight_set> > weight_set; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_choose_arg_ids> > choose_arg_ids; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_choose_arg> > choose_arg; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_choose_args> > choose_args; boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_crushmap> > crushmap; definition(crush_grammar const& /*self*/) { using boost::spirit::leaf_node_d; using boost::spirit::lexeme_d; using boost::spirit::str_p; using boost::spirit::ch_p; using boost::spirit::digit_p; using boost::spirit::alnum_p; using boost::spirit::real_p; // base types integer = leaf_node_d[ lexeme_d[ (!ch_p('-') >> +digit_p) ] ]; posint = leaf_node_d[ lexeme_d[ +digit_p ] ]; negint = leaf_node_d[ lexeme_d[ ch_p('-') >> +digit_p ] ]; name = leaf_node_d[ lexeme_d[ +( alnum_p || ch_p('-') || ch_p('_') || ch_p('.')) ] ]; // tunables tunable = str_p("tunable") >> name >> posint; // devices device = str_p("device") >> posint >> name >> !( str_p("class") >> name ); // bucket types bucket_type = str_p("type") >> posint >> name; // buckets bucket_id = str_p("id") >> negint >> !( str_p("class") >> name ); bucket_alg = str_p("alg") >> name; bucket_hash = str_p("hash") >> ( integer | str_p("rjenkins1") ); bucket_item = str_p("item") >> name >> !( str_p("weight") >> real_p ) >> !( str_p("pos") >> posint ); bucket = name >> name >> '{' >> *bucket_id >> bucket_alg >> *bucket_hash >> *bucket_item >> '}'; // rules step_take = str_p("take") >> name >> !( str_p("class") >> name ); step_set_choose_tries = str_p("set_choose_tries") >> posint; step_set_choose_local_tries = str_p("set_choose_local_tries") >> posint; step_set_choose_local_fallback_tries = str_p("set_choose_local_fallback_tries") >> posint; step_set_chooseleaf_tries = str_p("set_chooseleaf_tries") >> posint; step_set_chooseleaf_vary_r = str_p("set_chooseleaf_vary_r") >> posint; step_set_chooseleaf_stable = str_p("set_chooseleaf_stable") >> posint; step_choose = str_p("choose") >> ( str_p("indep") | str_p("firstn") ) >> integer >> str_p("type") >> name; step_chooseleaf = str_p("chooseleaf") >> ( str_p("indep") | str_p("firstn") ) >> integer >> str_p("type") >> name; step_emit = str_p("emit"); step = str_p("step") >> ( step_take | step_set_choose_tries | step_set_choose_local_tries | 
step_set_choose_local_fallback_tries | step_set_chooseleaf_tries | step_set_chooseleaf_vary_r | step_set_chooseleaf_stable | step_choose | step_chooseleaf | step_emit ); crushrule = str_p("rule") >> !name >> '{' >> (str_p("id") | str_p("ruleset")) >> posint >> str_p("type") >> ( str_p("replicated") | str_p("erasure") ) >> !(str_p("min_size") >> posint) >> !(str_p("max_size") >> posint) >> +step >> '}'; weight_set_weights = str_p("[") >> *real_p >> str_p("]"); weight_set = str_p("weight_set") >> str_p("[") >> *weight_set_weights >> str_p("]"); choose_arg_ids = str_p("ids") >> str_p("[") >> *integer >> str_p("]"); choose_arg = str_p("{") >> str_p("bucket_id") >> negint >> !weight_set >> !choose_arg_ids >> str_p("}"); choose_args = str_p("choose_args") >> posint >> str_p("{") >> *choose_arg >> str_p("}"); // the whole crush map crushmap = *(tunable | device | bucket_type) >> *(bucket | crushrule) >> *choose_args; } boost::spirit::rule<ScannerT, boost::spirit::parser_context<>, boost::spirit::parser_tag<_crushmap> > const& start() const { return crushmap; } }; }; #endif
9,040
43.9801
174
h
null
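To make the grammar above concrete, here is a hypothetical, invented text crush map of the shape it accepts -- tunables, devices and bucket types first, then a bucket and a rule. The names and weights are illustrative only and do not come from any file in the dump.

tunable choose_total_tries 50
device 0 osd.0
device 1 osd.1
type 0 osd
type 1 host
host host0 {
	id -1
	alg straw2
	hash 0
	item osd.0 weight 1.000
	item osd.1 weight 1.000
}
rule replicated_rule {
	id 0
	type replicated
	step take host0
	step choose firstn 0 type osd
	step emit
}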
ceph-main/src/crush/hash.h
#ifndef CEPH_CRUSH_HASH_H #define CEPH_CRUSH_HASH_H #ifdef __KERNEL__ # include <linux/types.h> #else # include "crush_compat.h" #endif #define CRUSH_HASH_RJENKINS1 0 #define CRUSH_HASH_DEFAULT CRUSH_HASH_RJENKINS1 extern const char *crush_hash_name(int type); extern __u32 crush_hash32(int type, __u32 a); extern __u32 crush_hash32_2(int type, __u32 a, __u32 b); extern __u32 crush_hash32_3(int type, __u32 a, __u32 b, __u32 c); extern __u32 crush_hash32_4(int type, __u32 a, __u32 b, __u32 c, __u32 d); extern __u32 crush_hash32_5(int type, __u32 a, __u32 b, __u32 c, __u32 d, __u32 e); #endif
611
24.5
74
h
null
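For illustration only (not part of the dump): the hash helpers above are plain functions of 32-bit inputs, so a deterministic two-argument draw is a single call.

#include "crush/hash.h"

// Mix an input x with a replica number r through the Jenkins hash; identical
// inputs always give the identical 32-bit value, which is what makes CRUSH
// placement repeatable.
__u32 draw(__u32 x, __u32 r)
{
  return crush_hash32_2(CRUSH_HASH_RJENKINS1, x, r);
}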
ceph-main/src/crush/mapper.h
#ifndef CEPH_CRUSH_MAPPER_H #define CEPH_CRUSH_MAPPER_H /* * CRUSH functions for finding rules and then mapping an input to an * output set. * * LGPL-2.1 or LGPL-3.0 */ #include "crush.h" /** @ingroup API * * Map __x__ to __result_max__ items and store them in the __result__ * array. The mapping is done by following each step of the rule * __ruleno__. See crush_make_rule(), crush_rule_set_step() and * crush_add_rule() for more information on how the rules are created, * populated and added to the crush __map__. * * The return value is the number of items in the __result__ * array. If the caller asked for __result_max__ items and the return * value is X where X < __result_max__, the content of __result[0,X[__ * is defined but the content of __result[X,result_max[__ is * undefined. For example: * * crush_do_rule(map, ruleno=1, x=1, result, result_max=3,...) == 1 * result[0] is set * result[1] is undefined * result[2] is undefined * * An entry in the __result__ array is either an item in the crush * __map__ or ::CRUSH_ITEM_NONE if no item was found. For example: * * crush_do_rule(map, ruleno=1, x=1, result, result_max=4,...) == 2 * result[0] is CRUSH_ITEM_NONE * result[1] is item number 5 * result[2] is undefined * result[3] is undefined * * The __weight__ array contains the probabilities that a leaf is * ignored even if it is selected. It is a 16.16 fixed point * number in the range [0x00000,0x10000]. The lower the value, the * more often the leaf is ignored. For instance: * * - weight[leaf] == 0x00000 == 0.0 always ignore * - weight[leaf] == 0x10000 == 1.0 never ignore * - weight[leaf] == 0x08000 == 0.5 ignore 50% of the time * - weight[leaf] == 0x04000 == 0.25 ignore 75% of the time * - etc. * * During mapping, each leaf is checked against the __weight__ array, * using the leaf as an index. If there is no entry in __weight__ for * the leaf, it is ignored. If there is an entry, the leaf will be * ignored some of the time, depending on the probability. * * The __cwin__ argument must be set as follows: * * char __cwin__[crush_work_size(__map__, __result_max__)]; * crush_init_workspace(__map__, __cwin__); * * @param map the crush_map * @param ruleno a positive integer < __CRUSH_MAX_RULES__ * @param x the value to map to __result_max__ items * @param result an array of items of size __result_max__ * @param result_max the size of the __result__ array * @param weights an array of weights of size __weight_max__ * @param weight_max the size of the __weights__ array * @param cwin must be a char array initialized by crush_init_workspace * @param choose_args weights and ids for each known bucket * * @return 0 on error or the size of __result__ on success */ extern int crush_do_rule(const struct crush_map *map, int ruleno, int x, int *result, int result_max, const __u32 *weights, int weight_max, void *cwin, const struct crush_choose_arg *choose_args); /* Returns the exact amount of workspace that will need to be used for a given combination of crush_map and result_max. The caller can then allocate this much on its own, either on the stack, in a per-thread long-lived buffer, or however it likes. */ static inline size_t crush_work_size(const struct crush_map *map, int result_max) { return map->working_size + result_max * 3 * sizeof(__u32); } extern void crush_init_workspace(const struct crush_map *m, void *v); #endif
3,525
36.913978
72
h
null
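A hypothetical sketch (not part of the dump) of calling crush_do_rule() with the workspace sized by crush_work_size() and initialized by crush_init_workspace(), exactly as the header comment above prescribes; the wrapper name and the choice of three results are arbitrary.

#include <cstdlib>
#include "crush/crush.h"
#include "crush/mapper.h"

// Map input x to up to three items with rule `ruleno`.  Returns the number
// of valid entries written to result[]; entries past that are undefined.
int map_input(const struct crush_map *map, int ruleno, int x,
              int *result, const __u32 *weights, int weight_max)
{
  const int result_max = 3;
  void *cwin = std::malloc(crush_work_size(map, result_max));
  if (!cwin)
    return 0;
  crush_init_workspace(map, cwin);
  int n = crush_do_rule(map, ruleno, x, result, result_max,
                        weights, weight_max, cwin,
                        nullptr /* no choose_args overrides */);
  std::free(cwin);
  return n;
}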
ceph-main/src/crush/types.h
#ifndef CEPH_CRUSH_TYPES_H #define CEPH_CRUSH_TYPES_H #ifdef KERNEL # define free(x) kfree(x) #else # include <stdlib.h> #endif #include <linux/types.h> /* just for int types */ #ifndef BUG_ON # define BUG_ON(x) ceph_assert(!(x)) #endif #endif
250
12.944444
50
h
null
ceph-main/src/crypto/crypto_accel.h
/* * Ceph - scalable distributed file system * * Copyright (C) 2016 Mirantis, Inc. * * Author: Adam Kupczyk <[email protected]> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * */ #ifndef CRYPTO_ACCEL_H #define CRYPTO_ACCEL_H #include <cstddef> #include "include/Context.h" class optional_yield; class CryptoAccel; typedef std::shared_ptr<CryptoAccel> CryptoAccelRef; class CryptoAccel { public: CryptoAccel() {} CryptoAccel(const size_t chunk_size, const size_t max_requests) {} virtual ~CryptoAccel() {} static const int AES_256_IVSIZE = 128/8; static const int AES_256_KEYSIZE = 256/8; virtual bool cbc_encrypt(unsigned char* out, const unsigned char* in, size_t size, const unsigned char (&iv)[AES_256_IVSIZE], const unsigned char (&key)[AES_256_KEYSIZE], optional_yield y) = 0; virtual bool cbc_decrypt(unsigned char* out, const unsigned char* in, size_t size, const unsigned char (&iv)[AES_256_IVSIZE], const unsigned char (&key)[AES_256_KEYSIZE], optional_yield y) = 0; virtual bool cbc_encrypt_batch(unsigned char* out, const unsigned char* in, size_t size, const unsigned char iv[][AES_256_IVSIZE], const unsigned char (&key)[AES_256_KEYSIZE], optional_yield y) = 0; virtual bool cbc_decrypt_batch(unsigned char* out, const unsigned char* in, size_t size, const unsigned char iv[][AES_256_IVSIZE], const unsigned char (&key)[AES_256_KEYSIZE], optional_yield y) = 0; }; #endif
1,869
35.666667
90
h
null
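A hypothetical sketch (not part of the dump) of how a caller might drive the CryptoAccel interface above. It assumes the caller already holds a CryptoAccelRef from the plugin layer and an optional_yield token from the surrounding I/O path; the wrapper name is invented.

#include "common/async/yield_context.h"
#include "crypto/crypto_accel.h"

// Encrypt `size` bytes through whichever CryptoAccel implementation was
// supplied; `size` must be a multiple of the AES block/IV size, mirroring
// the check the concrete implementations make.
bool encrypt_with_accel(CryptoAccelRef accel,
                        unsigned char *out, const unsigned char *in, size_t size,
                        const unsigned char (&iv)[CryptoAccel::AES_256_IVSIZE],
                        const unsigned char (&key)[CryptoAccel::AES_256_KEYSIZE],
                        optional_yield y)
{
  if (!accel || size % CryptoAccel::AES_256_IVSIZE != 0)
    return false;
  return accel->cbc_encrypt(out, in, size, iv, key, y);
}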
ceph-main/src/crypto/crypto_plugin.h
/* * Ceph - scalable distributed file system * * Copyright (C) 2016 Mirantis, Inc. * * Author: Adam Kupczyk <[email protected]> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * */ #ifndef CRYPTO_PLUGIN_H #define CRYPTO_PLUGIN_H // ----------------------------------------------------------------------------- #include "common/PluginRegistry.h" #include "ostream" #include "crypto/crypto_accel.h" #include <boost/asio/io_context.hpp> // ----------------------------------------------------------------------------- class CryptoPlugin : public ceph::Plugin { public: CryptoAccelRef cryptoaccel; explicit CryptoPlugin(CephContext* cct) : Plugin(cct) {} ~CryptoPlugin() {} virtual int factory(CryptoAccelRef *cs, std::ostream *ss, const size_t chunk_size, const size_t max_requests) = 0; }; #endif
1,109
26.75
80
h
null
ceph-main/src/crypto/isa-l/isal_crypto_accel.cc
/* * Ceph - scalable distributed file system * * Copyright (C) 2016 Mirantis, Inc. * * Author: Adam Kupczyk <[email protected]> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * */ #include "crypto/isa-l/isal_crypto_accel.h" #include "crypto/isa-l/isa-l_crypto/include/aes_cbc.h" bool ISALCryptoAccel::cbc_encrypt(unsigned char* out, const unsigned char* in, size_t size, const unsigned char (&iv)[AES_256_IVSIZE], const unsigned char (&key)[AES_256_KEYSIZE], optional_yield y) { if (unlikely((size % AES_256_IVSIZE) != 0)) { return false; } alignas(16) struct cbc_key_data keys_blk; aes_cbc_precomp(const_cast<unsigned char*>(&key[0]), AES_256_KEYSIZE, &keys_blk); aes_cbc_enc_256(const_cast<unsigned char*>(in), const_cast<unsigned char*>(&iv[0]), keys_blk.enc_keys, out, size); return true; } bool ISALCryptoAccel::cbc_decrypt(unsigned char* out, const unsigned char* in, size_t size, const unsigned char (&iv)[AES_256_IVSIZE], const unsigned char (&key)[AES_256_KEYSIZE], optional_yield y) { if (unlikely((size % AES_256_IVSIZE) != 0)) { return false; } alignas(16) struct cbc_key_data keys_blk; aes_cbc_precomp(const_cast<unsigned char*>(&key[0]), AES_256_KEYSIZE, &keys_blk); aes_cbc_dec_256(const_cast<unsigned char*>(in), const_cast<unsigned char*>(&iv[0]), keys_blk.dec_keys, out, size); return true; }
1,770
37.5
116
cc
null
ceph-main/src/crypto/isa-l/isal_crypto_accel.h
/* * Ceph - scalable distributed file system * * Copyright (C) 2016 Mirantis, Inc. * * Author: Adam Kupczyk <[email protected]> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * */ #ifndef ISAL_CRYPTO_ACCEL_H #define ISAL_CRYPTO_ACCEL_H #include "crypto/crypto_accel.h" #include "common/async/yield_context.h" class ISALCryptoAccel : public CryptoAccel { public: ISALCryptoAccel() {} virtual ~ISALCryptoAccel() {} bool cbc_encrypt(unsigned char* out, const unsigned char* in, size_t size, const unsigned char (&iv)[AES_256_IVSIZE], const unsigned char (&key)[AES_256_KEYSIZE], optional_yield y) override; bool cbc_decrypt(unsigned char* out, const unsigned char* in, size_t size, const unsigned char (&iv)[AES_256_IVSIZE], const unsigned char (&key)[AES_256_KEYSIZE], optional_yield y) override; bool cbc_encrypt_batch(unsigned char* out, const unsigned char* in, size_t size, const unsigned char iv[][AES_256_IVSIZE], const unsigned char (&key)[AES_256_KEYSIZE], optional_yield y) override { return false; } bool cbc_decrypt_batch(unsigned char* out, const unsigned char* in, size_t size, const unsigned char iv[][AES_256_IVSIZE], const unsigned char (&key)[AES_256_KEYSIZE], optional_yield y) override { return false; } }; #endif
1,707
38.72093
82
h
null
ceph-main/src/crypto/isa-l/isal_crypto_plugin.cc
/* * Ceph - scalable distributed file system * * Copyright (C) 2016 Mirantis, Inc. * * Author: Adam Kupczyk <[email protected]> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * */ // ----------------------------------------------------------------------------- #include "crypto/isa-l/isal_crypto_plugin.h" #include "ceph_ver.h" // ----------------------------------------------------------------------------- const char *__ceph_plugin_version() { return CEPH_GIT_NICE_VER; } int __ceph_plugin_init(CephContext *cct, const std::string& type, const std::string& name) { auto instance = cct->get_plugin_registry(); return instance->add(type, name, new ISALCryptoPlugin(cct)); }
966
26.628571
80
cc
null
ceph-main/src/crypto/isa-l/isal_crypto_plugin.h
/* * Ceph - scalable distributed file system * * Copyright (C) 2016 Mirantis, Inc. * * Author: Adam Kupczyk <[email protected]> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * */ #ifndef ISAL_CRYPTO_PLUGIN_H #define ISAL_CRYPTO_PLUGIN_H // ----------------------------------------------------------------------------- #include "crypto/crypto_plugin.h" #include "crypto/isa-l/isal_crypto_accel.h" #include "arch/intel.h" #include "arch/probe.h" // ----------------------------------------------------------------------------- class ISALCryptoPlugin : public CryptoPlugin { public: explicit ISALCryptoPlugin(CephContext* cct) : CryptoPlugin(cct) {} ~ISALCryptoPlugin() {} virtual int factory(CryptoAccelRef *cs, std::ostream *ss, const size_t chunk_size, const size_t max_requests) { if (cryptoaccel == nullptr) { ceph_arch_probe(); if (ceph_arch_intel_aesni && ceph_arch_intel_sse41) { cryptoaccel = CryptoAccelRef(new ISALCryptoAccel); } } *cs = cryptoaccel; return 0; } }; #endif
1,345
25.92
80
h
null
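A hypothetical sketch (not part of the dump) of obtaining an accelerator through the factory() method shown above. In the tree the plugin is normally registered via __ceph_plugin_init() and fetched from the plugin registry; constructing ISALCryptoPlugin directly, and the chunk_size/max_requests values, are illustrative shortcuts only.

#include <iostream>
#include "crypto/isa-l/isal_crypto_plugin.h"

// Ask the ISA-L plugin for an accelerator.  The returned reference stays
// empty when the CPU lacks AES-NI / SSE4.1, in which case the caller must
// fall back to a software path.
CryptoAccelRef get_isal_accel(CephContext *cct)
{
  ISALCryptoPlugin plugin(cct);
  CryptoAccelRef accel;
  plugin.factory(&accel, &std::cerr,
                 /* chunk_size = */ 4096, /* max_requests = */ 1);
  return accel;
}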
ceph-main/src/crypto/openssl/openssl_crypto_accel.cc
/* * Ceph - scalable distributed file system * * Copyright (C) 2017 Intel Corporation * * Author: Qiaowei Ren <[email protected]> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * */ #include "crypto/openssl/openssl_crypto_accel.h" #include <openssl/evp.h> #include <openssl/engine.h> #include "common/debug.h" // ----------------------------------------------------------------------------- #define dout_context g_ceph_context #define dout_subsys ceph_subsys_crypto #undef dout_prefix #define dout_prefix _prefix(_dout) static std::ostream& _prefix(std::ostream* _dout) { return *_dout << "OpensslCryptoAccel: "; } // ----------------------------------------------------------------------------- #define EVP_SUCCESS 1 #define AES_ENCRYPT 1 #define AES_DECRYPT 0 bool evp_transform(unsigned char* out, const unsigned char* in, size_t size, const unsigned char* iv, const unsigned char* key, ENGINE* engine, const EVP_CIPHER* const type, const int encrypt) { using pctx_t = std::unique_ptr<EVP_CIPHER_CTX, decltype(&::EVP_CIPHER_CTX_free)>; pctx_t pctx{ EVP_CIPHER_CTX_new(), EVP_CIPHER_CTX_free }; if (!pctx) { derr << "failed to create evp cipher context" << dendl; return false; } if (EVP_CipherInit_ex(pctx.get(), type, engine, key, iv, encrypt) != EVP_SUCCESS) { derr << "EVP_CipherInit_ex failed" << dendl; return false; } if (EVP_CIPHER_CTX_set_padding(pctx.get(), 0) != EVP_SUCCESS) { derr << "failed to disable PKCS padding" << dendl; return false; } int len_update = 0; if (EVP_CipherUpdate(pctx.get(), out, &len_update, in, size) != EVP_SUCCESS) { derr << "EVP_CipherUpdate failed" << dendl; return false; } int len_final = 0; if (EVP_CipherFinal_ex(pctx.get(), out + len_update, &len_final) != EVP_SUCCESS) { derr << "EVP_CipherFinal_ex failed" << dendl; return false; } ceph_assert(len_final == 0); return (len_update + len_final) == static_cast<int>(size); } bool OpenSSLCryptoAccel::cbc_encrypt(unsigned char* out, const unsigned char* in, size_t size, const unsigned char (&iv)[AES_256_IVSIZE], const unsigned char (&key)[AES_256_KEYSIZE], optional_yield y) { if (unlikely((size % AES_256_IVSIZE) != 0)) { return false; } return evp_transform(out, in, size, const_cast<unsigned char*>(&iv[0]), const_cast<unsigned char*>(&key[0]), nullptr, // Hardware acceleration engine can be used in the future EVP_aes_256_cbc(), AES_ENCRYPT); } bool OpenSSLCryptoAccel::cbc_decrypt(unsigned char* out, const unsigned char* in, size_t size, const unsigned char (&iv)[AES_256_IVSIZE], const unsigned char (&key)[AES_256_KEYSIZE], optional_yield y) { if (unlikely((size % AES_256_IVSIZE) != 0)) { return false; } return evp_transform(out, in, size, const_cast<unsigned char*>(&iv[0]), const_cast<unsigned char*>(&key[0]), nullptr, // Hardware acceleration engine can be used in the future EVP_aes_256_cbc(), AES_DECRYPT); }
3,626
32.897196
94
cc
null
ceph-main/src/crypto/openssl/openssl_crypto_accel.h
/* * Ceph - scalable distributed file system * * Copyright (C) 2017 Intel Corporation * * Author: Qiaowei Ren <[email protected]> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * */ #ifndef OPENSSL_CRYPTO_ACCEL_H #define OPENSSL_CRYPTO_ACCEL_H #include "crypto/crypto_accel.h" #include "common/async/yield_context.h" class OpenSSLCryptoAccel : public CryptoAccel { public: OpenSSLCryptoAccel() {} virtual ~OpenSSLCryptoAccel() {} bool cbc_encrypt(unsigned char* out, const unsigned char* in, size_t size, const unsigned char (&iv)[AES_256_IVSIZE], const unsigned char (&key)[AES_256_KEYSIZE], optional_yield y) override; bool cbc_decrypt(unsigned char* out, const unsigned char* in, size_t size, const unsigned char (&iv)[AES_256_IVSIZE], const unsigned char (&key)[AES_256_KEYSIZE], optional_yield y) override; bool cbc_encrypt_batch(unsigned char* out, const unsigned char* in, size_t size, const unsigned char iv[][AES_256_IVSIZE], const unsigned char (&key)[AES_256_KEYSIZE], optional_yield y) override { return false; } bool cbc_decrypt_batch(unsigned char* out, const unsigned char* in, size_t size, const unsigned char iv[][AES_256_IVSIZE], const unsigned char (&key)[AES_256_KEYSIZE], optional_yield y) override { return false; } }; #endif
1,725
38.227273
82
h
null
ceph-main/src/crypto/openssl/openssl_crypto_plugin.cc
/* * Ceph - scalable distributed file system * * Copyright (C) 2017 Intel Corporation * * Author: Qiaowei Ren <[email protected]> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * */ #include "crypto/openssl/openssl_crypto_plugin.h" #include "ceph_ver.h" const char *__ceph_plugin_version() { return CEPH_GIT_NICE_VER; } int __ceph_plugin_init(CephContext *cct, const std::string& type, const std::string& name) { auto instance = cct->get_plugin_registry(); return instance->add(type, name, new OpenSSLCryptoPlugin(cct)); }
813
23.666667
70
cc