Search is not available for this dataset
repo
stringlengths
2
152
file
stringlengths
15
239
code
stringlengths
0
58.4M
file_length
int64
0
58.4M
avg_line_length
float64
0
1.81M
max_line_length
int64
0
12.7M
extension_type
stringclasses
364 values
null
ceph-main/src/librbd/deep_copy/Utils.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "common/debug.h" #include "Utils.h" #include <set> namespace librbd { namespace deep_copy { namespace util { #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::deep_copy::util::" << __func__ << ": " void compute_snap_map(CephContext* cct, librados::snap_t src_snap_id_start, librados::snap_t src_snap_id_end, const SnapIds& dst_snap_ids, const SnapSeqs &snap_seqs, SnapMap *snap_map) { std::set<librados::snap_t> ordered_dst_snap_ids{ dst_snap_ids.begin(), dst_snap_ids.end()}; auto dst_snap_id_it = ordered_dst_snap_ids.begin(); SnapIds snap_ids; for (auto &it : snap_seqs) { // ensure all dst snap ids are included in the mapping table since // deep copy will skip non-user snapshots while (dst_snap_id_it != ordered_dst_snap_ids.end()) { if (*dst_snap_id_it < it.second) { snap_ids.insert(snap_ids.begin(), *dst_snap_id_it); } else if (*dst_snap_id_it > it.second) { break; } ++dst_snap_id_it; } // we should only have the HEAD revision in the last snap seq ceph_assert(snap_ids.empty() || snap_ids[0] != CEPH_NOSNAP); snap_ids.insert(snap_ids.begin(), it.second); if (it.first < src_snap_id_start) { continue; } else if (it.first > src_snap_id_end) { break; } (*snap_map)[it.first] = snap_ids; } ldout(cct, 10) << "src_snap_id_start=" << src_snap_id_start << ", " << "src_snap_id_end=" << src_snap_id_end << ", " << "dst_snap_ids=" << dst_snap_ids << ", " << "snap_seqs=" << snap_seqs << ", " << "snap_map=" << *snap_map << dendl; } } // namespace util } // namespace deep_copy } // namespace librbd
1,954
30.532258
77
cc
null
ceph-main/src/librbd/deep_copy/Utils.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_DEEP_COPY_UTILS_H #define CEPH_LIBRBD_DEEP_COPY_UTILS_H #include "include/common_fwd.h" #include "include/rados/librados.hpp" #include "librbd/Types.h" #include "librbd/deep_copy/Types.h" #include <boost/optional.hpp> namespace librbd { namespace deep_copy { namespace util { void compute_snap_map(CephContext* cct, librados::snap_t src_snap_id_start, librados::snap_t src_snap_id_end, const SnapIds& dst_snap_ids, const SnapSeqs &snap_seqs, SnapMap *snap_map); } // namespace util } // namespace deep_copy } // namespace librbd #endif // CEPH_LIBRBD_DEEP_COPY_UTILS_H
804
25.833333
70
h
null
ceph-main/src/librbd/exclusive_lock/AutomaticPolicy.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/exclusive_lock/AutomaticPolicy.h" #include "librbd/ImageCtx.h" #include "librbd/ExclusiveLock.h" #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::ExclusiveLock::AutomaticPolicy " namespace librbd { namespace exclusive_lock { int AutomaticPolicy::lock_requested(bool force) { ceph_assert(ceph_mutex_is_locked(m_image_ctx->owner_lock)); ceph_assert(m_image_ctx->exclusive_lock != nullptr); ldout(m_image_ctx->cct, 20) << this << " " << __func__ << ": force=" << force << dendl; // release the lock upon request (ignore forced requests) m_image_ctx->exclusive_lock->release_lock(nullptr); return 0; } } // namespace exclusive_lock } // namespace librbd
839
27
79
cc
null
ceph-main/src/librbd/exclusive_lock/AutomaticPolicy.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_AUTOMATIC_POLICY_H #define CEPH_LIBRBD_EXCLUSIVE_LOCK_AUTOMATIC_POLICY_H #include "librbd/exclusive_lock/Policy.h" namespace librbd { struct ImageCtx; namespace exclusive_lock { class AutomaticPolicy : public Policy { public: AutomaticPolicy(ImageCtx *image_ctx) : m_image_ctx(image_ctx) { } bool may_auto_request_lock() override { return true; } int lock_requested(bool force) override; private: ImageCtx *m_image_ctx; }; } // namespace exclusive_lock } // namespace librbd #endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_AUTOMATIC_POLICY_H
692
18.8
70
h
null
ceph-main/src/librbd/exclusive_lock/ImageDispatch.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "librbd/exclusive_lock/ImageDispatch.h"
#include "include/Context.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/exclusive_lock/Policy.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/ImageDispatcherInterface.h"

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::exclusive_lock::ImageDispatch: " \
                           << this << " " << __func__ << ": "

namespace librbd {
namespace exclusive_lock {

using util::create_context_callback;
using util::create_async_context_callback;

template <typename I>
ImageDispatch<I>::ImageDispatch(I* image_ctx)
  : m_image_ctx(image_ctx),
    m_lock(ceph::make_shared_mutex(
      util::unique_lock_name("librbd::exclusive_lock::ImageDispatch::m_lock",
                             this))) {
}

// Complete (with success) any IO contexts still queued waiting for the
// exclusive lock, then complete on_finish.
template <typename I>
void ImageDispatch<I>::shut_down(Context* on_finish) {
  // release any IO waiting on exclusive lock
  Contexts on_dispatches;
  {
    std::unique_lock locker{m_lock};
    std::swap(on_dispatches, m_on_dispatches);
  }

  for (auto ctx : on_dispatches) {
    ctx->complete(0);
  }

  on_finish->complete(0);
}

// Require the exclusive lock for the given IO direction(s).  For write
// directions, a flush is pushed through the lower dispatch layers before
// on_finish completes so no unlocked writes remain in flight.
template <typename I>
void ImageDispatch<I>::set_require_lock(bool init_shutdown,
                                        io::Direction direction,
                                        Context* on_finish) {
  // pause any matching IO from proceeding past this layer
  set_require_lock(direction, true);

  if (direction == io::DIRECTION_READ) {
    on_finish->complete(0);
    return;
  }

  // push through a flush for any in-flight writes at lower levels
  auto aio_comp = io::AioCompletion::create_and_start(
    on_finish, util::get_image_ctx(m_image_ctx), io::AIO_TYPE_FLUSH);
  auto req = io::ImageDispatchSpec::create_flush(
    *m_image_ctx, io::IMAGE_DISPATCH_LAYER_EXCLUSIVE_LOCK, aio_comp,
    (init_shutdown ?
      io::FLUSH_SOURCE_EXCLUSIVE_LOCK_SKIP_REFRESH :
      io::FLUSH_SOURCE_EXCLUSIVE_LOCK), {});
  req->send();
}

// Stop requiring the exclusive lock for the given IO direction(s).
template <typename I>
void ImageDispatch<I>::unset_require_lock(io::Direction direction) {
  set_require_lock(direction, false);
}

// Update the require-lock flags for the given direction(s).  Returns true
// only when the overall requirement actually transitioned (off->on when
// enabling, on->off when disabling).
template <typename I>
bool ImageDispatch<I>::set_require_lock(io::Direction direction,
                                        bool enabled) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "direction=" << direction << ", enabled=" << enabled
                 << dendl;

  std::unique_lock locker{m_lock};
  auto prev_require_lock = (m_require_lock_on_read ||
                            m_require_lock_on_write);

  switch (direction) {
  case io::DIRECTION_READ:
    m_require_lock_on_read = enabled;
    break;
  case io::DIRECTION_WRITE:
    m_require_lock_on_write = enabled;
    break;
  case io::DIRECTION_BOTH:
    m_require_lock_on_read = enabled;
    m_require_lock_on_write = enabled;
    break;
  }

  bool require_lock = (m_require_lock_on_read || m_require_lock_on_write);
  return ((enabled && !prev_require_lock && require_lock) ||
          (!enabled && prev_require_lock && !require_lock));
}

// Each IO entry point below returns true (IO delayed / handled here) when
// the exclusive lock is required and not held; false lets the IO continue
// down the dispatch stack.
template <typename I>
bool ImageDispatch<I>::read(
    io::AioCompletion* aio_comp, io::Extents &&image_extents,
    io::ReadResult &&read_result, IOContext io_context, int op_flags,
    int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
    std::atomic<uint32_t>* image_dispatch_flags,
    io::DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "image_extents=" << image_extents << dendl;

  if (needs_exclusive_lock(true, tid, dispatch_result, on_dispatched)) {
    return true;
  }

  return false;
}

template <typename I>
bool ImageDispatch<I>::write(
    io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl,
    int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
    std::atomic<uint32_t>* image_dispatch_flags,
    io::DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
                 << dendl;

  if (needs_exclusive_lock(false, tid, dispatch_result, on_dispatched)) {
    return true;
  }

  return false;
}

template <typename I>
bool ImageDispatch<I>::discard(
    io::AioCompletion* aio_comp, io::Extents &&image_extents,
    uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    io::DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
                 << dendl;

  if (needs_exclusive_lock(false, tid, dispatch_result, on_dispatched)) {
    return true;
  }

  return false;
}

template <typename I>
bool ImageDispatch<I>::write_same(
    io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl,
    int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
    std::atomic<uint32_t>* image_dispatch_flags,
    io::DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
                 << dendl;

  if (needs_exclusive_lock(false, tid, dispatch_result, on_dispatched)) {
    return true;
  }

  return false;
}

template <typename I>
bool ImageDispatch<I>::compare_and_write(
    io::AioCompletion* aio_comp, io::Extents &&image_extents,
    bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
    int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
    std::atomic<uint32_t>* image_dispatch_flags,
    io::DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
                 << dendl;

  if (needs_exclusive_lock(false, tid, dispatch_result, on_dispatched)) {
    return true;
  }

  return false;
}

template <typename I>
bool ImageDispatch<I>::flush(
    io::AioCompletion* aio_comp, io::FlushSource flush_source,
    const ZTracer::Trace &parent_trace, uint64_t tid,
    std::atomic<uint32_t>* image_dispatch_flags,
    io::DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << dendl;

  // don't attempt to grab the exclusive lock if we are just internally
  // clearing out our in-flight IO queue
  if (flush_source != io::FLUSH_SOURCE_USER) {
    return false;
  }

  if (needs_exclusive_lock(false, tid, dispatch_result, on_dispatched)) {
    return true;
  }

  return false;
}

// Caller must hold m_lock (shared or exclusive).
template <typename I>
bool ImageDispatch<I>::is_lock_required(bool read_op) const {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  return ((read_op && m_require_lock_on_read) ||
          (!read_op && m_require_lock_on_write));
}

// Returns true when the IO must wait for the exclusive lock; in that case
// *dispatch_result and on_dispatched have been set up appropriately
// (either queued behind an in-progress acquisition or failed immediately
// when the policy forbids auto-acquisition).
template <typename I>
bool ImageDispatch<I>::needs_exclusive_lock(bool read_op, uint64_t tid,
                                            io::DispatchResult* dispatch_result,
                                            Context* on_dispatched) {
  auto cct = m_image_ctx->cct;

  bool lock_required = false;
  {
    std::shared_lock locker{m_lock};
    lock_required = is_lock_required(read_op);
  }

  if (lock_required) {
    std::shared_lock owner_locker{m_image_ctx->owner_lock};
    if (m_image_ctx->exclusive_lock == nullptr) {
      // raced with the exclusive lock being disabled
      return false;
    }

    ldout(cct, 5) << "exclusive lock required: delaying IO" << dendl;
    if (!m_image_ctx->get_exclusive_lock_policy()->may_auto_request_lock()) {
      // policy forbids implicit acquisition -- fail the IO immediately
      lderr(cct) << "op requires exclusive lock" << dendl;

      *dispatch_result = io::DISPATCH_RESULT_CONTINUE;
      on_dispatched->complete(
        m_image_ctx->exclusive_lock->get_unlocked_op_error());
      return true;
    }

    // block potential races with other incoming IOs
    std::unique_lock locker{m_lock};
    bool retesting_lock = (
      !m_on_dispatches.empty() && m_on_dispatches.front() == on_dispatched);
    if (!m_on_dispatches.empty() && !retesting_lock) {
      // another IO already initiated lock acquisition -- queue behind it
      *dispatch_result = io::DISPATCH_RESULT_RESTART;
      m_on_dispatches.push_back(on_dispatched);
      return true;
    }

    if (!is_lock_required(read_op)) {
      // requirement was dropped while re-acquiring m_lock
      return false;
    }

    ceph_assert(m_on_dispatches.empty() || retesting_lock);
    m_on_dispatches.push_back(on_dispatched);
    locker.unlock();

    *dispatch_result = io::DISPATCH_RESULT_RESTART;
    auto ctx = create_async_context_callback(
      *m_image_ctx, create_context_callback<
        ImageDispatch<I>, &ImageDispatch<I>::handle_acquire_lock>(this));
    m_image_ctx->exclusive_lock->acquire_lock(ctx);
    return true;
  }

  return false;
}

// Completion for the exclusive-lock acquisition initiated above.  On hard
// failure the initiating IO is failed with r; all (other) queued IOs are
// restarted so they re-test the lock requirement.
template <typename I>
void ImageDispatch<I>::handle_acquire_lock(int r) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 5) << "r=" << r << dendl;

  std::unique_lock locker{m_lock};
  ceph_assert(!m_on_dispatches.empty());

  Context* failed_dispatch = nullptr;
  Contexts on_dispatches;
  if (r == -ERESTART) {
    ldout(cct, 5) << "IO raced with exclusive lock shutdown" << dendl;
  } else if (r < 0) {
    lderr(cct) << "failed to acquire exclusive lock: " << cpp_strerror(r)
               << dendl;
    failed_dispatch = m_on_dispatches.front();
    m_on_dispatches.pop_front();
  }

  // re-test if lock is still required (i.e. it wasn't acquired/lost) via a
  // restart dispatch
  std::swap(on_dispatches, m_on_dispatches);
  locker.unlock();

  if (failed_dispatch != nullptr) {
    failed_dispatch->complete(r);
  }
  for (auto ctx : on_dispatches) {
    ctx->complete(0);
  }
}

} // namespace exclusive_lock
} // namespace librbd

template class librbd::exclusive_lock::ImageDispatch<librbd::ImageCtx>;
10,081
30.4081
80
cc
null
ceph-main/src/librbd/exclusive_lock/ImageDispatch.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_IMAGE_DISPATCH_H
#define CEPH_LIBRBD_EXCLUSIVE_LOCK_IMAGE_DISPATCH_H

#include "librbd/io/ImageDispatchInterface.h"
#include "include/int_types.h"
#include "include/buffer.h"
#include "common/ceph_mutex.h"
#include "common/zipkin_trace.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Types.h"
#include <atomic>
#include <list>
#include <unordered_set>

struct Context;

namespace librbd {

struct ImageCtx;

namespace io { struct AioCompletion; }

namespace exclusive_lock {

// Image IO dispatch layer that delays reads/writes while the exclusive
// lock is required but not yet held (see ImageDispatch.cc).
template <typename ImageCtxT>
class ImageDispatch : public io::ImageDispatchInterface {
public:
  static ImageDispatch* create(ImageCtxT* image_ctx) {
    return new ImageDispatch(image_ctx);
  }
  void destroy() {
    delete this;
  }

  ImageDispatch(ImageCtxT* image_ctx);

  io::ImageDispatchLayer get_dispatch_layer() const override {
    return io::IMAGE_DISPATCH_LAYER_EXCLUSIVE_LOCK;
  }

  // begin requiring the lock for the given direction(s); flushes writes
  void set_require_lock(bool init_shutdown,
                        io::Direction direction, Context* on_finish);
  // stop requiring the lock for the given direction(s)
  void unset_require_lock(io::Direction direction);

  void shut_down(Context* on_finish) override;

  bool read(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      io::ReadResult &&read_result, IOContext io_context, int op_flags,
      int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool write(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool discard(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool write_same(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool compare_and_write(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
      int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool flush(
      io::AioCompletion* aio_comp, io::FlushSource flush_source,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;

  // snapshot listing never requires the exclusive lock -- pass through
  bool list_snaps(
      io::AioCompletion* aio_comp, io::Extents&& image_extents,
      io::SnapIds&& snap_ids, int list_snaps_flags,
      io::SnapshotDelta* snapshot_delta, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

  bool invalidate_cache(Context* on_finish) override {
    return false;
  }

private:
  typedef std::list<Context*> Contexts;
  typedef std::unordered_set<uint64_t> Tids;

  ImageCtxT* m_image_ctx;  // non-owning
  mutable ceph::shared_mutex m_lock;  // guards the flags and queue below

  bool m_require_lock_on_read = false;
  bool m_require_lock_on_write = false;

  // IOs queued waiting on lock acquisition; front initiated the acquire
  Contexts m_on_dispatches;

  bool set_require_lock(io::Direction direction, bool enabled);

  bool is_lock_required(bool read_op) const;

  bool needs_exclusive_lock(bool read_op, uint64_t tid,
                            io::DispatchResult* dispatch_result,
                            Context* on_dispatched);

  void handle_acquire_lock(int r);
};

} // namespace exclusive_lock
} // namespace librbd

extern template class librbd::exclusive_lock::ImageDispatch<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_IMAGE_DISPATCH_H
4,518
32.723881
80
h
null
ceph-main/src/librbd/exclusive_lock/Policy.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_POLICY_H #define CEPH_LIBRBD_EXCLUSIVE_LOCK_POLICY_H namespace librbd { namespace exclusive_lock { enum OperationRequestType { OPERATION_REQUEST_TYPE_GENERAL = 0, OPERATION_REQUEST_TYPE_TRASH_SNAP_REMOVE = 1, OPERATION_REQUEST_TYPE_FORCE_PROMOTION = 2, }; struct Policy { virtual ~Policy() { } virtual bool may_auto_request_lock() = 0; virtual int lock_requested(bool force) = 0; virtual bool accept_blocked_request(OperationRequestType) { return false; } }; } // namespace exclusive_lock } // namespace librbd #endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_POLICY_H
732
21.90625
70
h
null
ceph-main/src/librbd/exclusive_lock/PostAcquireRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "librbd/exclusive_lock/PostAcquireRequest.h"
#include "cls/lock/cls_lock_client.h"
#include "cls/lock/cls_lock_types.h"
#include "common/dout.h"
#include "common/errno.h"
#include "include/stringify.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/ImageWatcher.h"
#include "librbd/Journal.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "librbd/image/RefreshRequest.h"
#include "librbd/journal/Policy.h"
#include "librbd/PluginRegistry.h"

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::exclusive_lock::PostAcquireRequest: " \
                           << this << " " << __func__ << ": "

namespace librbd {
namespace exclusive_lock {

using util::create_async_context_callback;
using util::create_context_callback;
using util::create_rados_callback;

template <typename I>
PostAcquireRequest<I>* PostAcquireRequest<I>::create(I &image_ctx,
                                                     Context *on_acquire,
                                                     Context *on_finish) {
  return new PostAcquireRequest(image_ctx, on_acquire, on_finish);
}

template <typename I>
PostAcquireRequest<I>::PostAcquireRequest(I &image_ctx, Context *on_acquire,
                                          Context *on_finish)
  : m_image_ctx(image_ctx),
    m_on_acquire(on_acquire),
    m_on_finish(create_async_context_callback(image_ctx, on_finish)),
    m_object_map(nullptr), m_journal(nullptr), m_error_result(0) {
}

template <typename I>
PostAcquireRequest<I>::~PostAcquireRequest() {
  // unblock ImageState if apply() never ran (e.g. early failure path)
  if (!m_prepare_lock_completed) {
    m_image_ctx.state->handle_prepare_lock_complete();
  }
  delete m_on_acquire;
}

template <typename I>
void PostAcquireRequest<I>::send() {
  send_refresh();
}

// Step 1: refresh the image if a refresh is pending (see state diagram in
// PostAcquireRequest.h for the overall flow).
template <typename I>
void PostAcquireRequest<I>::send_refresh() {
  if (!m_image_ctx.state->is_refresh_required()) {
    send_open_object_map();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PostAcquireRequest<I>;
  Context *ctx = create_async_context_callback(
    m_image_ctx, create_context_callback<klass, &klass::handle_refresh>(this));

  // ImageState is blocked waiting for lock to complete -- safe to directly
  // refresh
  image::RefreshRequest<I> *req = image::RefreshRequest<I>::create(
    m_image_ctx, true, false, ctx);
  req->send();
}

template <typename I>
void PostAcquireRequest<I>::handle_refresh(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  if (r == -ERESTART) {
    // next issued IO or op will (re)-refresh the image and shut down lock
    ldout(cct, 5) << "exclusive lock dynamically disabled" << dendl;
    r = 0;
  } else if (r < 0) {
    lderr(cct) << "failed to refresh image: " << cpp_strerror(r) << dendl;
    save_result(r);
    revert();
    finish();
    return;
  }

  send_open_object_map();
}

// Step 3: open the journal (if enabled); runs after the object map step.
template <typename I>
void PostAcquireRequest<I>::send_open_journal() {
  // alert caller that we now own the exclusive lock
  m_on_acquire->complete(0);
  m_on_acquire = nullptr;

  bool journal_enabled;
  {
    std::shared_lock image_locker{m_image_ctx.image_lock};
    journal_enabled = (m_image_ctx.test_features(RBD_FEATURE_JOURNALING,
                                                 m_image_ctx.image_lock) &&
                       !m_image_ctx.get_journal_policy()->journal_disabled());
  }
  if (!journal_enabled) {
    apply();
    send_process_plugin_acquire_lock();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PostAcquireRequest<I>;
  Context *ctx = create_context_callback<klass, &klass::handle_open_journal>(
    this);
  m_journal = m_image_ctx.create_journal();

  // journal playback requires object map (if enabled) and itself
  apply();

  m_journal->open(ctx);
}

template <typename I>
void PostAcquireRequest<I>::handle_open_journal(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  save_result(r);
  if (r < 0) {
    lderr(cct) << "failed to open journal: " << cpp_strerror(r) << dendl;
    send_close_journal();
    return;
  }

  send_allocate_journal_tag();
}

template <typename I>
void PostAcquireRequest<I>::send_allocate_journal_tag() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  std::shared_lock image_locker{m_image_ctx.image_lock};
  using klass = PostAcquireRequest<I>;
  // callback keeps m_journal alive while the tag allocation is in flight
  Context *ctx = create_context_callback<
    klass, &klass::handle_allocate_journal_tag>(this, m_journal);
  m_image_ctx.get_journal_policy()->allocate_tag_on_lock(ctx);
}

template <typename I>
void PostAcquireRequest<I>::handle_allocate_journal_tag(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  save_result(r);
  if (r < 0) {
    lderr(cct) << "failed to allocate journal tag: " << cpp_strerror(r)
               << dendl;
    send_close_journal();
    return;
  }

  send_process_plugin_acquire_lock();
}

// Final forward step: notify registered plugins of the acquisition.
template <typename I>
void PostAcquireRequest<I>::send_process_plugin_acquire_lock() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PostAcquireRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_process_plugin_acquire_lock>(this);
  m_image_ctx.plugin_registry->acquired_exclusive_lock(ctx);
}

template <typename I>
void PostAcquireRequest<I>::handle_process_plugin_acquire_lock(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  save_result(r);
  if (r < 0) {
    lderr(cct) << "failed to process plugins: " << cpp_strerror(r) << dendl;
    // unwind: plugin release -> close journal -> close object map
    send_process_plugin_release_lock();
    return;
  }

  finish();
}

template <typename I>
void PostAcquireRequest<I>::send_process_plugin_release_lock() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PostAcquireRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_process_plugin_release_lock>(this);
  m_image_ctx.plugin_registry->prerelease_exclusive_lock(ctx);
}

template <typename I>
void PostAcquireRequest<I>::handle_process_plugin_release_lock(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  save_result(r);
  if (r < 0) {
    lderr(cct) << "failed to release plugins: " << cpp_strerror(r) << dendl;
  }

  send_close_journal();
}

template <typename I>
void PostAcquireRequest<I>::send_close_journal() {
  if (m_journal == nullptr) {
    send_close_object_map();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PostAcquireRequest<I>;
  Context *ctx = create_context_callback<klass, &klass::handle_close_journal>(
    this);
  m_journal->close(ctx);
}

template <typename I>
void PostAcquireRequest<I>::handle_close_journal(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  save_result(r);
  if (r < 0) {
    lderr(cct) << "failed to close journal: " << cpp_strerror(r) << dendl;
  }

  send_close_object_map();
}

// Step 2: open the object map (if the feature is enabled).
template <typename I>
void PostAcquireRequest<I>::send_open_object_map() {
  if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP)) {
    send_open_journal();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PostAcquireRequest<I>;
  Context *ctx = create_context_callback<klass, &klass::handle_open_object_map>(
    this);

  m_object_map = m_image_ctx.create_object_map(CEPH_NOSNAP);
  m_object_map->open(ctx);
}

template <typename I>
void PostAcquireRequest<I>::handle_open_object_map(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  if (r < 0) {
    lderr(cct) << "failed to open object map: " << cpp_strerror(r) << dendl;
    m_object_map->put();
    m_object_map = nullptr;

    // -EFBIG (object map too large) is tolerated: continue without the map
    if (r != -EFBIG) {
      save_result(r);
      revert();
      finish();
      return;
    }
  }

  send_open_journal();
}

template <typename I>
void PostAcquireRequest<I>::send_close_object_map() {
  if (m_object_map == nullptr) {
    revert();
    finish();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PostAcquireRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_close_object_map>(this);
  m_object_map->close(ctx);
}

template <typename I>
void PostAcquireRequest<I>::handle_close_object_map(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  if (r < 0) {
    lderr(cct) << "failed to close object map: " << cpp_strerror(r) << dendl;
  }

  revert();
  finish();
}

// Publish the opened object map / journal to the ImageCtx and unblock
// ImageState's prepare-lock wait.
template <typename I>
void PostAcquireRequest<I>::apply() {
  {
    std::unique_lock image_locker{m_image_ctx.image_lock};
    ceph_assert(m_image_ctx.object_map == nullptr);
    m_image_ctx.object_map = m_object_map;

    ceph_assert(m_image_ctx.journal == nullptr);
    m_image_ctx.journal = m_journal;
  }

  m_prepare_lock_completed = true;
  m_image_ctx.state->handle_prepare_lock_complete();
}

// Undo apply(): drop our object map / journal references.  Only valid on
// failure paths (asserts an error was recorded).
template <typename I>
void PostAcquireRequest<I>::revert() {
  std::unique_lock image_locker{m_image_ctx.image_lock};
  m_image_ctx.object_map = nullptr;
  m_image_ctx.journal = nullptr;

  if (m_object_map) {
    m_object_map->put();
  }
  if (m_journal) {
    m_journal->put();
  }

  ceph_assert(m_error_result < 0);
}

template <typename I>
void PostAcquireRequest<I>::finish() {
  m_on_finish->complete(m_error_result);
  delete this;
}

} // namespace exclusive_lock
} // namespace librbd

template class librbd::exclusive_lock::PostAcquireRequest<librbd::ImageCtx>;
9,778
25.501355
80
cc
null
ceph-main/src/librbd/exclusive_lock/PostAcquireRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_POST_ACQUIRE_REQUEST_H
#define CEPH_LIBRBD_EXCLUSIVE_LOCK_POST_ACQUIRE_REQUEST_H

#include "include/int_types.h"
#include "include/buffer.h"
#include "librbd/ImageCtx.h"
#include "msg/msg_types.h"
#include <string>

class Context;

namespace librbd {

namespace exclusive_lock {

// Async state machine run after the exclusive lock has been acquired:
// refresh the image, open the object map and journal, and notify plugins.
// Self-deleting: completes on_finish and destroys itself when done.
template <typename ImageCtxT = ImageCtx>
class PostAcquireRequest {
public:
  static PostAcquireRequest* create(ImageCtxT &image_ctx, Context *on_acquire,
                                    Context *on_finish);

  ~PostAcquireRequest();
  void send();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * REFRESH (skip if not needed)
   *    |
   *    v
   * OPEN_OBJECT_MAP (skip if disabled)
   *    |
   *    v
   * OPEN_JOURNAL (skip if disabled) * * * * * * * *
   *    |                                          *
   *    v                                          *
   * ALLOCATE_JOURNAL_TAG  * * * *                 *
   *    |                        *                 *
   *    v                        *                 *
   * PROCESS_PLUGIN_ACQUIRE  * * *                 *
   *    |                    *                     *
   *    |                    v     v               v
   *    |                  PROCESS_PLUGIN_RELEASE
   *    |                        |
   *    |                        v
   *    |                  CLOSE_JOURNAL
   *    |                        |
   *    |                        v
   *    |                  CLOSE_OBJECT_MAP
   *    |                        |
   *    v                        |
   * <finish> <-----------------/
   *
   * @endverbatim
   */

  PostAcquireRequest(ImageCtxT &image_ctx, Context *on_acquire,
                     Context *on_finish);

  ImageCtxT &m_image_ctx;
  Context *m_on_acquire;   // fired once the lock is usable; owned until fired
  Context *m_on_finish;    // completed with first saved error (or 0)

  // opened resources, published to the ImageCtx by apply()
  decltype(m_image_ctx.object_map) m_object_map;
  decltype(m_image_ctx.journal) m_journal;

  // true once ImageState::handle_prepare_lock_complete was invoked
  bool m_prepare_lock_completed = false;
  int m_error_result;

  void send_refresh();
  void handle_refresh(int r);

  void send_open_journal();
  void handle_open_journal(int r);

  void send_allocate_journal_tag();
  void handle_allocate_journal_tag(int r);

  void send_open_object_map();
  void handle_open_object_map(int r);

  void send_close_journal();
  void handle_close_journal(int r);

  void send_close_object_map();
  void handle_close_object_map(int r);

  void send_process_plugin_acquire_lock();
  void handle_process_plugin_acquire_lock(int r);

  void send_process_plugin_release_lock();
  void handle_process_plugin_release_lock(int r);

  void apply();
  void revert();

  void finish();

  // record only the first error encountered
  void save_result(int result) {
    if (m_error_result == 0 && result < 0) {
      m_error_result = result;
    }
  }
};

} // namespace exclusive_lock
} // namespace librbd

extern template class librbd::exclusive_lock::PostAcquireRequest<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_POST_ACQUIRE_REQUEST_H
2,910
22.288
83
h
null
ceph-main/src/librbd/exclusive_lock/PreAcquireRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "librbd/exclusive_lock/PreAcquireRequest.h"
#include "librbd/Utils.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/ImageState.h"
#include "librbd/asio/ContextWQ.h"

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::exclusive_lock::PreAcquireRequest: " \
                           << this << " " << __func__ << ": "

namespace librbd {
namespace exclusive_lock {

using util::create_async_context_callback;
using util::create_context_callback;
using util::create_rados_callback;

template <typename I>
PreAcquireRequest<I>* PreAcquireRequest<I>::create(I &image_ctx,
                                                   Context *on_finish) {
  return new PreAcquireRequest(image_ctx, on_finish);
}

template <typename I>
PreAcquireRequest<I>::PreAcquireRequest(I &image_ctx, Context *on_finish)
  : m_image_ctx(image_ctx),
    // wrap on_finish so completion is deferred to the work queue instead of
    // running in the caller's (possibly lock-holding) context
    m_on_finish(create_async_context_callback(image_ctx, on_finish)),
    m_error_result(0) {
}

template <typename I>
PreAcquireRequest<I>::~PreAcquireRequest() {
}

// Entry point: PREPARE_LOCK -> FLUSH_NOTIFIES -> finish.
template <typename I>
void PreAcquireRequest<I>::send() {
  send_prepare_lock();
}

template <typename I>
void PreAcquireRequest<I>::send_prepare_lock() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  // acquire the lock if the image is not busy performing other actions
  Context *ctx = create_context_callback<
    PreAcquireRequest<I>, &PreAcquireRequest<I>::handle_prepare_lock>(this);
  m_image_ctx.state->prepare_lock(ctx);
}

template <typename I>
void PreAcquireRequest<I>::handle_prepare_lock(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  // result is only logged; the state machine proceeds unconditionally
  send_flush_notifies();
}

// Flush any in-flight watch/notify callbacks before the lock transition.
template <typename I>
void PreAcquireRequest<I>::send_flush_notifies() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PreAcquireRequest<I>;
  Context *ctx = create_context_callback<klass, &klass::handle_flush_notifies>(
    this);
  m_image_ctx.image_watcher->flush(ctx);
}

template <typename I>
void PreAcquireRequest<I>::handle_flush_notifies(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  // the watcher flush is expected to be infallible
  ceph_assert(r == 0);
  finish();
}

// Complete the (async-wrapped) user callback and self-destruct.
template <typename I>
void PreAcquireRequest<I>::finish() {
  m_on_finish->complete(m_error_result);
  delete this;
}

} // namespace exclusive_lock
} // namespace librbd

template class librbd::exclusive_lock::PreAcquireRequest<librbd::ImageCtx>;
2,609
26.1875
79
cc
null
ceph-main/src/librbd/exclusive_lock/PreAcquireRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_ACQUIRE_REQUEST_H #define CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_ACQUIRE_REQUEST_H #include "include/int_types.h" #include "include/buffer.h" #include "librbd/ImageCtx.h" #include "msg/msg_types.h" #include <string> class Context; namespace librbd { namespace exclusive_lock { template <typename ImageCtxT = ImageCtx> class PreAcquireRequest { public: static PreAcquireRequest* create(ImageCtxT &image_ctx, Context *on_finish); ~PreAcquireRequest(); void send(); private: /** * @verbatim * * <start> * | * v * PREPARE_LOCK * | * v * FLUSH_NOTIFIES * | * | * | v * <finish> * * @endverbatim */ PreAcquireRequest(ImageCtxT &image_ctx, Context *on_finish); ImageCtxT &m_image_ctx; Context *m_on_finish; int m_error_result; void send_prepare_lock(); void handle_prepare_lock(int r); void send_flush_notifies(); void handle_flush_notifies(int r); void finish(); void save_result(int result) { if (m_error_result == 0 && result < 0) { m_error_result = result; } } }; } // namespace exclusive_lock } // namespace librbd extern template class librbd::exclusive_lock::PreAcquireRequest<librbd::ImageCtx>; #endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_ACQUIRE_REQUEST_H
1,420
17.697368
82
h
null
ceph-main/src/librbd/exclusive_lock/PreReleaseRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "librbd/exclusive_lock/PreReleaseRequest.h"
#include "common/AsyncOpTracker.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageState.h"
#include "librbd/ImageWatcher.h"
#include "librbd/Journal.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "librbd/exclusive_lock/ImageDispatch.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/io/Types.h"
#include "librbd/PluginRegistry.h"

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::exclusive_lock::PreReleaseRequest: " \
                           << this << " " << __func__ << ": "

namespace librbd {
namespace exclusive_lock {

using util::create_async_context_callback;
using util::create_context_callback;

template <typename I>
PreReleaseRequest<I>* PreReleaseRequest<I>::create(
    I &image_ctx, ImageDispatch<I>* image_dispatch, bool shutting_down,
    AsyncOpTracker &async_op_tracker, Context *on_finish) {
  return new PreReleaseRequest(image_ctx, image_dispatch, shutting_down,
                               async_op_tracker, on_finish);
}

template <typename I>
PreReleaseRequest<I>::PreReleaseRequest(I &image_ctx,
                                        ImageDispatch<I>* image_dispatch,
                                        bool shutting_down,
                                        AsyncOpTracker &async_op_tracker,
                                        Context *on_finish)
  : m_image_ctx(image_ctx), m_image_dispatch(image_dispatch),
    m_shutting_down(shutting_down), m_async_op_tracker(async_op_tracker),
    // defer on_finish completion to the work queue
    m_on_finish(create_async_context_callback(image_ctx, on_finish)) {
}

template <typename I>
PreReleaseRequest<I>::~PreReleaseRequest() {
  // release the PREPARE_LOCK hold taken by send_prepare_lock(); the
  // shutting-down path never acquired it
  if (!m_shutting_down) {
    m_image_ctx.state->handle_prepare_lock_complete();
  }
}

// Entry point: cancel ops -> block IO -> drain ops -> prepare lock ->
// plugin release -> invalidate cache -> flush IO/notifies -> close
// journal/object map -> finish.
template <typename I>
void PreReleaseRequest<I>::send() {
  send_cancel_op_requests();
}

template <typename I>
void PreReleaseRequest<I>::send_cancel_op_requests() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PreReleaseRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_cancel_op_requests>(this);
  m_image_ctx.cancel_async_requests(ctx);
}

template <typename I>
void PreReleaseRequest<I>::handle_cancel_op_requests(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  // cancellation is expected to be infallible
  ceph_assert(r == 0);

  send_set_require_lock();
}

template <typename I>
void PreReleaseRequest<I>::send_set_require_lock() {
  if (!m_image_ctx.test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) {
    // exclusive-lock was disabled, no need to block IOs
    send_wait_for_ops();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PreReleaseRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_set_require_lock>(this);

  // setting the lock as required will automatically cause the IO
  // queue to re-request the lock if any IO is queued
  if (m_image_ctx.clone_copy_on_read ||
      m_image_ctx.test_features(RBD_FEATURE_JOURNALING) ||
      m_image_ctx.test_features(RBD_FEATURE_DIRTY_CACHE)) {
    // these features make even reads depend on lock ownership
    m_image_dispatch->set_require_lock(m_shutting_down,
                                       io::DIRECTION_BOTH, ctx);
  } else {
    m_image_dispatch->set_require_lock(m_shutting_down, io::DIRECTION_WRITE,
                                       ctx);
  }
}

template <typename I>
void PreReleaseRequest<I>::handle_set_require_lock(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  if (r < 0) {
    // IOs are still flushed regardless of the error
    lderr(cct) << "failed to set lock: " << cpp_strerror(r) << dendl;
  }

  send_wait_for_ops();
}

// Wait for all in-flight tracked ops to drain before quiescing the image.
template <typename I>
void PreReleaseRequest<I>::send_wait_for_ops() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  Context *ctx = create_context_callback<
    PreReleaseRequest<I>, &PreReleaseRequest<I>::handle_wait_for_ops>(this);
  m_async_op_tracker.wait_for_ops(ctx);
}

template <typename I>
void PreReleaseRequest<I>::handle_wait_for_ops(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  send_prepare_lock();
}

template <typename I>
void PreReleaseRequest<I>::send_prepare_lock() {
  if (m_shutting_down) {
    // image is closing anyway; skip the state-machine interlock
    send_process_plugin_release_lock();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  // release the lock if the image is not busy performing other actions
  Context *ctx = create_context_callback<
    PreReleaseRequest<I>, &PreReleaseRequest<I>::handle_prepare_lock>(this);
  m_image_ctx.state->prepare_lock(ctx);
}

template <typename I>
void PreReleaseRequest<I>::handle_prepare_lock(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  // result is only logged; the state machine proceeds unconditionally
  send_process_plugin_release_lock();
}

template <typename I>
void PreReleaseRequest<I>::send_process_plugin_release_lock() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  std::shared_lock owner_lock{m_image_ctx.owner_lock};
  Context *ctx = create_async_context_callback(m_image_ctx,
    create_context_callback<
      PreReleaseRequest<I>,
      &PreReleaseRequest<I>::handle_process_plugin_release_lock>(this));
  m_image_ctx.plugin_registry->prerelease_exclusive_lock(ctx);
}

template <typename I>
void PreReleaseRequest<I>::handle_process_plugin_release_lock(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  if (r < 0) {
    lderr(cct) << "failed to handle plugins before releasing lock: "
               << cpp_strerror(r) << dendl;
    // abort the release: re-allow IO and report the error
    m_image_dispatch->unset_require_lock(io::DIRECTION_BOTH);
    save_result(r);
    finish();
    return;
  }

  send_invalidate_cache();
}

template <typename I>
void PreReleaseRequest<I>::send_invalidate_cache() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  Context *ctx = create_context_callback<
      PreReleaseRequest<I>,
      &PreReleaseRequest<I>::handle_invalidate_cache>(this);
  m_image_ctx.io_image_dispatcher->invalidate_cache(ctx);
}

template <typename I>
void PreReleaseRequest<I>::handle_invalidate_cache(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  // -EBLOCKLISTED / -EBUSY are tolerated; any other error aborts the release
  if (r < 0 && r != -EBLOCKLISTED && r != -EBUSY) {
    lderr(cct) << "failed to invalidate cache: " << cpp_strerror(r)
               << dendl;
    m_image_dispatch->unset_require_lock(io::DIRECTION_BOTH);
    save_result(r);
    finish();
    return;
  }

  send_flush_io();
}

template <typename I>
void PreReleaseRequest<I>::send_flush_io() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  // ensure that all in-flight IO is flushed -- skipping the refresh layer
  // since it should have been flushed when the lock was required and now
  // refreshes are disabled / interlocked w/ this state machine.
  auto ctx = create_context_callback<
    PreReleaseRequest<I>, &PreReleaseRequest<I>::handle_flush_io>(this);
  auto aio_comp = io::AioCompletion::create_and_start(
    ctx, util::get_image_ctx(&m_image_ctx), librbd::io::AIO_TYPE_FLUSH);
  auto req = io::ImageDispatchSpec::create_flush(
    m_image_ctx, io::IMAGE_DISPATCH_LAYER_EXCLUSIVE_LOCK, aio_comp,
    io::FLUSH_SOURCE_EXCLUSIVE_LOCK_SKIP_REFRESH, {});
  req->send();
}

template <typename I>
void PreReleaseRequest<I>::handle_flush_io(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  if (r < 0) {
    // error is logged but the release continues
    lderr(cct) << "failed to flush IO: " << cpp_strerror(r) << dendl;
  }

  send_flush_notifies();
}

// Flush any in-flight watch/notify callbacks before tearing down state.
template <typename I>
void PreReleaseRequest<I>::send_flush_notifies() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PreReleaseRequest<I>;
  Context *ctx =
    create_context_callback<klass, &klass::handle_flush_notifies>(this);
  m_image_ctx.image_watcher->flush(ctx);
}

template <typename I>
void PreReleaseRequest<I>::handle_flush_notifies(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  ceph_assert(r == 0);
  send_close_journal();
}

template <typename I>
void PreReleaseRequest<I>::send_close_journal() {
  {
    // detach the journal from the image ctx so no new users appear
    std::unique_lock image_locker{m_image_ctx.image_lock};
    std::swap(m_journal, m_image_ctx.journal);
  }

  if (m_journal == nullptr) {
    // journaling disabled / not open
    send_close_object_map();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PreReleaseRequest<I>;
  Context *ctx = create_context_callback<klass, &klass::handle_close_journal>(
    this);
  m_journal->close(ctx);
}

template <typename I>
void PreReleaseRequest<I>::handle_close_journal(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  if (r < 0) {
    // error implies some journal events were not flushed -- continue
    lderr(cct) << "failed to close journal: " << cpp_strerror(r) << dendl;
  }

  m_journal->put();
  m_journal = nullptr;

  send_close_object_map();
}

template <typename I>
void PreReleaseRequest<I>::send_close_object_map() {
  {
    // detach the object map from the image ctx so no new users appear
    std::unique_lock image_locker{m_image_ctx.image_lock};
    std::swap(m_object_map, m_image_ctx.object_map);
  }

  if (m_object_map == nullptr) {
    send_unlock();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PreReleaseRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_close_object_map>(this, m_object_map);
  m_object_map->close(ctx);
}

template <typename I>
void PreReleaseRequest<I>::handle_close_object_map(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  if (r < 0) {
    // error is logged but the release continues
    lderr(cct) << "failed to close object map: " << cpp_strerror(r) << dendl;
  }
  m_object_map->put();

  send_unlock();
}

// Terminal step: the actual unlock is performed by the caller after this
// request completes.
template <typename I>
void PreReleaseRequest<I>::send_unlock() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  finish();
}

// Complete the (async-wrapped) user callback and self-destruct.
template <typename I>
void PreReleaseRequest<I>::finish() {
  m_on_finish->complete(m_error_result);
  delete this;
}

} // namespace exclusive_lock
} // namespace librbd

template class librbd::exclusive_lock::PreReleaseRequest<librbd::ImageCtx>;
10,457
27.730769
84
cc
null
ceph-main/src/librbd/exclusive_lock/PreReleaseRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_RELEASE_REQUEST_H
#define CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_RELEASE_REQUEST_H

#include "librbd/ImageCtx.h"
#include <string>

class AsyncOpTracker;
class Context;

namespace librbd {

struct ImageCtx;

namespace exclusive_lock {

template <typename> struct ImageDispatch;

/// Async state machine that quiesces an image before the exclusive lock is
/// released: cancels async op requests, blocks new IO, drains in-flight ops,
/// runs plugin release hooks, invalidates/flushes caches and closes the
/// journal and object map.  Self-deleting after @c on_finish completes.
template <typename ImageCtxT = ImageCtx>
class PreReleaseRequest {
public:
  /// Factory helper; the object is heap-allocated and self-deleting.
  /// @param image_ctx        image whose lock is being released
  /// @param image_dispatch   exclusive-lock IO dispatch layer to (un)block
  /// @param shutting_down    true when the image is closing entirely
  /// @param async_op_tracker tracker drained before quiescing
  /// @param on_finish        completion fired with the final result code
  static PreReleaseRequest* create(ImageCtxT &image_ctx,
                                   ImageDispatch<ImageCtxT>* image_dispatch,
                                   bool shutting_down,
                                   AsyncOpTracker &async_op_tracker,
                                   Context *on_finish);

  ~PreReleaseRequest();

  /// Start the state machine (see diagram below).
  void send();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * CANCEL_OP_REQUESTS
   *    |
   *    v
   * SET_REQUIRE_LOCK
   *    |
   *    v
   * WAIT_FOR_OPS
   *    |
   *    v
   * PREPARE_LOCK
   *    |
   *    v
   * PROCESS_PLUGIN_RELEASE
   *    |
   *    v
   * SHUT_DOWN_IMAGE_CACHE
   *    |
   *    v
   * INVALIDATE_CACHE
   *    |
   *    v
   * FLUSH_IO
   *    |
   *    v
   * FLUSH_NOTIFIES . . . . . . . . . . . . . .
   *    |                                     .
   *    v                                     .
   * CLOSE_JOURNAL                            .
   *    |             (journal disabled,      .
   *    v              object map enabled)    .
   * CLOSE_OBJECT_MAP < . . . . . . . . . . . .
   *    |                                     .
   *    v             (object map disabled)   .
   * <finish> < . . . . . . . . . . . . . . . .
   *
   * @endverbatim
   */

  PreReleaseRequest(ImageCtxT &image_ctx,
                    ImageDispatch<ImageCtxT>* image_dispatch,
                    bool shutting_down, AsyncOpTracker &async_op_tracker,
                    Context *on_finish);

  ImageCtxT &m_image_ctx;
  ImageDispatch<ImageCtxT>* m_image_dispatch;
  bool m_shutting_down;
  AsyncOpTracker &m_async_op_tracker;
  Context *m_on_finish;   // final completion (receives m_error_result)

  int m_error_result = 0; // first error observed (0 on success)

  // journal / object map detached from the image ctx while closing
  decltype(m_image_ctx.object_map) m_object_map = nullptr;
  decltype(m_image_ctx.journal) m_journal = nullptr;

  void send_cancel_op_requests();
  void handle_cancel_op_requests(int r);

  void send_set_require_lock();
  void handle_set_require_lock(int r);

  void send_wait_for_ops();
  void handle_wait_for_ops(int r);

  void send_prepare_lock();
  void handle_prepare_lock(int r);

  void send_process_plugin_release_lock();
  void handle_process_plugin_release_lock(int r);

  void send_invalidate_cache();
  void handle_invalidate_cache(int r);

  void send_flush_io();
  void handle_flush_io(int r);

  void send_flush_notifies();
  void handle_flush_notifies(int r);

  void send_close_journal();
  void handle_close_journal(int r);

  void send_close_object_map();
  void handle_close_object_map(int r);

  void send_unlock();

  void finish();

  // record the first failure only; later errors do not overwrite it
  void save_result(int result) {
    if (m_error_result == 0 && result < 0) {
      m_error_result = result;
    }
  }
};

} // namespace exclusive_lock
} // namespace librbd

#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_RELEASE_REQUEST_H
3,252
22.235714
76
h
null
ceph-main/src/librbd/exclusive_lock/StandardPolicy.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/exclusive_lock/StandardPolicy.h" #include "librbd/ImageCtx.h" #include "librbd/ExclusiveLock.h" #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::ExclusiveLock::StandardPolicy " namespace librbd { namespace exclusive_lock { template <typename I> int StandardPolicy<I>::lock_requested(bool force) { ceph_assert(ceph_mutex_is_locked(m_image_ctx->owner_lock)); ceph_assert(m_image_ctx->exclusive_lock != nullptr); ldout(m_image_ctx->cct, 20) << this << " " << __func__ << ": force=" << force << dendl; return -EROFS; } } // namespace exclusive_lock } // namespace librbd template class librbd::exclusive_lock::StandardPolicy<librbd::ImageCtx>;
825
26.533333
79
cc
null
ceph-main/src/librbd/exclusive_lock/StandardPolicy.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_STANDARD_POLICY_H #define CEPH_LIBRBD_EXCLUSIVE_LOCK_STANDARD_POLICY_H #include "librbd/exclusive_lock/Policy.h" namespace librbd { struct ImageCtx; namespace exclusive_lock { template <typename ImageCtxT = ImageCtx> class StandardPolicy : public Policy { public: StandardPolicy(ImageCtxT* image_ctx) : m_image_ctx(image_ctx) { } bool may_auto_request_lock() override { return false; } int lock_requested(bool force) override; private: ImageCtxT* m_image_ctx; }; } // namespace exclusive_lock } // namespace librbd extern template class librbd::exclusive_lock::StandardPolicy<librbd::ImageCtx>; #endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_STANDARD_POLICY_H
812
20.394737
79
h
null
ceph-main/src/librbd/image/AttachChildRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "librbd/image/AttachChildRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/image/RefreshRequest.h"

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::AttachChildRequest: " << this \
                           << " " << __func__ << ": "

namespace librbd {
namespace image {

using util::create_context_callback;
using util::create_rados_callback;

template <typename I>
AttachChildRequest<I>::AttachChildRequest(
    I *image_ctx, I *parent_image_ctx, const librados::snap_t &parent_snap_id,
    I *old_parent_image_ctx, const librados::snap_t &old_parent_snap_id,
    uint32_t clone_format, Context* on_finish)
  : m_image_ctx(image_ctx), m_parent_image_ctx(parent_image_ctx),
    m_parent_snap_id(parent_snap_id),
    m_old_parent_image_ctx(old_parent_image_ctx),
    m_old_parent_snap_id(old_parent_snap_id), m_clone_format(clone_format),
    m_on_finish(on_finish), m_cct(m_image_ctx->cct) {
}

// Dispatch to the v1 or v2 attach path based on the requested clone format.
template <typename I>
void AttachChildRequest<I>::send() {
  if (m_clone_format == 1) {
    v1_add_child();
  } else {
    v2_set_op_feature();
  }
}

// v1: register the child in the pool-level RBD_CHILDREN object.
template <typename I>
void AttachChildRequest<I>::v1_add_child() {
  ldout(m_cct, 15) << dendl;

  librados::ObjectWriteOperation op;
  // NOTE(review): the namespace field is passed as "" here (v1 children
  // records appear not to carry a namespace), while the v1 remove path
  // below passes the old parent's namespace -- confirm against cls_rbd.
  cls_client::add_child(&op, {m_parent_image_ctx->md_ctx.get_id(),
                              "", m_parent_image_ctx->id,
                              m_parent_snap_id}, m_image_ctx->id);

  using klass = AttachChildRequest<I>;
  librados::AioCompletion *comp =
    create_rados_callback<klass, &klass::handle_v1_add_child>(this);
  int r = m_image_ctx->md_ctx.aio_operate(RBD_CHILDREN, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}

template <typename I>
void AttachChildRequest<I>::handle_v1_add_child(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r < 0) {
    if (r == -EEXIST && m_old_parent_image_ctx != nullptr) {
      // re-attach (e.g. migration): an existing record is acceptable
      ldout(m_cct, 5) << "child already exists" << dendl;
    } else {
      lderr(m_cct) << "couldn't add child: " << cpp_strerror(r) << dendl;
      finish(r);
      return;
    }
  }

  v1_refresh();
}

// v1: refresh the parent so the protection status check sees current state.
template <typename I>
void AttachChildRequest<I>::v1_refresh() {
  ldout(m_cct, 15) << dendl;

  using klass = AttachChildRequest<I>;
  RefreshRequest<I> *req = RefreshRequest<I>::create(
    *m_parent_image_ctx, false, false,
    create_context_callback<klass, &klass::handle_v1_refresh>(this));
  req->send();
}

template <typename I>
void AttachChildRequest<I>::handle_v1_refresh(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  // v1 clones require the parent snapshot to be protected
  bool snap_protected = false;
  if (r == 0) {
    std::shared_lock image_locker{m_parent_image_ctx->image_lock};
    r = m_parent_image_ctx->is_snap_protected(m_parent_snap_id,
                                              &snap_protected);
  }

  if (r < 0 || !snap_protected) {
    lderr(m_cct) << "validate protected failed" << dendl;
    finish(-EINVAL);
    return;
  }

  v1_remove_child_from_old_parent();
}

// v1: drop the record tying this image to its previous parent (if any).
template <typename I>
void AttachChildRequest<I>::v1_remove_child_from_old_parent() {
  if (m_old_parent_image_ctx == nullptr) {
    // fresh clone -- nothing to detach
    finish(0);
    return;
  }

  ldout(m_cct, 15) << dendl;

  librados::ObjectWriteOperation op;
  cls_client::remove_child(&op, {m_old_parent_image_ctx->md_ctx.get_id(),
                                 m_old_parent_image_ctx->md_ctx.get_namespace(),
                                 m_old_parent_image_ctx->id,
                                 m_old_parent_snap_id}, m_image_ctx->id);

  using klass = AttachChildRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v1_remove_child_from_old_parent>(this);
  int r = m_image_ctx->md_ctx.aio_operate(RBD_CHILDREN, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}

template <typename I>
void AttachChildRequest<I>::handle_v1_remove_child_from_old_parent(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  // a missing record (-ENOENT) is not an error
  if (r < 0 && r != -ENOENT) {
    lderr(m_cct) << "couldn't remove child: " << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }

  finish(0);
}

// v2: flag the child image header with the clone-child op feature.
template <typename I>
void AttachChildRequest<I>::v2_set_op_feature() {
  ldout(m_cct, 15) << dendl;

  librados::ObjectWriteOperation op;
  cls_client::op_features_set(&op, RBD_OPERATION_FEATURE_CLONE_CHILD,
                              RBD_OPERATION_FEATURE_CLONE_CHILD);

  using klass = AttachChildRequest<I>;
  auto aio_comp = create_rados_callback<
    klass, &klass::handle_v2_set_op_feature>(this);
  int r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, aio_comp,
                                          &op);
  ceph_assert(r == 0);
  aio_comp->release();
}

template <typename I>
void AttachChildRequest<I>::handle_v2_set_op_feature(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_cct) << "failed to enable clone v2: " << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }

  v2_child_attach();
}

// v2: record the child in the parent image header (per-snapshot child list).
template <typename I>
void AttachChildRequest<I>::v2_child_attach() {
  ldout(m_cct, 15) << dendl;

  librados::ObjectWriteOperation op;
  cls_client::child_attach(&op, m_parent_snap_id,
                           {m_image_ctx->md_ctx.get_id(),
                            m_image_ctx->md_ctx.get_namespace(),
                            m_image_ctx->id});

  using klass = AttachChildRequest<I>;
  auto aio_comp = create_rados_callback<
    klass, &klass::handle_v2_child_attach>(this);
  int r = m_parent_image_ctx->md_ctx.aio_operate(m_parent_image_ctx->header_oid,
                                                 aio_comp, &op);
  ceph_assert(r == 0);
  aio_comp->release();
}

template <typename I>
void AttachChildRequest<I>::handle_v2_child_attach(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r < 0) {
    if (r == -EEXIST && m_old_parent_image_ctx != nullptr) {
      // re-attach (e.g. migration): an existing record is acceptable
      ldout(m_cct, 5) << "child already exists" << dendl;
    } else {
      lderr(m_cct) << "failed to attach child image: " << cpp_strerror(r)
                   << dendl;
      finish(r);
      return;
    }
  }

  v2_child_detach_from_old_parent();
}

// v2: drop the record tying this image to its previous parent (if any).
template <typename I>
void AttachChildRequest<I>::v2_child_detach_from_old_parent() {
  if (m_old_parent_image_ctx == nullptr) {
    // fresh clone -- nothing to detach
    finish(0);
    return;
  }

  ldout(m_cct, 15) << dendl;

  librados::ObjectWriteOperation op;
  cls_client::child_detach(&op, m_old_parent_snap_id,
                           {m_image_ctx->md_ctx.get_id(),
                            m_image_ctx->md_ctx.get_namespace(),
                            m_image_ctx->id});

  using klass = AttachChildRequest<I>;
  auto aio_comp = create_rados_callback<
    klass, &klass::handle_v2_child_detach_from_old_parent>(this);
  int r = m_old_parent_image_ctx->md_ctx.aio_operate(
    m_old_parent_image_ctx->header_oid, aio_comp, &op);
  ceph_assert(r == 0);
  aio_comp->release();
}

template <typename I>
void AttachChildRequest<I>::handle_v2_child_detach_from_old_parent(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  // a missing record (-ENOENT) is not an error
  if (r < 0 && r != -ENOENT) {
    lderr(m_cct) << "failed to detach child image: " << cpp_strerror(r)
                 << dendl;
    finish(r);
    return;
  }

  finish(0);
}

// Complete the user callback and self-destruct.
template <typename I>
void AttachChildRequest<I>::finish(int r) {
  ldout(m_cct, 5) << "r=" << r << dendl;

  m_on_finish->complete(r);
  delete this;
}

} // namespace image
} // namespace librbd

template class librbd::image::AttachChildRequest<librbd::ImageCtx>;
7,622
28.09542
80
cc
null
ceph-main/src/librbd/image/AttachChildRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_IMAGE_ATTACH_CHILD_REQUEST_H
#define CEPH_LIBRBD_IMAGE_ATTACH_CHILD_REQUEST_H

#include "include/common_fwd.h"
#include "include/int_types.h"
#include "include/rados/librados.hpp"

class Context;

namespace librbd {

class ImageCtx;

namespace image {

/// Async state machine that links a cloned child image to its parent
/// snapshot, using either the clone v1 (pool-level RBD_CHILDREN object) or
/// clone v2 (per-snapshot child list in the parent header) scheme, and
/// detaches the image from an optional previous parent.  Self-deleting
/// after @c on_finish completes.
template <typename ImageCtxT = ImageCtx>
class AttachChildRequest {
public:
  /// Factory helper; the object is heap-allocated and self-deleting.
  /// @param image_ctx            child image being attached
  /// @param parent_image_ctx     new parent image
  /// @param parent_snap_id       snapshot of the new parent to clone from
  /// @param old_parent_image_ctx previous parent (nullptr for a fresh clone)
  /// @param old_parent_snap_id   snapshot of the previous parent
  /// @param clone_format         1 or 2, selecting the attach scheme
  /// @param on_finish            completion fired with the final result code
  static AttachChildRequest* create(ImageCtxT *image_ctx,
                                    ImageCtxT *parent_image_ctx,
                                    const librados::snap_t &parent_snap_id,
                                    ImageCtxT *old_parent_image_ctx,
                                    const librados::snap_t &old_parent_snap_id,
                                    uint32_t clone_format,
                                    Context* on_finish) {
    return new AttachChildRequest(image_ctx, parent_image_ctx, parent_snap_id,
                                  old_parent_image_ctx, old_parent_snap_id,
                                  clone_format, on_finish);
  }

  AttachChildRequest(ImageCtxT *image_ctx,
                     ImageCtxT *parent_image_ctx,
                     const librados::snap_t &parent_snap_id,
                     ImageCtxT *old_parent_image_ctx,
                     const librados::snap_t &old_parent_snap_id,
                     uint32_t clone_format, Context* on_finish);

  /// Start the state machine (see diagram below).
  void send();

private:
  /**
   * @verbatim
   *
   *                 <start>
   *    (clone v1)      |         (clone v2)
   *    /----------------/ \---------------\
   *    |                                  |
   *    v                                  v
   * V1 ADD CHILD                    V2 SET CLONE
   *    |                                  |
   *    v                                  v
   * V1 VALIDATE PROTECTED           V2 ATTACH CHILD
   *    |                                  |
   *    |                                  v
   * V1 REMOVE CHILD FROM OLD PARENT V2 DETACH CHILD FROM OLD PARENT
   *    |                                  |
   *    \----------------\ /---------------/
   *                      |
   *                      v
   *                  <finish>
   *
   * @endverbatim
   */

  ImageCtxT *m_image_ctx;              // child image (not owned)
  ImageCtxT *m_parent_image_ctx;       // new parent image (not owned)
  librados::snap_t m_parent_snap_id;
  ImageCtxT *m_old_parent_image_ctx;   // previous parent or nullptr
  librados::snap_t m_old_parent_snap_id;
  uint32_t m_clone_format;             // 1 = legacy, anything else = v2
  Context* m_on_finish;

  CephContext *m_cct;

  void v1_add_child();
  void handle_v1_add_child(int r);

  void v1_refresh();
  void handle_v1_refresh(int r);

  void v1_remove_child_from_old_parent();
  void handle_v1_remove_child_from_old_parent(int r);

  void v2_set_op_feature();
  void handle_v2_set_op_feature(int r);

  void v2_child_attach();
  void handle_v2_child_attach(int r);

  void v2_child_detach_from_old_parent();
  void handle_v2_child_detach_from_old_parent(int r);

  void finish(int r);
};

} // namespace image
} // namespace librbd

extern template class librbd::image::AttachChildRequest<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_IMAGE_ATTACH_CHILD_REQUEST_H
3,250
29.669811
80
h
null
ceph-main/src/librbd/image/AttachParentRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "librbd/image/AttachParentRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::AttachParentRequest: " << this \
                           << " " << __func__ << ": "

namespace librbd {
namespace image {

using util::create_rados_callback;

template <typename I>
void AttachParentRequest<I>::send() {
  attach_parent();
}

// Issue the header update that links this image to its parent, using the
// modern parent_attach op or, on the legacy retry path, set_parent.
template <typename I>
void AttachParentRequest<I>::attach_parent() {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << "parent_image_spec=" << m_parent_image_spec << dendl;

  librados::ObjectWriteOperation op;
  if (!m_legacy_parent) {
    librbd::cls_client::parent_attach(&op, m_parent_image_spec,
                                      m_parent_overlap, m_reattach);
  } else {
    // legacy set_parent: no namespace / reattach support
    librbd::cls_client::set_parent(&op, m_parent_image_spec, m_parent_overlap);
  }

  auto aio_comp = create_rados_callback<
    AttachParentRequest<I>,
    &AttachParentRequest<I>::handle_attach_parent>(this);
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, aio_comp,
                                         &op);
  ceph_assert(r == 0);
  aio_comp->release();
}

template <typename I>
void AttachParentRequest<I>::handle_attach_parent(int r) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << dendl;

  // -EOPNOTSUPP: OSD predates parent_attach; retry once with the legacy op
  // if the parent spec can be expressed without a pool namespace
  if (!m_legacy_parent && r == -EOPNOTSUPP && !m_reattach) {
    if (m_parent_image_spec.pool_namespace ==
          m_image_ctx.md_ctx.get_namespace()) {
      // matching namespace is implied by the legacy op, so it can be dropped
      m_parent_image_spec.pool_namespace = "";
    }
    if (m_parent_image_spec.pool_namespace.empty()) {
      ldout(cct, 10) << "retrying using legacy parent method" << dendl;
      m_legacy_parent = true;
      attach_parent();
      return;
    }

    // namespaces require newer OSDs
    r = -EXDEV;
  }

  if (r < 0) {
    lderr(cct) << "attach parent encountered an error: " << cpp_strerror(r)
               << dendl;
    finish(r);
    return;
  }

  finish(0);
}

// Complete the user callback and self-destruct.
template <typename I>
void AttachParentRequest<I>::finish(int r) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;

  m_on_finish->complete(r);
  delete this;
}

} // namespace image
} // namespace librbd

template class librbd::image::AttachParentRequest<librbd::ImageCtx>;
2,419
25.593407
80
cc
null
ceph-main/src/librbd/image/AttachParentRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IMAGE_ATTACH_PARENT_REQUEST_H #define CEPH_LIBRBD_IMAGE_ATTACH_PARENT_REQUEST_H #include "include/int_types.h" #include "include/buffer.h" #include "include/rados/librados.hpp" #include "librbd/Types.h" class Context; namespace librbd { class ImageCtx; namespace image { template <typename ImageCtxT = ImageCtx> class AttachParentRequest { public: static AttachParentRequest* create(ImageCtxT& image_ctx, const cls::rbd::ParentImageSpec& pspec, uint64_t parent_overlap, bool reattach, Context* on_finish) { return new AttachParentRequest(image_ctx, pspec, parent_overlap, reattach, on_finish); } AttachParentRequest(ImageCtxT& image_ctx, const cls::rbd::ParentImageSpec& pspec, uint64_t parent_overlap, bool reattach, Context* on_finish) : m_image_ctx(image_ctx), m_parent_image_spec(pspec), m_parent_overlap(parent_overlap), m_reattach(reattach), m_on_finish(on_finish) { } void send(); private: /** * @verbatim * * <start> * | * * * * * * * | * * -EOPNOTSUPP * v v * * ATTACH_PARENT * * * * | * v * <finish> * * @endverbatim */ ImageCtxT& m_image_ctx; cls::rbd::ParentImageSpec m_parent_image_spec; uint64_t m_parent_overlap; bool m_reattach; Context* m_on_finish; bool m_legacy_parent = false; void attach_parent(); void handle_attach_parent(int r); void finish(int r); }; } // namespace image } // namespace librbd extern template class librbd::image::AttachParentRequest<librbd::ImageCtx>; #endif // CEPH_LIBRBD_IMAGE_ATTACH_PARENT_REQUEST_H
1,961
23.525
78
h
null
ceph-main/src/librbd/image/CloneRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "cls/rbd/cls_rbd_client.h" #include "cls/rbd/cls_rbd_types.h" #include "common/dout.h" #include "common/errno.h" #include "include/ceph_assert.h" #include "librbd/ImageState.h" #include "librbd/Utils.h" #include "librbd/asio/ContextWQ.h" #include "librbd/deep_copy/MetadataCopyRequest.h" #include "librbd/image/AttachChildRequest.h" #include "librbd/image/AttachParentRequest.h" #include "librbd/image/CloneRequest.h" #include "librbd/image/CreateRequest.h" #include "librbd/image/RemoveRequest.h" #include "librbd/image/Types.h" #include "librbd/mirror/EnableRequest.h" #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::image::CloneRequest: " << this << " " \ << __func__ << ": " #define MAX_KEYS 64 namespace librbd { namespace image { using util::create_rados_callback; using util::create_context_callback; using util::create_async_context_callback; template <typename I> CloneRequest<I>::CloneRequest( ConfigProxy& config, IoCtx& parent_io_ctx, const std::string& parent_image_id, const std::string& parent_snap_name, const cls::rbd::SnapshotNamespace& parent_snap_namespace, uint64_t parent_snap_id, IoCtx &c_ioctx, const std::string &c_name, const std::string &c_id, ImageOptions c_options, cls::rbd::MirrorImageMode mirror_image_mode, const std::string &non_primary_global_image_id, const std::string &primary_mirror_uuid, asio::ContextWQ *op_work_queue, Context *on_finish) : m_config(config), m_parent_io_ctx(parent_io_ctx), m_parent_image_id(parent_image_id), m_parent_snap_name(parent_snap_name), m_parent_snap_namespace(parent_snap_namespace), m_parent_snap_id(parent_snap_id), m_ioctx(c_ioctx), m_name(c_name), m_id(c_id), m_opts(c_options), m_mirror_image_mode(mirror_image_mode), m_non_primary_global_image_id(non_primary_global_image_id), m_primary_mirror_uuid(primary_mirror_uuid), m_op_work_queue(op_work_queue), 
m_on_finish(on_finish), m_use_p_features(true) { m_cct = reinterpret_cast<CephContext *>(m_ioctx.cct()); bool default_format_set; m_opts.is_set(RBD_IMAGE_OPTION_FORMAT, &default_format_set); if (!default_format_set) { m_opts.set(RBD_IMAGE_OPTION_FORMAT, static_cast<uint64_t>(2)); } ldout(m_cct, 20) << "parent_pool_id=" << parent_io_ctx.get_id() << ", " << "parent_image_id=" << parent_image_id << ", " << "parent_snap=" << parent_snap_name << "/" << parent_snap_id << " clone to " << "pool_id=" << m_ioctx.get_id() << ", " << "name=" << m_name << ", " << "opts=" << m_opts << dendl; } template <typename I> void CloneRequest<I>::send() { ldout(m_cct, 20) << dendl; validate_options(); } template <typename I> void CloneRequest<I>::validate_options() { ldout(m_cct, 20) << dendl; uint64_t format = 0; m_opts.get(RBD_IMAGE_OPTION_FORMAT, &format); if (format < 2) { lderr(m_cct) << "format 2 or later required for clone" << dendl; complete(-EINVAL); return; } if (m_opts.get(RBD_IMAGE_OPTION_FEATURES, &m_features) == 0) { if (m_features & ~RBD_FEATURES_ALL) { lderr(m_cct) << "librbd does not support requested features" << dendl; complete(-ENOSYS); return; } m_use_p_features = false; } if (m_opts.get(RBD_IMAGE_OPTION_CLONE_FORMAT, &m_clone_format) < 0) { std::string default_clone_format = m_config.get_val<std::string>( "rbd_default_clone_format"); if (default_clone_format == "1") { m_clone_format = 1; } else if (default_clone_format == "auto") { librados::Rados rados(m_ioctx); int8_t min_compat_client; int8_t require_min_compat_client; int r = rados.get_min_compatible_client(&min_compat_client, &require_min_compat_client); if (r < 0) { complete(r); return; } if (std::max(min_compat_client, require_min_compat_client) < CEPH_RELEASE_MIMIC) { m_clone_format = 1; } } } if (m_clone_format == 1 && m_parent_io_ctx.get_namespace() != m_ioctx.get_namespace()) { ldout(m_cct, 1) << "clone v2 required for cross-namespace clones" << dendl; complete(-EXDEV); return; } open_parent(); } template 
<typename I> void CloneRequest<I>::open_parent() { ldout(m_cct, 20) << dendl; ceph_assert(m_parent_snap_name.empty() ^ (m_parent_snap_id == CEPH_NOSNAP)); if (m_parent_snap_id != CEPH_NOSNAP) { m_parent_image_ctx = I::create("", m_parent_image_id, m_parent_snap_id, m_parent_io_ctx, true); } else { m_parent_image_ctx = I::create("", m_parent_image_id, m_parent_snap_name.c_str(), m_parent_io_ctx, true); m_parent_image_ctx->snap_namespace = m_parent_snap_namespace; } Context *ctx = create_context_callback< CloneRequest<I>, &CloneRequest<I>::handle_open_parent>(this); m_parent_image_ctx->state->open(OPEN_FLAG_SKIP_OPEN_PARENT, ctx); } template <typename I> void CloneRequest<I>::handle_open_parent(int r) { ldout(m_cct, 20) << "r=" << r << dendl; if (r < 0) { m_parent_image_ctx = nullptr; lderr(m_cct) << "failed to open parent image: " << cpp_strerror(r) << dendl; complete(r); return; } m_parent_snap_id = m_parent_image_ctx->snap_id; m_pspec = {m_parent_io_ctx.get_id(), m_parent_io_ctx.get_namespace(), m_parent_image_id, m_parent_snap_id}; validate_parent(); } template <typename I> void CloneRequest<I>::validate_parent() { ldout(m_cct, 20) << dendl; if (m_parent_image_ctx->operations_disabled) { lderr(m_cct) << "image operations disabled due to unsupported op features" << dendl; m_r_saved = -EROFS; close_parent(); return; } if (m_parent_image_ctx->snap_id == CEPH_NOSNAP) { lderr(m_cct) << "image to be cloned must be a snapshot" << dendl; m_r_saved = -EINVAL; close_parent(); return; } if (m_parent_image_ctx->old_format) { lderr(m_cct) << "parent image must be in new format" << dendl; m_r_saved = -EINVAL; close_parent(); return; } m_parent_image_ctx->image_lock.lock_shared(); uint64_t p_features = m_parent_image_ctx->features; m_size = m_parent_image_ctx->get_image_size(m_parent_image_ctx->snap_id); bool snap_protected; int r = m_parent_image_ctx->is_snap_protected(m_parent_image_ctx->snap_id, &snap_protected); m_parent_image_ctx->image_lock.unlock_shared(); if 
((p_features & RBD_FEATURE_LAYERING) != RBD_FEATURE_LAYERING) { lderr(m_cct) << "parent image must support layering" << dendl; m_r_saved = -ENOSYS; close_parent(); return; } if (m_use_p_features) { m_features = p_features; } if (r < 0) { lderr(m_cct) << "unable to locate parent's snapshot" << dendl; m_r_saved = r; close_parent(); return; } if (m_clone_format == 1 && !snap_protected) { lderr(m_cct) << "parent snapshot must be protected" << dendl; m_r_saved = -EINVAL; close_parent(); return; } validate_child(); } template <typename I> void CloneRequest<I>::validate_child() { ldout(m_cct, 15) << dendl; if ((m_features & RBD_FEATURE_LAYERING) != RBD_FEATURE_LAYERING) { lderr(m_cct) << "cloning image must support layering" << dendl; m_r_saved = -ENOSYS; close_parent(); return; } using klass = CloneRequest<I>; librados::AioCompletion *comp = create_rados_callback< klass, &klass::handle_validate_child>(this); librados::ObjectReadOperation op; op.stat(NULL, NULL, NULL); int r = m_ioctx.aio_operate(util::old_header_name(m_name), comp, &op, &m_out_bl); ceph_assert(r == 0); comp->release(); } template <typename I> void CloneRequest<I>::handle_validate_child(int r) { ldout(m_cct, 15) << "r=" << r << dendl; if (r != -ENOENT) { lderr(m_cct) << "rbd image " << m_name << " already exists" << dendl; m_r_saved = r; close_parent(); return; } create_child(); } template <typename I> void CloneRequest<I>::create_child() { ldout(m_cct, 15) << dendl; uint64_t order = m_parent_image_ctx->order; if (m_opts.get(RBD_IMAGE_OPTION_ORDER, &order) != 0) { m_opts.set(RBD_IMAGE_OPTION_ORDER, order); } m_opts.set(RBD_IMAGE_OPTION_FEATURES, m_features); uint64_t stripe_unit = m_parent_image_ctx->stripe_unit; if (m_opts.get(RBD_IMAGE_OPTION_STRIPE_UNIT, &stripe_unit) != 0) { m_opts.set(RBD_IMAGE_OPTION_STRIPE_UNIT, stripe_unit); } uint64_t stripe_count = m_parent_image_ctx->stripe_count; if (m_opts.get(RBD_IMAGE_OPTION_STRIPE_COUNT, &stripe_count) != 0) { m_opts.set(RBD_IMAGE_OPTION_STRIPE_COUNT, 
stripe_count); } using klass = CloneRequest<I>; Context *ctx = create_context_callback< klass, &klass::handle_create_child>(this); auto req = CreateRequest<I>::create( m_config, m_ioctx, m_name, m_id, m_size, m_opts, image::CREATE_FLAG_SKIP_MIRROR_ENABLE, cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, m_non_primary_global_image_id, m_primary_mirror_uuid, m_op_work_queue, ctx); req->send(); } template <typename I> void CloneRequest<I>::handle_create_child(int r) { ldout(m_cct, 15) << "r=" << r << dendl; if (r == -EBADF) { ldout(m_cct, 5) << "image id already in-use" << dendl; complete(r); return; } else if (r < 0) { lderr(m_cct) << "error creating child: " << cpp_strerror(r) << dendl; m_r_saved = r; close_parent(); return; } open_child(); } template <typename I> void CloneRequest<I>::open_child() { ldout(m_cct, 15) << dendl; m_imctx = I::create(m_name, "", nullptr, m_ioctx, false); using klass = CloneRequest<I>; Context *ctx = create_context_callback< klass, &klass::handle_open_child>(this); uint64_t flags = OPEN_FLAG_SKIP_OPEN_PARENT; if ((m_features & RBD_FEATURE_MIGRATING) != 0) { flags |= OPEN_FLAG_IGNORE_MIGRATING; } m_imctx->state->open(flags, ctx); } template <typename I> void CloneRequest<I>::handle_open_child(int r) { ldout(m_cct, 15) << "r=" << r << dendl; if (r < 0) { m_imctx = nullptr; lderr(m_cct) << "Error opening new image: " << cpp_strerror(r) << dendl; m_r_saved = r; remove_child(); return; } attach_parent(); } template <typename I> void CloneRequest<I>::attach_parent() { ldout(m_cct, 15) << dendl; auto ctx = create_context_callback< CloneRequest<I>, &CloneRequest<I>::handle_attach_parent>(this); auto req = AttachParentRequest<I>::create( *m_imctx, m_pspec, m_size, false, ctx); req->send(); } template <typename I> void CloneRequest<I>::handle_attach_parent(int r) { ldout(m_cct, 15) << "r=" << r << dendl; if (r < 0) { lderr(m_cct) << "failed to attach parent: " << cpp_strerror(r) << dendl; m_r_saved = r; close_child(); return; } attach_child(); } template 
<typename I> void CloneRequest<I>::attach_child() { ldout(m_cct, 15) << dendl; auto ctx = create_context_callback< CloneRequest<I>, &CloneRequest<I>::handle_attach_child>(this); auto req = AttachChildRequest<I>::create( m_imctx, m_parent_image_ctx, m_parent_image_ctx->snap_id, nullptr, 0, m_clone_format, ctx); req->send(); } template <typename I> void CloneRequest<I>::handle_attach_child(int r) { ldout(m_cct, 15) << "r=" << r << dendl; if (r < 0) { lderr(m_cct) << "failed to attach parent: " << cpp_strerror(r) << dendl; m_r_saved = r; close_child(); return; } copy_metadata(); } template <typename I> void CloneRequest<I>::copy_metadata() { ldout(m_cct, 15) << dendl; auto ctx = create_context_callback< CloneRequest<I>, &CloneRequest<I>::handle_copy_metadata>(this); auto req = deep_copy::MetadataCopyRequest<I>::create( m_parent_image_ctx, m_imctx, ctx); req->send(); } template <typename I> void CloneRequest<I>::handle_copy_metadata(int r) { ldout(m_cct, 15) << "r=" << r << dendl; if (r < 0) { lderr(m_cct) << "failed to copy metadata: " << cpp_strerror(r) << dendl; m_r_saved = r; close_child(); return; } get_mirror_mode(); } template <typename I> void CloneRequest<I>::get_mirror_mode() { ldout(m_cct, 15) << dendl; uint64_t mirror_image_mode; if (!m_non_primary_global_image_id.empty()) { enable_mirror(); return; } else if (m_opts.get(RBD_IMAGE_OPTION_MIRROR_IMAGE_MODE, &mirror_image_mode) == 0) { m_mirror_image_mode = static_cast<cls::rbd::MirrorImageMode>( mirror_image_mode); enable_mirror(); return; } else if (!m_imctx->test_features(RBD_FEATURE_JOURNALING)) { close_child(); return; } librados::ObjectReadOperation op; cls_client::mirror_mode_get_start(&op); using klass = CloneRequest<I>; librados::AioCompletion *comp = create_rados_callback<klass, &klass::handle_get_mirror_mode>(this); m_out_bl.clear(); m_imctx->md_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl); comp->release(); } template <typename I> void CloneRequest<I>::handle_get_mirror_mode(int r) { 
ldout(m_cct, 15) << "r=" << r << dendl; if (r == 0) { auto it = m_out_bl.cbegin(); r = cls_client::mirror_mode_get_finish(&it, &m_mirror_mode); } if (r < 0 && r != -ENOENT) { lderr(m_cct) << "failed to retrieve mirror mode: " << cpp_strerror(r) << dendl; m_r_saved = r; } else if (m_mirror_mode == cls::rbd::MIRROR_MODE_POOL) { m_mirror_image_mode = cls::rbd::MIRROR_IMAGE_MODE_JOURNAL; enable_mirror(); return; } close_child(); } template <typename I> void CloneRequest<I>::enable_mirror() { ldout(m_cct, 15) << dendl; using klass = CloneRequest<I>; Context *ctx = create_context_callback< klass, &klass::handle_enable_mirror>(this); auto req = mirror::EnableRequest<I>::create( m_imctx, m_mirror_image_mode, m_non_primary_global_image_id, true, ctx); req->send(); } template <typename I> void CloneRequest<I>::handle_enable_mirror(int r) { ldout(m_cct, 15) << "r=" << r << dendl; if (r < 0) { lderr(m_cct) << "failed to enable mirroring: " << cpp_strerror(r) << dendl; m_r_saved = r; } close_child(); } template <typename I> void CloneRequest<I>::close_child() { ldout(m_cct, 15) << dendl; ceph_assert(m_imctx != nullptr); auto ctx = create_context_callback< CloneRequest<I>, &CloneRequest<I>::handle_close_child>(this); m_imctx->state->close(ctx); } template <typename I> void CloneRequest<I>::handle_close_child(int r) { ldout(m_cct, 15) << dendl; m_imctx = nullptr; if (r < 0) { lderr(m_cct) << "couldn't close image: " << cpp_strerror(r) << dendl; if (m_r_saved == 0) { m_r_saved = r; } } if (m_r_saved < 0) { remove_child(); return; } close_parent(); } template <typename I> void CloneRequest<I>::remove_child() { ldout(m_cct, 15) << dendl; using klass = CloneRequest<I>; Context *ctx = create_context_callback< klass, &klass::handle_remove_child>(this); auto req = librbd::image::RemoveRequest<I>::create( m_ioctx, m_name, m_id, false, false, m_no_op, m_op_work_queue, ctx); req->send(); } template <typename I> void CloneRequest<I>::handle_remove_child(int r) { ldout(m_cct, 15) << "r=" << 
r << dendl; if (r < 0) { lderr(m_cct) << "Error removing failed clone: " << cpp_strerror(r) << dendl; } close_parent(); } template <typename I> void CloneRequest<I>::close_parent() { ldout(m_cct, 20) << dendl; ceph_assert(m_parent_image_ctx != nullptr); auto ctx = create_context_callback< CloneRequest<I>, &CloneRequest<I>::handle_close_parent>(this); m_parent_image_ctx->state->close(ctx); } template <typename I> void CloneRequest<I>::handle_close_parent(int r) { ldout(m_cct, 20) << "r=" << r << dendl; m_parent_image_ctx = nullptr; if (r < 0) { lderr(m_cct) << "failed to close parent image: " << cpp_strerror(r) << dendl; if (m_r_saved == 0) { m_r_saved = r; } } complete(m_r_saved); } template <typename I> void CloneRequest<I>::complete(int r) { ldout(m_cct, 15) << "r=" << r << dendl; m_on_finish->complete(r); delete this; } } //namespace image } //namespace librbd template class librbd::image::CloneRequest<librbd::ImageCtx>;
16,558
26.235197
94
cc
null
ceph-main/src/librbd/image/CloneRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IMAGE_CLONE_REQUEST_H #define CEPH_LIBRBD_IMAGE_CLONE_REQUEST_H #include "cls/rbd/cls_rbd_types.h" #include "common/config_fwd.h" #include "librbd/internal.h" #include "include/rbd/librbd.hpp" class Context; using librados::IoCtx; namespace librbd { namespace asio { struct ContextWQ; } namespace image { template <typename ImageCtxT = ImageCtx> class CloneRequest { public: static CloneRequest *create( ConfigProxy& config, IoCtx& parent_io_ctx, const std::string& parent_image_id, const std::string& parent_snap_name, const cls::rbd::SnapshotNamespace& parent_snap_namespace, uint64_t parent_snap_id, IoCtx &c_ioctx, const std::string &c_name, const std::string &c_id, ImageOptions c_options, cls::rbd::MirrorImageMode mirror_image_mode, const std::string &non_primary_global_image_id, const std::string &primary_mirror_uuid, asio::ContextWQ *op_work_queue, Context *on_finish) { return new CloneRequest(config, parent_io_ctx, parent_image_id, parent_snap_name, parent_snap_namespace, parent_snap_id, c_ioctx, c_name, c_id, c_options, mirror_image_mode, non_primary_global_image_id, primary_mirror_uuid, op_work_queue, on_finish); } CloneRequest(ConfigProxy& config, IoCtx& parent_io_ctx, const std::string& parent_image_id, const std::string& parent_snap_name, const cls::rbd::SnapshotNamespace& parent_snap_namespace, uint64_t parent_snap_id, IoCtx &c_ioctx, const std::string &c_name, const std::string &c_id, ImageOptions c_options, cls::rbd::MirrorImageMode mirror_image_mode, const std::string &non_primary_global_image_id, const std::string &primary_mirror_uuid, asio::ContextWQ *op_work_queue, Context *on_finish); void send(); private: /** * @verbatim * * <start> * | * v * OPEN PARENT * | * v * VALIDATE CHILD <finish> * | ^ * v | * CREATE CHILD * * * * * * * * * > CLOSE PARENT * | ^ * v | * OPEN CHILD * * * * * * * * * * > REMOVE CHILD * | ^ * v | * ATTACH PARENT * 
* * * * * * * > CLOSE CHILD * | ^ * v * * ATTACH CHILD * * * * * * * * * * * * * | * * v * * COPY META DATA * * * * * * * * * * ^ * | * * v (skip if not needed) * * GET MIRROR MODE * * * * * * * * * ^ * | * * v (skip if not needed) * * SET MIRROR ENABLED * * * * * * * * * * | * v * CLOSE CHILD * | * v * CLOSE PARENT * | * v * <finish> * * @endverbatim */ ConfigProxy& m_config; IoCtx &m_parent_io_ctx; std::string m_parent_image_id; std::string m_parent_snap_name; cls::rbd::SnapshotNamespace m_parent_snap_namespace; uint64_t m_parent_snap_id; ImageCtxT *m_parent_image_ctx; IoCtx &m_ioctx; std::string m_name; std::string m_id; ImageOptions m_opts; cls::rbd::ParentImageSpec m_pspec; ImageCtxT *m_imctx; cls::rbd::MirrorMode m_mirror_mode = cls::rbd::MIRROR_MODE_DISABLED; cls::rbd::MirrorImageMode m_mirror_image_mode; const std::string m_non_primary_global_image_id; const std::string m_primary_mirror_uuid; NoOpProgressContext m_no_op; asio::ContextWQ *m_op_work_queue; Context *m_on_finish; CephContext *m_cct; uint64_t m_clone_format = 2; bool m_use_p_features; uint64_t m_features; bufferlist m_out_bl; uint64_t m_size; int m_r_saved = 0; void validate_options(); void open_parent(); void handle_open_parent(int r); void validate_parent(); void validate_child(); void handle_validate_child(int r); void create_child(); void handle_create_child(int r); void open_child(); void handle_open_child(int r); void attach_parent(); void handle_attach_parent(int r); void attach_child(); void handle_attach_child(int r); void copy_metadata(); void handle_copy_metadata(int r); void get_mirror_mode(); void handle_get_mirror_mode(int r); void enable_mirror(); void handle_enable_mirror(int r); void close_child(); void handle_close_child(int r); void remove_child(); void handle_remove_child(int r); void close_parent(); void handle_close_parent(int r); void complete(int r); }; } //namespace image } //namespace librbd extern template class librbd::image::CloneRequest<librbd::ImageCtx>; #endif // 
CEPH_LIBRBD_IMAGE_CLONE_REQUEST_H
5,123
27.153846
77
h
null
ceph-main/src/librbd/image/CloseRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/image/CloseRequest.h" #include "common/dout.h" #include "common/errno.h" #include "librbd/ConfigWatcher.h" #include "librbd/ExclusiveLock.h" #include "librbd/ImageCtx.h" #include "librbd/ImageState.h" #include "librbd/ImageWatcher.h" #include "librbd/ObjectMap.h" #include "librbd/Utils.h" #include "librbd/asio/ContextWQ.h" #include "librbd/io/AioCompletion.h" #include "librbd/io/ImageDispatcher.h" #include "librbd/io/ImageDispatchSpec.h" #include "librbd/io/ObjectDispatcherInterface.h" #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::image::CloseRequest: " namespace librbd { namespace image { using util::create_async_context_callback; using util::create_context_callback; template <typename I> CloseRequest<I>::CloseRequest(I *image_ctx, Context *on_finish) : m_image_ctx(image_ctx), m_on_finish(on_finish), m_error_result(0), m_exclusive_lock(nullptr) { ceph_assert(image_ctx != nullptr); } template <typename I> void CloseRequest<I>::send() { if (m_image_ctx->config_watcher != nullptr) { m_image_ctx->config_watcher->shut_down(); delete m_image_ctx->config_watcher; m_image_ctx->config_watcher = nullptr; } send_block_image_watcher(); } template <typename I> void CloseRequest<I>::send_block_image_watcher() { if (m_image_ctx->image_watcher == nullptr) { send_shut_down_update_watchers(); return; } CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; // prevent incoming requests from our peers m_image_ctx->image_watcher->block_notifies(create_context_callback< CloseRequest<I>, &CloseRequest<I>::handle_block_image_watcher>(this)); } template <typename I> void CloseRequest<I>::handle_block_image_watcher(int r) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl; send_shut_down_update_watchers(); } template <typename I> void 
CloseRequest<I>::send_shut_down_update_watchers() { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; m_image_ctx->state->shut_down_update_watchers(create_async_context_callback( *m_image_ctx, create_context_callback< CloseRequest<I>, &CloseRequest<I>::handle_shut_down_update_watchers>(this))); } template <typename I> void CloseRequest<I>::handle_shut_down_update_watchers(int r) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl; save_result(r); if (r < 0) { lderr(cct) << "failed to shut down update watchers: " << cpp_strerror(r) << dendl; } send_flush(); } template <typename I> void CloseRequest<I>::send_flush() { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; std::shared_lock owner_locker{m_image_ctx->owner_lock}; auto ctx = create_context_callback< CloseRequest<I>, &CloseRequest<I>::handle_flush>(this); auto aio_comp = io::AioCompletion::create_and_start(ctx, m_image_ctx, io::AIO_TYPE_FLUSH); auto req = io::ImageDispatchSpec::create_flush( *m_image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, aio_comp, io::FLUSH_SOURCE_SHUTDOWN, {}); req->send(); } template <typename I> void CloseRequest<I>::handle_flush(int r) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl; if (r < 0) { lderr(cct) << "failed to flush IO: " << cpp_strerror(r) << dendl; } send_shut_down_exclusive_lock(); } template <typename I> void CloseRequest<I>::send_shut_down_exclusive_lock() { { std::unique_lock owner_locker{m_image_ctx->owner_lock}; m_exclusive_lock = m_image_ctx->exclusive_lock; // if reading a snapshot -- possible object map is open std::unique_lock image_locker{m_image_ctx->image_lock}; if (m_exclusive_lock == nullptr && m_image_ctx->object_map) { m_image_ctx->object_map->put(); m_image_ctx->object_map = nullptr; } } if (m_exclusive_lock == nullptr) { send_unregister_image_watcher(); return; } 
CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; // in-flight IO will be flushed and in-flight requests will be canceled // before releasing lock m_exclusive_lock->shut_down(create_context_callback< CloseRequest<I>, &CloseRequest<I>::handle_shut_down_exclusive_lock>(this)); } template <typename I> void CloseRequest<I>::handle_shut_down_exclusive_lock(int r) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl; { std::shared_lock owner_locker{m_image_ctx->owner_lock}; ceph_assert(m_image_ctx->exclusive_lock == nullptr); // object map and journal closed during exclusive lock shutdown std::shared_lock image_locker{m_image_ctx->image_lock}; ceph_assert(m_image_ctx->journal == nullptr); ceph_assert(m_image_ctx->object_map == nullptr); } m_exclusive_lock->put(); m_exclusive_lock = nullptr; save_result(r); if (r < 0) { lderr(cct) << "failed to shut down exclusive lock: " << cpp_strerror(r) << dendl; } send_unregister_image_watcher(); } template <typename I> void CloseRequest<I>::send_unregister_image_watcher() { if (m_image_ctx->image_watcher == nullptr) { send_flush_readahead(); return; } CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; m_image_ctx->image_watcher->unregister_watch(create_context_callback< CloseRequest<I>, &CloseRequest<I>::handle_unregister_image_watcher>(this)); } template <typename I> void CloseRequest<I>::handle_unregister_image_watcher(int r) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl; save_result(r); if (r < 0) { lderr(cct) << "failed to unregister image watcher: " << cpp_strerror(r) << dendl; } send_flush_readahead(); } template <typename I> void CloseRequest<I>::send_flush_readahead() { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; m_image_ctx->readahead.wait_for_pending(create_async_context_callback( *m_image_ctx, 
create_context_callback< CloseRequest<I>, &CloseRequest<I>::handle_flush_readahead>(this))); } template <typename I> void CloseRequest<I>::handle_flush_readahead(int r) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl; send_shut_down_image_dispatcher(); } template <typename I> void CloseRequest<I>::send_shut_down_image_dispatcher() { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; m_image_ctx->io_image_dispatcher->shut_down(create_context_callback< CloseRequest<I>, &CloseRequest<I>::handle_shut_down_image_dispatcher>(this)); } template <typename I> void CloseRequest<I>::handle_shut_down_image_dispatcher(int r) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl; save_result(r); if (r < 0) { lderr(cct) << "failed to shut down image dispatcher: " << cpp_strerror(r) << dendl; } send_shut_down_object_dispatcher(); } template <typename I> void CloseRequest<I>::send_shut_down_object_dispatcher() { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; m_image_ctx->io_object_dispatcher->shut_down(create_context_callback< CloseRequest<I>, &CloseRequest<I>::handle_shut_down_object_dispatcher>(this)); } template <typename I> void CloseRequest<I>::handle_shut_down_object_dispatcher(int r) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl; save_result(r); if (r < 0) { lderr(cct) << "failed to shut down object dispatcher: " << cpp_strerror(r) << dendl; } send_flush_op_work_queue(); } template <typename I> void CloseRequest<I>::send_flush_op_work_queue() { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; m_image_ctx->op_work_queue->queue(create_context_callback< CloseRequest<I>, &CloseRequest<I>::handle_flush_op_work_queue>(this), 0); } template <typename I> void 
CloseRequest<I>::handle_flush_op_work_queue(int r) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl; send_close_parent(); } template <typename I> void CloseRequest<I>::send_close_parent() { if (m_image_ctx->parent == nullptr) { send_flush_image_watcher(); return; } CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; m_image_ctx->parent->state->close(create_async_context_callback( *m_image_ctx, create_context_callback< CloseRequest<I>, &CloseRequest<I>::handle_close_parent>(this))); } template <typename I> void CloseRequest<I>::handle_close_parent(int r) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl; m_image_ctx->parent = nullptr; save_result(r); if (r < 0) { lderr(cct) << "error closing parent image: " << cpp_strerror(r) << dendl; } send_flush_image_watcher(); } template <typename I> void CloseRequest<I>::send_flush_image_watcher() { if (m_image_ctx->image_watcher == nullptr) { finish(); return; } m_image_ctx->image_watcher->flush(create_context_callback< CloseRequest<I>, &CloseRequest<I>::handle_flush_image_watcher>(this)); } template <typename I> void CloseRequest<I>::handle_flush_image_watcher(int r) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl; if (r < 0) { lderr(cct) << "error flushing image watcher: " << cpp_strerror(r) << dendl; } save_result(r); finish(); } template <typename I> void CloseRequest<I>::finish() { m_image_ctx->shutdown(); m_on_finish->complete(m_error_result); delete this; } } // namespace image } // namespace librbd template class librbd::image::CloseRequest<librbd::ImageCtx>;
10,339
28.458689
83
cc
null
ceph-main/src/librbd/image/CloseRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IMAGE_CLOSE_REQUEST_H #define CEPH_LIBRBD_IMAGE_CLOSE_REQUEST_H #include "librbd/ImageCtx.h" class Context; namespace librbd { class ImageCtx; namespace image { template <typename ImageCtxT = ImageCtx> class CloseRequest { public: static CloseRequest *create(ImageCtxT *image_ctx, Context *on_finish) { return new CloseRequest(image_ctx, on_finish); } void send(); private: /** * @verbatim * * <start> * | * v * BLOCK_IMAGE_WATCHER (skip if R/O) * | * v * SHUT_DOWN_UPDATE_WATCHERS * | * v * FLUSH * | * v (skip if disabled) * SHUT_DOWN_EXCLUSIVE_LOCK * | * v * UNREGISTER_IMAGE_WATCHER (skip if R/O) * | * v * FLUSH_READAHEAD * | * v * SHUT_DOWN_IMAGE_DISPATCHER * | * v * SHUT_DOWN_OBJECT_DISPATCHER * | * v * FLUSH_OP_WORK_QUEUE * | * v (skip if no parent) * CLOSE_PARENT * | * v * FLUSH_IMAGE_WATCHER * | * v * <finish> * * @endverbatim */ CloseRequest(ImageCtxT *image_ctx, Context *on_finish); ImageCtxT *m_image_ctx; Context *m_on_finish; int m_error_result; decltype(m_image_ctx->exclusive_lock) m_exclusive_lock; void send_block_image_watcher(); void handle_block_image_watcher(int r); void send_shut_down_update_watchers(); void handle_shut_down_update_watchers(int r); void send_flush(); void handle_flush(int r); void send_shut_down_exclusive_lock(); void handle_shut_down_exclusive_lock(int r); void send_unregister_image_watcher(); void handle_unregister_image_watcher(int r); void send_flush_readahead(); void handle_flush_readahead(int r); void send_shut_down_image_dispatcher(); void handle_shut_down_image_dispatcher(int r); void send_shut_down_object_dispatcher(); void handle_shut_down_object_dispatcher(int r); void send_flush_op_work_queue(); void handle_flush_op_work_queue(int r); void send_close_parent(); void handle_close_parent(int r); void send_flush_image_watcher(); void handle_flush_image_watcher(int r); void finish(); void save_result(int result) 
{ if (m_error_result == 0 && result < 0) { m_error_result = result; } } }; } // namespace image } // namespace librbd extern template class librbd::image::CloseRequest<librbd::ImageCtx>; #endif // CEPH_LIBRBD_IMAGE_CLOSE_REQUEST_H
2,543
18.875
73
h
null
ceph-main/src/librbd/image/CreateRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/image/CreateRequest.h" #include "include/ceph_assert.h" #include "common/dout.h" #include "common/errno.h" #include "common/ceph_context.h" #include "cls/rbd/cls_rbd_client.h" #include "osdc/Striper.h" #include "librbd/Features.h" #include "librbd/Journal.h" #include "librbd/ObjectMap.h" #include "librbd/Utils.h" #include "librbd/asio/ContextWQ.h" #include "librbd/image/Types.h" #include "librbd/image/ValidatePoolRequest.h" #include "librbd/journal/CreateRequest.h" #include "librbd/journal/RemoveRequest.h" #include "librbd/journal/TypeTraits.h" #include "librbd/mirror/EnableRequest.h" #include "journal/Journaler.h" #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::image::CreateRequest: " << __func__ \ << ": " namespace librbd { namespace image { using util::create_rados_callback; using util::create_context_callback; namespace { int validate_features(CephContext *cct, uint64_t features) { if (features & ~RBD_FEATURES_ALL) { lderr(cct) << "librbd does not support requested features." 
<< dendl; return -ENOSYS; } if ((features & RBD_FEATURES_INTERNAL) != 0) { lderr(cct) << "cannot use internally controlled features" << dendl; return -EINVAL; } if ((features & RBD_FEATURE_FAST_DIFF) != 0 && (features & RBD_FEATURE_OBJECT_MAP) == 0) { lderr(cct) << "cannot use fast diff without object map" << dendl; return -EINVAL; } if ((features & RBD_FEATURE_OBJECT_MAP) != 0 && (features & RBD_FEATURE_EXCLUSIVE_LOCK) == 0) { lderr(cct) << "cannot use object map without exclusive lock" << dendl; return -EINVAL; } if ((features & RBD_FEATURE_JOURNALING) != 0 && (features & RBD_FEATURE_EXCLUSIVE_LOCK) == 0) { lderr(cct) << "cannot use journaling without exclusive lock" << dendl; return -EINVAL; } return 0; } int validate_striping(CephContext *cct, uint8_t order, uint64_t stripe_unit, uint64_t stripe_count) { if ((stripe_unit && !stripe_count) || (!stripe_unit && stripe_count)) { lderr(cct) << "must specify both (or neither) of stripe-unit and " << "stripe-count" << dendl; return -EINVAL; } else if (stripe_unit && ((1ull << order) % stripe_unit || stripe_unit > (1ull << order))) { lderr(cct) << "stripe unit is not a factor of the object size" << dendl; return -EINVAL; } else if (stripe_unit != 0 && stripe_unit < 512) { lderr(cct) << "stripe unit must be at least 512 bytes" << dendl; return -EINVAL; } return 0; } bool validate_layout(CephContext *cct, uint64_t size, file_layout_t &layout) { if (!librbd::ObjectMap<>::is_compatible(layout, size)) { lderr(cct) << "image size not compatible with object map" << dendl; return false; } return true; } int get_image_option(const ImageOptions &image_options, int option, uint8_t *value) { uint64_t large_value; int r = image_options.get(option, &large_value); if (r < 0) { return r; } *value = static_cast<uint8_t>(large_value); return 0; } } // anonymous namespace template<typename I> int CreateRequest<I>::validate_order(CephContext *cct, uint8_t order) { if (order > 25 || order < 12) { lderr(cct) << "order must be in the range 
[12, 25]" << dendl; return -EDOM; } return 0; } #undef dout_prefix #define dout_prefix *_dout << "librbd::image::CreateRequest: " << this << " " \ << __func__ << ": " template<typename I> CreateRequest<I>::CreateRequest(const ConfigProxy& config, IoCtx &ioctx, const std::string &image_name, const std::string &image_id, uint64_t size, const ImageOptions &image_options, uint32_t create_flags, cls::rbd::MirrorImageMode mirror_image_mode, const std::string &non_primary_global_image_id, const std::string &primary_mirror_uuid, asio::ContextWQ *op_work_queue, Context *on_finish) : m_config(config), m_image_name(image_name), m_image_id(image_id), m_size(size), m_create_flags(create_flags), m_mirror_image_mode(mirror_image_mode), m_non_primary_global_image_id(non_primary_global_image_id), m_primary_mirror_uuid(primary_mirror_uuid), m_op_work_queue(op_work_queue), m_on_finish(on_finish) { m_io_ctx.dup(ioctx); m_cct = reinterpret_cast<CephContext *>(m_io_ctx.cct()); m_id_obj = util::id_obj_name(m_image_name); m_header_obj = util::header_name(m_image_id); m_objmap_name = ObjectMap<>::object_map_name(m_image_id, CEPH_NOSNAP); if (!non_primary_global_image_id.empty() && (m_create_flags & CREATE_FLAG_MIRROR_ENABLE_MASK) == 0) { m_create_flags |= CREATE_FLAG_FORCE_MIRROR_ENABLE; } if (image_options.get(RBD_IMAGE_OPTION_FEATURES, &m_features) != 0) { m_features = librbd::rbd_features_from_string( m_config.get_val<std::string>("rbd_default_features"), nullptr); m_negotiate_features = true; } uint64_t features_clear = 0; uint64_t features_set = 0; image_options.get(RBD_IMAGE_OPTION_FEATURES_CLEAR, &features_clear); image_options.get(RBD_IMAGE_OPTION_FEATURES_SET, &features_set); uint64_t features_conflict = features_clear & features_set; features_clear &= ~features_conflict; features_set &= ~features_conflict; m_features |= features_set; m_features &= ~features_clear; m_features &= ~RBD_FEATURES_IMPLICIT_ENABLE; if ((m_features & RBD_FEATURE_OBJECT_MAP) == RBD_FEATURE_OBJECT_MAP) { 
m_features |= RBD_FEATURE_FAST_DIFF; } if (image_options.get(RBD_IMAGE_OPTION_STRIPE_UNIT, &m_stripe_unit) != 0 || m_stripe_unit == 0) { m_stripe_unit = m_config.get_val<Option::size_t>("rbd_default_stripe_unit"); } if (image_options.get(RBD_IMAGE_OPTION_STRIPE_COUNT, &m_stripe_count) != 0 || m_stripe_count == 0) { m_stripe_count = m_config.get_val<uint64_t>("rbd_default_stripe_count"); } if (get_image_option(image_options, RBD_IMAGE_OPTION_ORDER, &m_order) != 0 || m_order == 0) { m_order = config.get_val<uint64_t>("rbd_default_order"); } if (get_image_option(image_options, RBD_IMAGE_OPTION_JOURNAL_ORDER, &m_journal_order) != 0) { m_journal_order = m_config.get_val<uint64_t>("rbd_journal_order"); } if (get_image_option(image_options, RBD_IMAGE_OPTION_JOURNAL_SPLAY_WIDTH, &m_journal_splay_width) != 0) { m_journal_splay_width = m_config.get_val<uint64_t>( "rbd_journal_splay_width"); } if (image_options.get(RBD_IMAGE_OPTION_JOURNAL_POOL, &m_journal_pool) != 0) { m_journal_pool = m_config.get_val<std::string>("rbd_journal_pool"); } if (image_options.get(RBD_IMAGE_OPTION_DATA_POOL, &m_data_pool) != 0) { m_data_pool = m_config.get_val<std::string>("rbd_default_data_pool"); } m_layout.object_size = 1ull << m_order; if (m_stripe_unit == 0 || m_stripe_count == 0) { m_layout.stripe_unit = m_layout.object_size; m_layout.stripe_count = 1; } else { m_layout.stripe_unit = m_stripe_unit; m_layout.stripe_count = m_stripe_count; } if (!m_data_pool.empty() && m_data_pool != ioctx.get_pool_name()) { m_features |= RBD_FEATURE_DATA_POOL; } else { m_data_pool.clear(); } if ((m_stripe_unit != 0 && m_stripe_unit != (1ULL << m_order)) || (m_stripe_count != 0 && m_stripe_count != 1)) { m_features |= RBD_FEATURE_STRIPINGV2; } ldout(m_cct, 10) << "name=" << m_image_name << ", " << "id=" << m_image_id << ", " << "size=" << m_size << ", " << "features=" << m_features << ", " << "order=" << (uint64_t)m_order << ", " << "stripe_unit=" << m_stripe_unit << ", " << "stripe_count=" << m_stripe_count 
<< ", " << "journal_order=" << (uint64_t)m_journal_order << ", " << "journal_splay_width=" << (uint64_t)m_journal_splay_width << ", " << "journal_pool=" << m_journal_pool << ", " << "data_pool=" << m_data_pool << dendl; } template<typename I> void CreateRequest<I>::send() { ldout(m_cct, 20) << dendl; int r = validate_features(m_cct, m_features); if (r < 0) { complete(r); return; } r = validate_order(m_cct, m_order); if (r < 0) { complete(r); return; } r = validate_striping(m_cct, m_order, m_stripe_unit, m_stripe_count); if (r < 0) { complete(r); return; } if (((m_features & RBD_FEATURE_OBJECT_MAP) != 0) && (!validate_layout(m_cct, m_size, m_layout))) { complete(-EINVAL); return; } validate_data_pool(); } template <typename I> void CreateRequest<I>::validate_data_pool() { m_data_io_ctx = m_io_ctx; if ((m_features & RBD_FEATURE_DATA_POOL) != 0) { librados::Rados rados(m_io_ctx); int r = rados.ioctx_create(m_data_pool.c_str(), m_data_io_ctx); if (r < 0) { lderr(m_cct) << "data pool " << m_data_pool << " does not exist" << dendl; complete(r); return; } m_data_pool_id = m_data_io_ctx.get_id(); m_data_io_ctx.set_namespace(m_io_ctx.get_namespace()); } if (!m_config.get_val<bool>("rbd_validate_pool")) { add_image_to_directory(); return; } ldout(m_cct, 15) << dendl; auto ctx = create_context_callback< CreateRequest<I>, &CreateRequest<I>::handle_validate_data_pool>(this); auto req = ValidatePoolRequest<I>::create(m_data_io_ctx, ctx); req->send(); } template <typename I> void CreateRequest<I>::handle_validate_data_pool(int r) { ldout(m_cct, 15) << "r=" << r << dendl; if (r == -EINVAL) { lderr(m_cct) << "pool does not support RBD images" << dendl; complete(r); return; } else if (r < 0) { lderr(m_cct) << "failed to validate pool: " << cpp_strerror(r) << dendl; complete(r); return; } add_image_to_directory(); } template<typename I> void CreateRequest<I>::add_image_to_directory() { ldout(m_cct, 15) << dendl; librados::ObjectWriteOperation op; if 
(!m_io_ctx.get_namespace().empty()) { cls_client::dir_state_assert(&op, cls::rbd::DIRECTORY_STATE_READY); } cls_client::dir_add_image(&op, m_image_name, m_image_id); using klass = CreateRequest<I>; librados::AioCompletion *comp = create_rados_callback<klass, &klass::handle_add_image_to_directory>(this); int r = m_io_ctx.aio_operate(RBD_DIRECTORY, comp, &op); ceph_assert(r == 0); comp->release(); } template<typename I> void CreateRequest<I>::handle_add_image_to_directory(int r) { ldout(m_cct, 15) << "r=" << r << dendl; if (r == -EEXIST) { ldout(m_cct, 5) << "directory entry for image " << m_image_name << " already exists" << dendl; complete(r); return; } else if (!m_io_ctx.get_namespace().empty() && r == -ENOENT) { ldout(m_cct, 5) << "namespace " << m_io_ctx.get_namespace() << " does not exist" << dendl; complete(r); return; } else if (r < 0) { lderr(m_cct) << "error adding image to directory: " << cpp_strerror(r) << dendl; complete(r); return; } create_id_object(); } template<typename I> void CreateRequest<I>::create_id_object() { ldout(m_cct, 15) << dendl; librados::ObjectWriteOperation op; op.create(true); cls_client::set_id(&op, m_image_id); using klass = CreateRequest<I>; librados::AioCompletion *comp = create_rados_callback<klass, &klass::handle_create_id_object>(this); int r = m_io_ctx.aio_operate(m_id_obj, comp, &op); ceph_assert(r == 0); comp->release(); } template<typename I> void CreateRequest<I>::handle_create_id_object(int r) { ldout(m_cct, 15) << "r=" << r << dendl; if (r == -EEXIST) { ldout(m_cct, 5) << "id object for " << m_image_name << " already exists" << dendl; m_r_saved = r; remove_from_dir(); return; } else if (r < 0) { lderr(m_cct) << "error creating RBD id object: " << cpp_strerror(r) << dendl; m_r_saved = r; remove_from_dir(); return; } negotiate_features(); } template<typename I> void CreateRequest<I>::negotiate_features() { if (!m_negotiate_features) { create_image(); return; } ldout(m_cct, 15) << dendl; librados::ObjectReadOperation op; 
cls_client::get_all_features_start(&op); using klass = CreateRequest<I>; librados::AioCompletion *comp = create_rados_callback<klass, &klass::handle_negotiate_features>(this); m_outbl.clear(); int r = m_io_ctx.aio_operate(RBD_DIRECTORY, comp, &op, &m_outbl); ceph_assert(r == 0); comp->release(); } template<typename I> void CreateRequest<I>::handle_negotiate_features(int r) { ldout(m_cct, 15) << "r=" << r << dendl; uint64_t all_features; if (r >= 0) { auto it = m_outbl.cbegin(); r = cls_client::get_all_features_finish(&it, &all_features); } if (r < 0) { ldout(m_cct, 10) << "error retrieving server supported features set: " << cpp_strerror(r) << dendl; } else if ((m_features & all_features) != m_features) { m_features &= all_features; ldout(m_cct, 10) << "limiting default features set to server supported: " << m_features << dendl; } create_image(); } template<typename I> void CreateRequest<I>::create_image() { ldout(m_cct, 15) << dendl; ceph_assert(m_data_pool.empty() || m_data_pool_id != -1); std::ostringstream oss; oss << RBD_DATA_PREFIX; if (m_data_pool_id != -1) { oss << stringify(m_io_ctx.get_id()) << "."; } oss << m_image_id; if (oss.str().length() > RBD_MAX_BLOCK_NAME_PREFIX_LENGTH) { lderr(m_cct) << "object prefix '" << oss.str() << "' too large" << dendl; m_r_saved = -EINVAL; remove_id_object(); return; } librados::ObjectWriteOperation op; op.create(true); cls_client::create_image(&op, m_size, m_order, m_features, oss.str(), m_data_pool_id); using klass = CreateRequest<I>; librados::AioCompletion *comp = create_rados_callback<klass, &klass::handle_create_image>(this); int r = m_io_ctx.aio_operate(m_header_obj, comp, &op); ceph_assert(r == 0); comp->release(); } template<typename I> void CreateRequest<I>::handle_create_image(int r) { ldout(m_cct, 15) << "r=" << r << dendl; if (r == -EEXIST) { ldout(m_cct, 5) << "image id already in-use" << dendl; complete(-EBADF); return; } else if (r < 0) { lderr(m_cct) << "error writing header: " << cpp_strerror(r) << 
dendl; m_r_saved = r; remove_id_object(); return; } set_stripe_unit_count(); } template<typename I> void CreateRequest<I>::set_stripe_unit_count() { if ((!m_stripe_unit && !m_stripe_count) || ((m_stripe_count == 1) && (m_stripe_unit == (1ull << m_order)))) { object_map_resize(); return; } ldout(m_cct, 15) << dendl; librados::ObjectWriteOperation op; cls_client::set_stripe_unit_count(&op, m_stripe_unit, m_stripe_count); using klass = CreateRequest<I>; librados::AioCompletion *comp = create_rados_callback<klass, &klass::handle_set_stripe_unit_count>(this); int r = m_io_ctx.aio_operate(m_header_obj, comp, &op); ceph_assert(r == 0); comp->release(); } template<typename I> void CreateRequest<I>::handle_set_stripe_unit_count(int r) { ldout(m_cct, 15) << "r=" << r << dendl; if (r < 0) { lderr(m_cct) << "error setting stripe unit/count: " << cpp_strerror(r) << dendl; m_r_saved = r; remove_header_object(); return; } object_map_resize(); } template<typename I> void CreateRequest<I>::object_map_resize() { if ((m_features & RBD_FEATURE_OBJECT_MAP) == 0) { fetch_mirror_mode(); return; } ldout(m_cct, 15) << dendl; librados::ObjectWriteOperation op; cls_client::object_map_resize(&op, Striper::get_num_objects(m_layout, m_size), OBJECT_NONEXISTENT); using klass = CreateRequest<I>; librados::AioCompletion *comp = create_rados_callback<klass, &klass::handle_object_map_resize>(this); int r = m_io_ctx.aio_operate(m_objmap_name, comp, &op); ceph_assert(r == 0); comp->release(); } template<typename I> void CreateRequest<I>::handle_object_map_resize(int r) { ldout(m_cct, 15) << "r=" << r << dendl; if (r < 0) { lderr(m_cct) << "error creating initial object map: " << cpp_strerror(r) << dendl; m_r_saved = r; remove_header_object(); return; } fetch_mirror_mode(); } template<typename I> void CreateRequest<I>::fetch_mirror_mode() { if ((m_features & RBD_FEATURE_JOURNALING) == 0) { mirror_image_enable(); return; } ldout(m_cct, 15) << dendl; librados::ObjectReadOperation op; 
cls_client::mirror_mode_get_start(&op); using klass = CreateRequest<I>; librados::AioCompletion *comp = create_rados_callback<klass, &klass::handle_fetch_mirror_mode>(this); m_outbl.clear(); int r = m_io_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_outbl); ceph_assert(r == 0); comp->release(); } template<typename I> void CreateRequest<I>::handle_fetch_mirror_mode(int r) { ldout(m_cct, 15) << "r=" << r << dendl; if ((r < 0) && (r != -ENOENT)) { lderr(m_cct) << "failed to retrieve mirror mode: " << cpp_strerror(r) << dendl; m_r_saved = r; remove_object_map(); return; } m_mirror_mode = cls::rbd::MIRROR_MODE_DISABLED; if (r == 0) { auto it = m_outbl.cbegin(); r = cls_client::mirror_mode_get_finish(&it, &m_mirror_mode); if (r < 0) { lderr(m_cct) << "Failed to retrieve mirror mode" << dendl; m_r_saved = r; remove_object_map(); return; } } journal_create(); } template<typename I> void CreateRequest<I>::journal_create() { ldout(m_cct, 15) << dendl; using klass = CreateRequest<I>; Context *ctx = create_context_callback<klass, &klass::handle_journal_create>( this); // only link to remote primary mirror uuid if in journal-based // mirroring mode bool use_primary_mirror_uuid = ( !m_non_primary_global_image_id.empty() && m_mirror_image_mode == cls::rbd::MIRROR_IMAGE_MODE_JOURNAL); librbd::journal::TagData tag_data; tag_data.mirror_uuid = (use_primary_mirror_uuid ? 
m_primary_mirror_uuid : librbd::Journal<I>::LOCAL_MIRROR_UUID); typename journal::TypeTraits<I>::ContextWQ* context_wq; Journal<>::get_work_queue(m_cct, &context_wq); auto req = librbd::journal::CreateRequest<I>::create( m_io_ctx, m_image_id, m_journal_order, m_journal_splay_width, m_journal_pool, cls::journal::Tag::TAG_CLASS_NEW, tag_data, librbd::Journal<I>::IMAGE_CLIENT_ID, context_wq, ctx); req->send(); } template<typename I> void CreateRequest<I>::handle_journal_create(int r) { ldout(m_cct, 15) << "r=" << r << dendl; if (r < 0) { lderr(m_cct) << "error creating journal: " << cpp_strerror(r) << dendl; m_r_saved = r; remove_object_map(); return; } mirror_image_enable(); } template<typename I> void CreateRequest<I>::mirror_image_enable() { auto mirror_enable_flag = (m_create_flags & CREATE_FLAG_MIRROR_ENABLE_MASK); if ((m_mirror_mode != cls::rbd::MIRROR_MODE_POOL && mirror_enable_flag != CREATE_FLAG_FORCE_MIRROR_ENABLE) || (mirror_enable_flag == CREATE_FLAG_SKIP_MIRROR_ENABLE)) { complete(0); return; } ldout(m_cct, 15) << dendl; auto ctx = create_context_callback< CreateRequest<I>, &CreateRequest<I>::handle_mirror_image_enable>(this); auto req = mirror::EnableRequest<I>::create( m_io_ctx, m_image_id, m_mirror_image_mode, m_non_primary_global_image_id, true, m_op_work_queue, ctx); req->send(); } template<typename I> void CreateRequest<I>::handle_mirror_image_enable(int r) { ldout(m_cct, 15) << "r=" << r << dendl; if (r < 0) { lderr(m_cct) << "cannot enable mirroring: " << cpp_strerror(r) << dendl; m_r_saved = r; journal_remove(); return; } complete(0); } template<typename I> void CreateRequest<I>::complete(int r) { ldout(m_cct, 10) << "r=" << r << dendl; m_data_io_ctx.close(); auto on_finish = m_on_finish; delete this; on_finish->complete(r); } // cleanup template<typename I> void CreateRequest<I>::journal_remove() { if ((m_features & RBD_FEATURE_JOURNALING) == 0) { remove_object_map(); return; } ldout(m_cct, 15) << dendl; using klass = CreateRequest<I>; Context 
*ctx = create_context_callback<klass, &klass::handle_journal_remove>( this); typename journal::TypeTraits<I>::ContextWQ* context_wq; Journal<>::get_work_queue(m_cct, &context_wq); librbd::journal::RemoveRequest<I> *req = librbd::journal::RemoveRequest<I>::create( m_io_ctx, m_image_id, librbd::Journal<I>::IMAGE_CLIENT_ID, context_wq, ctx); req->send(); } template<typename I> void CreateRequest<I>::handle_journal_remove(int r) { ldout(m_cct, 15) << "r=" << r << dendl; if (r < 0) { lderr(m_cct) << "error cleaning up journal after creation failed: " << cpp_strerror(r) << dendl; } remove_object_map(); } template<typename I> void CreateRequest<I>::remove_object_map() { if ((m_features & RBD_FEATURE_OBJECT_MAP) == 0) { remove_header_object(); return; } ldout(m_cct, 15) << dendl; using klass = CreateRequest<I>; librados::AioCompletion *comp = create_rados_callback<klass, &klass::handle_remove_object_map>(this); int r = m_io_ctx.aio_remove(m_objmap_name, comp); ceph_assert(r == 0); comp->release(); } template<typename I> void CreateRequest<I>::handle_remove_object_map(int r) { ldout(m_cct, 15) << "r=" << r << dendl; if (r < 0) { lderr(m_cct) << "error cleaning up object map after creation failed: " << cpp_strerror(r) << dendl; } remove_header_object(); } template<typename I> void CreateRequest<I>::remove_header_object() { ldout(m_cct, 15) << dendl; using klass = CreateRequest<I>; librados::AioCompletion *comp = create_rados_callback<klass, &klass::handle_remove_header_object>(this); int r = m_io_ctx.aio_remove(m_header_obj, comp); ceph_assert(r == 0); comp->release(); } template<typename I> void CreateRequest<I>::handle_remove_header_object(int r) { ldout(m_cct, 15) << "r=" << r << dendl; if (r < 0) { lderr(m_cct) << "error cleaning up image header after creation failed: " << cpp_strerror(r) << dendl; } remove_id_object(); } template<typename I> void CreateRequest<I>::remove_id_object() { ldout(m_cct, 15) << dendl; using klass = CreateRequest<I>; librados::AioCompletion 
*comp = create_rados_callback<klass, &klass::handle_remove_id_object>(this); int r = m_io_ctx.aio_remove(m_id_obj, comp); ceph_assert(r == 0); comp->release(); } template<typename I> void CreateRequest<I>::handle_remove_id_object(int r) { ldout(m_cct, 15) << "r=" << r << dendl; if (r < 0) { lderr(m_cct) << "error cleaning up id object after creation failed: " << cpp_strerror(r) << dendl; } remove_from_dir(); } template<typename I> void CreateRequest<I>::remove_from_dir() { ldout(m_cct, 15) << dendl; librados::ObjectWriteOperation op; cls_client::dir_remove_image(&op, m_image_name, m_image_id); using klass = CreateRequest<I>; librados::AioCompletion *comp = create_rados_callback<klass, &klass::handle_remove_from_dir>(this); int r = m_io_ctx.aio_operate(RBD_DIRECTORY, comp, &op); ceph_assert(r == 0); comp->release(); } template<typename I> void CreateRequest<I>::handle_remove_from_dir(int r) { ldout(m_cct, 15) << "r=" << r << dendl; if (r < 0) { lderr(m_cct) << "error cleaning up image from rbd_directory object " << "after creation failed: " << cpp_strerror(r) << dendl; } complete(m_r_saved); } } //namespace image } //namespace librbd template class librbd::image::CreateRequest<librbd::ImageCtx>;
24,258
28.017943
95
cc
null
ceph-main/src/librbd/image/CreateRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IMAGE_CREATE_REQUEST_H #define CEPH_LIBRBD_IMAGE_CREATE_REQUEST_H #include "common/config_fwd.h" #include "include/int_types.h" #include "include/buffer.h" #include "include/rados/librados.hpp" #include "include/rbd/librbd.hpp" #include "cls/rbd/cls_rbd_types.h" #include "librbd/ImageCtx.h" class Context; using librados::IoCtx; namespace journal { class Journaler; } namespace librbd { namespace asio { struct ContextWQ; } namespace image { template <typename ImageCtxT = ImageCtx> class CreateRequest { public: static CreateRequest *create(const ConfigProxy& config, IoCtx &ioctx, const std::string &image_name, const std::string &image_id, uint64_t size, const ImageOptions &image_options, uint32_t create_flags, cls::rbd::MirrorImageMode mirror_image_mode, const std::string &non_primary_global_image_id, const std::string &primary_mirror_uuid, asio::ContextWQ *op_work_queue, Context *on_finish) { return new CreateRequest(config, ioctx, image_name, image_id, size, image_options, create_flags, mirror_image_mode, non_primary_global_image_id, primary_mirror_uuid, op_work_queue, on_finish); } static int validate_order(CephContext *cct, uint8_t order); void send(); private: /** * @verbatim * * <start> . . . . > . . . . . * | . * v . * VALIDATE DATA POOL v (pool validation * | . disabled) * v . * (error: bottom up) ADD IMAGE TO DIRECTORY < . . . . * _______<_______ | * | | v * | | CREATE ID OBJECT * | | / | * | REMOVE FROM DIR <-------/ v * | | NEGOTIATE FEATURES (when using default features) * | | | * | | v (stripingv2 disabled) * | | CREATE IMAGE. . . . > . . . . * v | / | . * | REMOVE ID OBJ <---------/ v . * | | SET STRIPE UNIT COUNT . * | | / | \ . . . . . > . . . . * | REMOVE HEADER OBJ<------/ v /. (object-map * | |\ OBJECT MAP RESIZE . . < . . * v disabled) * | | \ / | \ . . . . . > . . . . * | | *<-----------/ v /. (journaling * | | FETCH MIRROR MODE. . 
< . . * v disabled) * | | / | . * | REMOVE OBJECT MAP<--------/ v . * | |\ JOURNAL CREATE . * | | \ / | . * v | *<------------/ v . * | | MIRROR IMAGE ENABLE . * | | / | . * | JOURNAL REMOVE*<-------/ | . * | v . * |_____________>___________________<finish> . . . . < . . . . * * @endverbatim */ CreateRequest(const ConfigProxy& config, IoCtx &ioctx, const std::string &image_name, const std::string &image_id, uint64_t size, const ImageOptions &image_options, uint32_t create_flags, cls::rbd::MirrorImageMode mirror_image_mode, const std::string &non_primary_global_image_id, const std::string &primary_mirror_uuid, asio::ContextWQ *op_work_queue, Context *on_finish); const ConfigProxy& m_config; IoCtx m_io_ctx; IoCtx m_data_io_ctx; std::string m_image_name; std::string m_image_id; uint64_t m_size; uint8_t m_order = 0; uint64_t m_features = 0; uint64_t m_stripe_unit = 0; uint64_t m_stripe_count = 0; uint8_t m_journal_order = 0; uint8_t m_journal_splay_width = 0; std::string m_journal_pool; std::string m_data_pool; int64_t m_data_pool_id = -1; uint32_t m_create_flags; cls::rbd::MirrorImageMode m_mirror_image_mode; const std::string m_non_primary_global_image_id; const std::string m_primary_mirror_uuid; bool m_negotiate_features = false; asio::ContextWQ *m_op_work_queue; Context *m_on_finish; CephContext *m_cct; int m_r_saved = 0; // used to return actual error after cleanup file_layout_t m_layout; std::string m_id_obj, m_header_obj, m_objmap_name; bufferlist m_outbl; cls::rbd::MirrorMode m_mirror_mode = cls::rbd::MIRROR_MODE_DISABLED; cls::rbd::MirrorImage m_mirror_image_internal; void validate_data_pool(); void handle_validate_data_pool(int r); void add_image_to_directory(); void handle_add_image_to_directory(int r); void create_id_object(); void handle_create_id_object(int r); void negotiate_features(); void handle_negotiate_features(int r); void create_image(); void handle_create_image(int r); void set_stripe_unit_count(); void handle_set_stripe_unit_count(int r); void 
object_map_resize(); void handle_object_map_resize(int r); void fetch_mirror_mode(); void handle_fetch_mirror_mode(int r); void journal_create(); void handle_journal_create(int r); void mirror_image_enable(); void handle_mirror_image_enable(int r); void complete(int r); // cleanup void journal_remove(); void handle_journal_remove(int r); void remove_object_map(); void handle_remove_object_map(int r); void remove_header_object(); void handle_remove_header_object(int r); void remove_id_object(); void handle_remove_id_object(int r); void remove_from_dir(); void handle_remove_from_dir(int r); }; } //namespace image } //namespace librbd extern template class librbd::image::CreateRequest<librbd::ImageCtx>; #endif // CEPH_LIBRBD_IMAGE_CREATE_REQUEST_H
6,757
34.197917
82
h
null
ceph-main/src/librbd/image/DetachChildRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

// Async state machine that detaches a cloned child image from its parent.
// v2 clones decrement the parent snapshot's child reference in the parent
// header object (and opportunistically clean up a trashed parent snapshot /
// trashed parent image); v1 clones remove the child entry from the legacy
// RBD_CHILDREN object instead.

#include "librbd/image/DetachChildRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/journal/DisabledPolicy.h"
#include "librbd/trash/RemoveRequest.h"
#include <string>

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::DetachChildRequest: " << this \
                           << " " << __func__ << ": "

namespace librbd {
namespace image {

using util::create_context_callback;
using util::create_rados_callback;

template <typename I>
DetachChildRequest<I>::~DetachChildRequest() {
  // any parent image opened by the v2 path must have been closed (and the
  // pointer reset) before the request is destroyed
  ceph_assert(m_parent_image_ctx == nullptr);
}

// Entry point: pick the parent spec, then dispatch to the v1 or v2 path.
template <typename I>
void DetachChildRequest<I>::send() {
  {
    std::shared_lock image_locker{m_image_ctx.image_lock};

    // use oldest snapshot or HEAD for parent spec
    if (!m_image_ctx.snap_info.empty()) {
      m_parent_spec = m_image_ctx.snap_info.begin()->second.parent.spec;
    } else {
      m_parent_spec = m_image_ctx.parent_md.spec;
    }
  }

  if (m_parent_spec.pool_id == -1) {
    // ignore potential race with parent disappearing
    m_image_ctx.op_work_queue->queue(create_context_callback<
      DetachChildRequest<I>,
      &DetachChildRequest<I>::finish>(this), 0);
    return;
  } else if (!m_image_ctx.test_op_features(RBD_OPERATION_FEATURE_CLONE_CHILD)) {
    // image was cloned with the v1 scheme
    clone_v1_remove_child();
    return;
  }

  clone_v2_child_detach();
}

// v2: remove this image from the parent snapshot's child list in the
// parent image's header object.
template <typename I>
void DetachChildRequest<I>::clone_v2_child_detach() {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << dendl;

  librados::ObjectWriteOperation op;
  cls_client::child_detach(&op, m_parent_spec.snap_id,
                           {m_image_ctx.md_ctx.get_id(),
                            m_image_ctx.md_ctx.get_namespace(),
                            m_image_ctx.id});

  int r = util::create_ioctx(m_image_ctx.md_ctx, "parent image",
                             m_parent_spec.pool_id,
                             m_parent_spec.pool_namespace, &m_parent_io_ctx);
  if (r < 0) {
    if (r == -ENOENT) {
      // parent pool is gone; nothing left to detach from
      r = 0;
    }
    finish(r);
    return;
  }

  m_parent_header_name = util::header_name(m_parent_spec.image_id);

  auto aio_comp = create_rados_callback<
    DetachChildRequest<I>,
    &DetachChildRequest<I>::handle_clone_v2_child_detach>(this);
  r = m_parent_io_ctx.aio_operate(m_parent_header_name, aio_comp, &op);
  ceph_assert(r == 0);
  aio_comp->release();
}

template <typename I>
void DetachChildRequest<I>::handle_clone_v2_child_detach(int r) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;

  // -ENOENT (missing parent header) is not an error here
  if (r < 0 && r != -ENOENT) {
    lderr(cct) << "error detaching child from parent: " << cpp_strerror(r)
               << dendl;
    finish(r);
    return;
  }

  clone_v2_get_snapshot();
}

// v2: fetch the parent snapshot so we can decide whether it is a trashed
// snapshot that should now be removed.
template <typename I>
void DetachChildRequest<I>::clone_v2_get_snapshot() {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << dendl;

  librados::ObjectReadOperation op;
  cls_client::snapshot_get_start(&op, m_parent_spec.snap_id);

  m_out_bl.clear();
  auto aio_comp = create_rados_callback<
    DetachChildRequest<I>,
    &DetachChildRequest<I>::handle_clone_v2_get_snapshot>(this);
  int r = m_parent_io_ctx.aio_operate(m_parent_header_name, aio_comp, &op,
                                      &m_out_bl);
  ceph_assert(r == 0);
  aio_comp->release();
}

template <typename I>
void DetachChildRequest<I>::handle_clone_v2_get_snapshot(int r) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;

  bool remove_snapshot = false;
  if (r == 0) {
    cls::rbd::SnapshotInfo snap_info;
    auto it = m_out_bl.cbegin();
    r = cls_client::snapshot_get_finish(&it, &snap_info);
    if (r == 0) {
      m_parent_snap_namespace = snap_info.snapshot_namespace;
      m_parent_snap_name = snap_info.name;

      if (cls::rbd::get_snap_namespace_type(m_parent_snap_namespace) ==
            cls::rbd::SNAPSHOT_NAMESPACE_TYPE_TRASH &&
          snap_info.child_count == 0) {
        // snapshot is in trash w/ zero children, so remove it
        remove_snapshot = true;
      }
    }
  }

  if (r < 0 && r != -ENOENT) {
    // cleanup is best-effort: log at low priority and carry on
    ldout(cct, 5) << "failed to retrieve snapshot: " << cpp_strerror(r)
                  << dendl;
  }

  if (!remove_snapshot) {
    finish(0);
    return;
  }

  clone_v2_open_parent();
}

// v2: open the parent image so the trashed snapshot can be removed via the
// standard snap-remove operation.
template<typename I>
void DetachChildRequest<I>::clone_v2_open_parent() {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << dendl;

  m_parent_image_ctx = I::create("", m_parent_spec.image_id, nullptr,
                                 m_parent_io_ctx, false);

  // ensure non-primary images can be modified
  m_parent_image_ctx->read_only_mask &= ~IMAGE_READ_ONLY_FLAG_NON_PRIMARY;

  auto ctx = create_context_callback<
    DetachChildRequest<I>,
    &DetachChildRequest<I>::handle_clone_v2_open_parent>(this);
  m_parent_image_ctx->state->open(OPEN_FLAG_SKIP_OPEN_PARENT, ctx);
}

template<typename I>
void DetachChildRequest<I>::handle_clone_v2_open_parent(int r) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;

  if (r < 0) {
    // a failed open already cleaned itself up; snapshot removal is
    // best-effort, so complete successfully
    ldout(cct, 5) << "failed to open parent for read/write: "
                  << cpp_strerror(r) << dendl;
    m_parent_image_ctx = nullptr;
    finish(0);
    return;
  }

  // do not attempt to open the parent journal when removing the trash
  // snapshot, because the parent may be not promoted
  if (m_parent_image_ctx->test_features(RBD_FEATURE_JOURNALING)) {
    std::unique_lock image_locker{m_parent_image_ctx->image_lock};
    m_parent_image_ctx->set_journal_policy(new journal::DisabledPolicy());
  }

  // disallow any proxied maintenance operations
  {
    std::shared_lock owner_locker{m_parent_image_ctx->owner_lock};
    if (m_parent_image_ctx->exclusive_lock != nullptr) {
      m_parent_image_ctx->exclusive_lock->block_requests(0);
    }
  }

  clone_v2_remove_snapshot();
}

// v2: remove the now-unreferenced trashed parent snapshot.
template<typename I>
void DetachChildRequest<I>::clone_v2_remove_snapshot() {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << dendl;

  auto ctx = create_context_callback<
    DetachChildRequest<I>,
    &DetachChildRequest<I>::handle_clone_v2_remove_snapshot>(this);
  m_parent_image_ctx->operations->snap_remove(m_parent_snap_namespace,
                                              m_parent_snap_name, ctx);
}

template<typename I>
void DetachChildRequest<I>::handle_clone_v2_remove_snapshot(int r) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;

  if (r < 0 && r != -ENOENT) {
    // best-effort: give up on further cleanup but still close the parent
    ldout(cct, 5) << "failed to remove trashed clone snapshot: "
                  << cpp_strerror(r) << dendl;
    clone_v2_close_parent();
    return;
  }

  if (m_parent_image_ctx->snaps.empty()) {
    // last snapshot gone -- the parent itself might be a deferred trash
    // image that can now be removed
    clone_v2_get_parent_trash_entry();
  } else {
    clone_v2_close_parent();
  }
}

// v2: look up the parent image in the pool's trash list.
template<typename I>
void DetachChildRequest<I>::clone_v2_get_parent_trash_entry() {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << dendl;

  librados::ObjectReadOperation op;
  cls_client::trash_get_start(&op, m_parent_image_ctx->id);

  m_out_bl.clear();
  auto aio_comp = create_rados_callback<
    DetachChildRequest<I>,
    &DetachChildRequest<I>::handle_clone_v2_get_parent_trash_entry>(this);
  int r = m_parent_io_ctx.aio_operate(RBD_TRASH, aio_comp, &op, &m_out_bl);
  ceph_assert(r == 0);
  aio_comp->release();
}

template<typename I>
void DetachChildRequest<I>::handle_clone_v2_get_parent_trash_entry(int r) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;

  if (r < 0 && r != -ENOENT) {
    ldout(cct, 5) << "failed to get parent trash entry: " << cpp_strerror(r)
                  << dendl;
    clone_v2_close_parent();
    return;
  }

  // only remove the parent if it was auto-trashed on behalf of its clones,
  // is in a normal state, and its deferment period has expired
  bool in_trash = false;

  if (r == 0) {
    cls::rbd::TrashImageSpec trash_spec;
    auto it = m_out_bl.cbegin();
    r = cls_client::trash_get_finish(&it, &trash_spec);

    if (r == 0 &&
        trash_spec.source == cls::rbd::TRASH_IMAGE_SOURCE_USER_PARENT &&
        trash_spec.state == cls::rbd::TRASH_IMAGE_STATE_NORMAL &&
        trash_spec.deferment_end_time <= ceph_clock_now()) {
      in_trash = true;
    }
  }

  if (in_trash) {
    clone_v2_remove_parent_from_trash();
  } else {
    clone_v2_close_parent();
  }
}

// v2: delete the trashed parent image (the RemoveRequest takes ownership of
// the open parent image context and closes it).
template<typename I>
void DetachChildRequest<I>::clone_v2_remove_parent_from_trash() {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << dendl;

  auto ctx = create_context_callback<
    DetachChildRequest<I>,
    &DetachChildRequest<I>::handle_clone_v2_remove_parent_from_trash>(this);
  auto req = librbd::trash::RemoveRequest<I>::create(
    m_parent_io_ctx, m_parent_image_ctx, m_image_ctx.op_work_queue, false,
    m_no_op, ctx);
  req->send();
}

template<typename I>
void DetachChildRequest<I>::handle_clone_v2_remove_parent_from_trash(int r) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;

  if (r < 0) {
    // best-effort: failure to purge the parent does not fail the detach
    ldout(cct, 5) << "failed to remove parent image:" << cpp_strerror(r)
                  << dendl;
  }

  m_parent_image_ctx = nullptr;
  finish(0);
}

// v2: close the parent image opened for snapshot cleanup.
template<typename I>
void DetachChildRequest<I>::clone_v2_close_parent() {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << dendl;

  auto ctx = create_context_callback<
    DetachChildRequest<I>,
    &DetachChildRequest<I>::handle_clone_v2_close_parent>(this);
  m_parent_image_ctx->state->close(ctx);
}

template<typename I>
void DetachChildRequest<I>::handle_clone_v2_close_parent(int r) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;

  if (r < 0) {
    ldout(cct, 5) << "failed to close parent image:" << cpp_strerror(r)
                  << dendl;
  }

  m_parent_image_ctx = nullptr;
  finish(0);
}

// v1: remove the child entry from the legacy RBD_CHILDREN index object in
// the child's own pool.
template<typename I>
void DetachChildRequest<I>::clone_v1_remove_child() {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << dendl;

  // v1 clones pre-date pool namespaces
  m_parent_spec.pool_namespace = "";

  librados::ObjectWriteOperation op;
  librbd::cls_client::remove_child(&op, m_parent_spec, m_image_ctx.id);

  auto aio_comp = create_rados_callback<
    DetachChildRequest<I>,
    &DetachChildRequest<I>::handle_clone_v1_remove_child>(this);
  int r = m_image_ctx.md_ctx.aio_operate(RBD_CHILDREN, aio_comp, &op);
  ceph_assert(r == 0);
  aio_comp->release();
}

template<typename I>
void DetachChildRequest<I>::handle_clone_v1_remove_child(int r) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;

  if (r == -ENOENT) {
    // already detached
    r = 0;
  } else if (r < 0) {
    lderr(cct) << "failed to remove child from children list: "
               << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }

  finish(0);
}

// Complete the user callback and self-destruct.
template <typename I>
void DetachChildRequest<I>::finish(int r) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;

  m_on_finish->complete(r);
  delete this;
}

} // namespace image
} // namespace librbd

template class librbd::image::DetachChildRequest<librbd::ImageCtx>;
11,042
27.099237
80
cc
null
ceph-main/src/librbd/image/DetachChildRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_IMAGE_DETACH_CHILD_REQUEST_H
#define CEPH_LIBRBD_IMAGE_DETACH_CHILD_REQUEST_H

#include "include/int_types.h"
#include "include/buffer.h"
#include "include/rados/librados.hpp"
#include "librbd/Types.h"
#include "librbd/internal.h"

class Context;

namespace librbd {

class ImageCtx;

namespace image {

// Async request that detaches a cloned image from its parent, handling both
// v1 (legacy RBD_CHILDREN index) and v2 (per-snapshot child ref count)
// clone formats.  The request deletes itself upon completion.
template <typename ImageCtxT = ImageCtx>
class DetachChildRequest {
public:
  static DetachChildRequest* create(ImageCtxT& image_ctx, Context* on_finish) {
    return new DetachChildRequest(image_ctx, on_finish);
  }

  DetachChildRequest(ImageCtxT& image_ctx, Context* on_finish)
    : m_image_ctx(image_ctx), m_on_finish(on_finish) {
  }
  ~DetachChildRequest();

  void send();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    | (v1)            | (v2)
   *    /--------------/  \--------------\
   *    |                                |
   *    v                                v
   * REMOVE_CHILD                   CHILD_DETACH
   *    |                                |
   *    |                                v
   *    |                          GET_SNAPSHOT
   *    | (snapshot in-use) .            |
   *    |/. . . . . . . . . . . . . . .  |
   *    |                                v
   *    |                          OPEN_PARENT
   *    |                                |
   *    |                                v   (has more children)
   *    |                          REMOVE_SNAPSHOT ---------------\
   *    |                                |                        |
   *    |                                v   (noent)              |
   *    | (auto-delete when        GET_PARENT_TRASH_ENTRY . . . .\|
   *    |  last child detached)          |                        |
   *    |                                v                        v
   *    |                  REMOVE_PARENT_FROM_TRASH        CLOSE_PARENT
   *    |                                |                        |
   *    |/------------------------------/------------------------/
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  ImageCtxT& m_image_ctx;       // child image being detached
  Context* m_on_finish;         // user completion callback

  librados::IoCtx m_parent_io_ctx;           // I/O context for parent pool
  cls::rbd::ParentImageSpec m_parent_spec;   // parent pool/image/snap triple
  std::string m_parent_header_name;          // parent header object name

  cls::rbd::SnapshotNamespace m_parent_snap_namespace;
  std::string m_parent_snap_name;

  // opened only on the v2 trash-snapshot cleanup path; must be null again
  // before destruction
  ImageCtxT* m_parent_image_ctx = nullptr;

  ceph::bufferlist m_out_bl;    // scratch buffer for read op results
  NoOpProgressContext m_no_op;  // progress sink for trash removal

  void clone_v2_child_detach();
  void handle_clone_v2_child_detach(int r);

  void clone_v2_get_snapshot();
  void handle_clone_v2_get_snapshot(int r);

  void clone_v2_open_parent();
  void handle_clone_v2_open_parent(int r);

  void clone_v2_remove_snapshot();
  void handle_clone_v2_remove_snapshot(int r);

  void clone_v2_get_parent_trash_entry();
  void handle_clone_v2_get_parent_trash_entry(int r);

  void clone_v2_remove_parent_from_trash();
  void handle_clone_v2_remove_parent_from_trash(int r);

  void clone_v2_close_parent();
  void handle_clone_v2_close_parent(int r);

  void clone_v1_remove_child();
  void handle_clone_v1_remove_child(int r);

  void finish(int r);
};

} // namespace image
} // namespace librbd

extern template class librbd::image::DetachChildRequest<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_IMAGE_DETACH_CHILD_REQUEST_H
3,437
27.65
79
h
null
ceph-main/src/librbd/image/DetachParentRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/image/DetachParentRequest.h" #include "common/dout.h" #include "common/errno.h" #include "cls/rbd/cls_rbd_client.h" #include "librbd/ImageCtx.h" #include "librbd/Utils.h" #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::image::DetachParentRequest: " << this \ << " " << __func__ << ": " namespace librbd { namespace image { using util::create_context_callback; using util::create_rados_callback; template <typename I> void DetachParentRequest<I>::send() { detach_parent(); } template <typename I> void DetachParentRequest<I>::detach_parent() { auto cct = m_image_ctx.cct; ldout(cct, 5) << dendl; librados::ObjectWriteOperation op; if (!m_legacy_parent) { librbd::cls_client::parent_detach(&op); } else { librbd::cls_client::remove_parent(&op); } auto aio_comp = create_rados_callback< DetachParentRequest<I>, &DetachParentRequest<I>::handle_detach_parent>(this); int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, aio_comp, &op); ceph_assert(r == 0); aio_comp->release(); } template <typename I> void DetachParentRequest<I>::handle_detach_parent(int r) { auto cct = m_image_ctx.cct; ldout(cct, 5) << dendl; if (!m_legacy_parent && r == -EOPNOTSUPP) { ldout(cct, 10) << "retrying using legacy parent method" << dendl; m_legacy_parent = true; detach_parent(); return; } if (r < 0 && r != -ENOENT) { lderr(cct) << "detach parent encountered an error: " << cpp_strerror(r) << dendl; finish(r); return; } finish(0); } template <typename I> void DetachParentRequest<I>::finish(int r) { auto cct = m_image_ctx.cct; ldout(cct, 5) << "r=" << r << dendl; m_on_finish->complete(r); delete this; } } // namespace image } // namespace librbd template class librbd::image::DetachParentRequest<librbd::ImageCtx>;
2,017
23.609756
80
cc
null
ceph-main/src/librbd/image/DetachParentRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IMAGE_DETACH_PARENT_REQUEST_H #define CEPH_LIBRBD_IMAGE_DETACH_PARENT_REQUEST_H #include "include/int_types.h" #include "include/buffer.h" #include "include/rados/librados.hpp" #include "librbd/Types.h" class Context; namespace librbd { class ImageCtx; namespace image { template <typename ImageCtxT = ImageCtx> class DetachParentRequest { public: static DetachParentRequest* create(ImageCtxT& image_ctx, Context* on_finish) { return new DetachParentRequest(image_ctx, on_finish); } DetachParentRequest(ImageCtxT& image_ctx, Context* on_finish) : m_image_ctx(image_ctx), m_on_finish(on_finish) { } void send(); private: /** * @verbatim * * <start> * | * * * * * * * | * * -EOPNOTSUPP * v v * * DETACH_PARENT * * * * | * v * <finish> * * @endverbatim */ ImageCtxT& m_image_ctx; Context* m_on_finish; bool m_legacy_parent = false; void detach_parent(); void handle_detach_parent(int r); void finish(int r); }; } // namespace image } // namespace librbd extern template class librbd::image::DetachParentRequest<librbd::ImageCtx>; #endif // CEPH_LIBRBD_IMAGE_DETACH_PARENT_REQUEST_H
1,326
18.80597
80
h
null
ceph-main/src/librbd/image/GetMetadataRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

// Async request that pages through an image's key/value metadata, applying
// optional internal-key and prefix filters, until either the metadata is
// exhausted or the requested maximum number of results has been gathered.

#include "librbd/image/GetMetadataRequest.h"
#include "cls/rbd/cls_rbd_client.h"
#include "common/dout.h"
#include "common/errno.h"
#include "include/ceph_assert.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include <boost/algorithm/string/predicate.hpp>

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::GetMetadataRequest: " \
                           << this << " " << __func__ << ": "

// maximum number of keys requested per metadata_list round-trip
#define MAX_KEYS 64U

namespace librbd {
namespace image {
namespace {

// keys with this prefix are reserved for librbd-internal bookkeeping
static const std::string INTERNAL_KEY_PREFIX{".rbd"};

} // anonymous namespace

using util::create_rados_callback;

template <typename I>
GetMetadataRequest<I>::GetMetadataRequest(
    IoCtx &io_ctx, const std::string &oid, bool filter_internal,
    const std::string& filter_key_prefix, const std::string& last_key,
    uint32_t max_results, KeyValues* key_values, Context *on_finish)
  : m_io_ctx(io_ctx), m_oid(oid), m_filter_internal(filter_internal),
    m_filter_key_prefix(filter_key_prefix), m_last_key(last_key),
    m_max_results(max_results), m_key_values(key_values),
    m_on_finish(on_finish),
    m_cct(reinterpret_cast<CephContext*>(m_io_ctx.cct())) {
}

template <typename I>
void GetMetadataRequest<I>::send() {
  metadata_list();
}

// Issue one paged metadata_list call starting after m_last_key.
template <typename I>
void GetMetadataRequest<I>::metadata_list() {
  ldout(m_cct, 15) << "start_key=" << m_last_key << dendl;

  // never request more keys than needed to reach m_max_results
  m_expected_results = MAX_KEYS;
  if (m_max_results > 0) {
    m_expected_results = std::min<uint32_t>(
      m_expected_results, m_max_results - m_key_values->size());
  }

  librados::ObjectReadOperation op;
  cls_client::metadata_list_start(&op, m_last_key, m_expected_results);

  auto aio_comp = create_rados_callback<
    GetMetadataRequest<I>, &GetMetadataRequest<I>::handle_metadata_list>(this);
  m_out_bl.clear();
  m_io_ctx.aio_operate(m_oid, aio_comp, &op, &m_out_bl);
  aio_comp->release();
}

// Decode one page of results, apply filters, and either loop for the next
// page or finish.
template <typename I>
void GetMetadataRequest<I>::handle_metadata_list(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  KeyValues metadata;
  if (r == 0) {
    auto it = m_out_bl.cbegin();
    r = cls_client::metadata_list_finish(&it, &metadata);
  }

  if (r == -ENOENT || r == -EOPNOTSUPP) {
    // object missing or OSD lacks the method: treat as "no metadata"
    finish(0);
    return;
  } else if (r < 0) {
    lderr(m_cct) << "failed to retrieve image metadata: " << cpp_strerror(r)
                 << dendl;
    finish(r);
    return;
  }

  for (auto it = metadata.begin(); it != metadata.end(); ++it) {
    if (m_filter_internal &&
        boost::starts_with(it->first, INTERNAL_KEY_PREFIX)) {
      // skip librbd-internal keys when requested
      continue;
    } else if (!m_filter_key_prefix.empty() &&
               !boost::starts_with(it->first, m_filter_key_prefix)) {
      // skip keys outside the caller-supplied prefix
      continue;
    }
    m_key_values->insert({it->first, std::move(it->second)});
  }
  if (!metadata.empty()) {
    // resume the next page after the last key seen (filtered or not)
    m_last_key = metadata.rbegin()->first;
  }

  // a full page implies more data may remain; keep paging until the result
  // cap (if any) is reached
  if (metadata.size() == m_expected_results &&
      (m_max_results == 0 || m_key_values->size() < m_max_results)) {
    metadata_list();
    return;
  }

  finish(0);
}

// Deliver the result to the user callback and self-destruct.
template <typename I>
void GetMetadataRequest<I>::finish(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  m_on_finish->complete(r);
  delete this;
}

} // namespace image
} // namespace librbd

template class librbd::image::GetMetadataRequest<librbd::ImageCtx>;
3,391
26.803279
79
cc
null
ceph-main/src/librbd/image/GetMetadataRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IMAGE_GET_METADATA_REQUEST_H #define CEPH_LIBRBD_IMAGE_GET_METADATA_REQUEST_H #include "include/common_fwd.h" #include "include/rados/librados.hpp" #include "include/rbd/librbd.hpp" #include <string> #include <map> class Context; namespace librbd { struct ImageCtx; namespace image { template <typename ImageCtxT = ImageCtx> class GetMetadataRequest { public: typedef std::map<std::string, bufferlist> KeyValues; static GetMetadataRequest* create( IoCtx &io_ctx, const std::string &oid, bool filter_internal, const std::string& filter_key_prefix, const std::string& last_key, uint32_t max_results, KeyValues* key_values, Context *on_finish) { return new GetMetadataRequest(io_ctx, oid, filter_internal, filter_key_prefix, last_key, max_results, key_values, on_finish); } GetMetadataRequest( IoCtx &io_ctx, const std::string &oid, bool filter_internal, const std::string& filter_key_prefix, const std::string& last_key, uint32_t max_results, KeyValues* key_values, Context *on_finish); void send(); private: /** * @verbatim * * <start> * | * | /-------\ * | | | * v v | * METADATA_LIST ---/ * | * v * <finish> * * @endverbatim */ librados::IoCtx m_io_ctx; std::string m_oid; bool m_filter_internal; std::string m_filter_key_prefix; std::string m_last_key; uint32_t m_max_results; KeyValues* m_key_values; Context* m_on_finish; CephContext* m_cct; bufferlist m_out_bl; uint32_t m_expected_results = 0; void metadata_list(); void handle_metadata_list(int r); void finish(int r); }; } //namespace image } //namespace librbd extern template class librbd::image::GetMetadataRequest<librbd::ImageCtx>; #endif // CEPH_LIBRBD_IMAGE_GET_METADATA_REQUEST_H
2,014
22.988095
75
h
null
ceph-main/src/librbd/image/ListWatchersRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

// Async request that lists the watchers on an image's header object and,
// depending on the flags, filters out (or keeps only) this client's own
// watch and rbd-mirror instance watches.

#include "ListWatchersRequest.h"
#include "common/RWLock.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/Utils.h"

#include <algorithm>

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::ListWatchersRequest: " << this \
                           << " " << __func__ << ": "

// debug-log formatting for a single watcher entry
static std::ostream& operator<<(std::ostream& os, const obj_watch_t& watch) {
  os << "{addr=" << watch.addr << ", "
     << "watcher_id=" << watch.watcher_id << ", "
     << "cookie=" << watch.cookie << "}";
  return os;
}

namespace librbd {
namespace image {

using librados::IoCtx;
using util::create_rados_callback;

template<typename I>
ListWatchersRequest<I>::ListWatchersRequest(I &image_ctx, int flags,
                                            std::list<obj_watch_t> *watchers,
                                            Context *on_finish)
  : m_image_ctx(image_ctx), m_flags(flags), m_watchers(watchers),
    m_on_finish(on_finish), m_cct(m_image_ctx.cct) {
  // "filter out mirror instances" and "mirror instances only" are mutually
  // exclusive
  ceph_assert((m_flags & LIST_WATCHERS_FILTER_OUT_MIRROR_INSTANCES) == 0 ||
              (m_flags & LIST_WATCHERS_MIRROR_INSTANCES_ONLY) == 0);
}

template<typename I>
void ListWatchersRequest<I>::send() {
  ldout(m_cct, 20) << dendl;

  list_image_watchers();
}

// List the watchers registered on the image header object.
template<typename I>
void ListWatchersRequest<I>::list_image_watchers() {
  ldout(m_cct, 20) << dendl;

  librados::ObjectReadOperation op;
  op.list_watchers(&m_object_watchers, &m_ret_val);

  using klass = ListWatchersRequest<I>;
  librados::AioCompletion *rados_completion =
    create_rados_callback<klass, &klass::handle_list_image_watchers>(this);

  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid,
                                         rados_completion, &op, &m_out_bl);
  ceph_assert(r == 0);
  rados_completion->release();
}

template<typename I>
void ListWatchersRequest<I>::handle_list_image_watchers(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  // the per-op return value may carry the real error
  if (r == 0 && m_ret_val < 0) {
    r = m_ret_val;
  }
  if (r < 0) {
    lderr(m_cct) << "error listing image watchers: " << cpp_strerror(r)
                 << dendl;
    finish(r);
    return;
  }

  ldout(m_cct, 20) << "object_watchers=" << m_object_watchers << dendl;
  list_mirror_watchers();
}

// List the watchers on the mirroring object so mirror instances can be
// recognized by address; skipped when no flag needs that information.
template<typename I>
void ListWatchersRequest<I>::list_mirror_watchers() {
  if ((m_object_watchers.empty()) ||
      (m_flags & (LIST_WATCHERS_FILTER_OUT_MIRROR_INSTANCES |
                  LIST_WATCHERS_MIRROR_INSTANCES_ONLY)) == 0) {
    finish(0);
    return;
  }

  ldout(m_cct, 20) << dendl;

  librados::ObjectReadOperation op;
  op.list_watchers(&m_mirror_watchers, &m_ret_val);

  using klass = ListWatchersRequest<I>;
  librados::AioCompletion *rados_completion =
    create_rados_callback<klass, &klass::handle_list_mirror_watchers>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(RBD_MIRRORING, rados_completion,
                                         &op, &m_out_bl);
  ceph_assert(r == 0);
  rados_completion->release();
}

template<typename I>
void ListWatchersRequest<I>::handle_list_mirror_watchers(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r == 0 && m_ret_val < 0) {
    r = m_ret_val;
  }
  if (r < 0 && r != -ENOENT) {
    // non-fatal: filtering proceeds with an empty mirror watcher list
    ldout(m_cct, 1) << "error listing mirror watchers: " << cpp_strerror(r)
                    << dendl;
  }

  ldout(m_cct, 20) << "mirror_watchers=" << m_mirror_watchers << dendl;
  finish(0);
}

// Apply the requested filters to the collected watcher list, invoke the
// user callback, and self-destruct.
template<typename I>
void ListWatchersRequest<I>::finish(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r == 0) {
    m_watchers->clear();

    if (m_object_watchers.size() > 0) {
      std::shared_lock owner_locker{m_image_ctx.owner_lock};
      // cookie of this client's own watch (0 if not watching)
      uint64_t watch_handle = m_image_ctx.image_watcher != nullptr ?
        m_image_ctx.image_watcher->get_watch_handle() : 0;

      for (auto &w : m_object_watchers) {
        if ((m_flags & LIST_WATCHERS_FILTER_OUT_MY_INSTANCE) != 0) {
          if (w.cookie == watch_handle) {
            ldout(m_cct, 20) << "filtering out my instance: " << w << dendl;
            continue;
          }
        }
        // a watcher is considered a mirror instance if its address matches
        // one of the mirroring-object watchers
        auto it = std::find_if(m_mirror_watchers.begin(),
                               m_mirror_watchers.end(),
                               [w] (obj_watch_t &watcher) {
                                 return (strncmp(w.addr, watcher.addr,
                                                 sizeof(w.addr)) == 0);
                               });
        if ((m_flags & LIST_WATCHERS_FILTER_OUT_MIRROR_INSTANCES) != 0) {
          if (it != m_mirror_watchers.end()) {
            ldout(m_cct, 20) << "filtering out mirror instance: " << w
                             << dendl;
            continue;
          }
        } else if ((m_flags & LIST_WATCHERS_MIRROR_INSTANCES_ONLY) != 0) {
          if (it == m_mirror_watchers.end()) {
            ldout(m_cct, 20) << "filtering out non-mirror instance: " << w
                             << dendl;
            continue;
          }
        }
        m_watchers->push_back(w);
      }
    }
  }

  m_on_finish->complete(r);
  delete this;
}

} // namespace image
} // namespace librbd

template class librbd::image::ListWatchersRequest<librbd::ImageCtx>;
5,382
29.76
80
cc
null
ceph-main/src/librbd/image/ListWatchersRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IMAGE_LIST_WATCHERS_REQUEST_H #define CEPH_LIBRBD_IMAGE_LIST_WATCHERS_REQUEST_H #include "include/rados/rados_types.hpp" #include <list> class Context; namespace librbd { class ImageCtx; namespace image { enum { LIST_WATCHERS_FILTER_OUT_MY_INSTANCE = 1 << 0, LIST_WATCHERS_FILTER_OUT_MIRROR_INSTANCES = 1 << 1, LIST_WATCHERS_MIRROR_INSTANCES_ONLY = 1 << 3, }; template<typename ImageCtxT = ImageCtx> class ListWatchersRequest { public: static ListWatchersRequest *create(ImageCtxT &image_ctx, int flags, std::list<obj_watch_t> *watchers, Context *on_finish) { return new ListWatchersRequest(image_ctx, flags, watchers, on_finish); } void send(); private: /** * @verbatim * * <start> * | * v * LIST_IMAGE_WATCHERS * | * v * LIST_MIRROR_WATCHERS (skip if not needed) * | * v * <finish> * * @endverbatim */ ListWatchersRequest(ImageCtxT &image_ctx, int flags, std::list<obj_watch_t> *watchers, Context *on_finish); ImageCtxT& m_image_ctx; int m_flags; std::list<obj_watch_t> *m_watchers; Context *m_on_finish; CephContext *m_cct; int m_ret_val; bufferlist m_out_bl; std::list<obj_watch_t> m_object_watchers; std::list<obj_watch_t> m_mirror_watchers; void list_image_watchers(); void handle_list_image_watchers(int r); void list_mirror_watchers(); void handle_list_mirror_watchers(int r); void finish(int r); }; } // namespace image } // namespace librbd extern template class librbd::image::ListWatchersRequest<librbd::ImageCtx>; #endif // CEPH_LIBRBD_IMAGE_LIST_WATCHERS_REQUEST_H
1,824
20.987952
88
h
null
ceph-main/src/librbd/image/OpenRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/image/OpenRequest.h" #include "common/dout.h" #include "common/errno.h" #include "cls/rbd/cls_rbd_client.h" #include "librbd/ConfigWatcher.h" #include "librbd/ImageCtx.h" #include "librbd/PluginRegistry.h" #include "librbd/Utils.h" #include "librbd/cache/ObjectCacherObjectDispatch.h" #include "librbd/cache/WriteAroundObjectDispatch.h" #include "librbd/image/CloseRequest.h" #include "librbd/image/RefreshRequest.h" #include "librbd/image/SetSnapRequest.h" #include "librbd/io/SimpleSchedulerObjectDispatch.h" #include <boost/algorithm/string/predicate.hpp> #include "include/ceph_assert.h" #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::image::OpenRequest: " namespace librbd { namespace image { using util::create_context_callback; using util::create_rados_callback; template <typename I> OpenRequest<I>::OpenRequest(I *image_ctx, uint64_t flags, Context *on_finish) : m_image_ctx(image_ctx), m_skip_open_parent_image(flags & OPEN_FLAG_SKIP_OPEN_PARENT), m_on_finish(on_finish), m_error_result(0) { if ((flags & OPEN_FLAG_OLD_FORMAT) != 0) { m_image_ctx->old_format = true; } if ((flags & OPEN_FLAG_IGNORE_MIGRATING) != 0) { m_image_ctx->ignore_migrating = true; } } template <typename I> void OpenRequest<I>::send() { if (m_image_ctx->old_format) { send_v1_detect_header(); } else { send_v2_detect_header(); } } template <typename I> void OpenRequest<I>::send_v1_detect_header() { librados::ObjectReadOperation op; op.stat(NULL, NULL, NULL); using klass = OpenRequest<I>; librados::AioCompletion *comp = create_rados_callback<klass, &klass::handle_v1_detect_header>(this); m_out_bl.clear(); m_image_ctx->md_ctx.aio_operate(util::old_header_name(m_image_ctx->name), comp, &op, &m_out_bl); comp->release(); } template <typename I> Context *OpenRequest<I>::handle_v1_detect_header(int *result) { CephContext *cct = m_image_ctx->cct; 
ldout(cct, 10) << __func__ << ": r=" << *result << dendl; if (*result < 0) { if (*result != -ENOENT) { lderr(cct) << "failed to stat image header: " << cpp_strerror(*result) << dendl; } send_close_image(*result); } else { ldout(cct, 1) << "RBD image format 1 is deprecated. " << "Please copy this image to image format 2." << dendl; m_image_ctx->old_format = true; m_image_ctx->header_oid = util::old_header_name(m_image_ctx->name); m_image_ctx->apply_metadata({}, true); send_refresh(); } return nullptr; } template <typename I> void OpenRequest<I>::send_v2_detect_header() { if (m_image_ctx->id.empty()) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; librados::ObjectReadOperation op; op.stat(NULL, NULL, NULL); using klass = OpenRequest<I>; librados::AioCompletion *comp = create_rados_callback<klass, &klass::handle_v2_detect_header>(this); m_out_bl.clear(); m_image_ctx->md_ctx.aio_operate(util::id_obj_name(m_image_ctx->name), comp, &op, &m_out_bl); comp->release(); } else { send_v2_get_name(); } } template <typename I> Context *OpenRequest<I>::handle_v2_detect_header(int *result) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << __func__ << ": r=" << *result << dendl; if (*result == -ENOENT) { send_v1_detect_header(); } else if (*result < 0) { lderr(cct) << "failed to stat v2 image header: " << cpp_strerror(*result) << dendl; send_close_image(*result); } else { m_image_ctx->old_format = false; send_v2_get_id(); } return nullptr; } template <typename I> void OpenRequest<I>::send_v2_get_id() { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; librados::ObjectReadOperation op; cls_client::get_id_start(&op); using klass = OpenRequest<I>; librados::AioCompletion *comp = create_rados_callback<klass, &klass::handle_v2_get_id>(this); m_out_bl.clear(); m_image_ctx->md_ctx.aio_operate(util::id_obj_name(m_image_ctx->name), comp, &op, &m_out_bl); comp->release(); } template <typename I> Context 
*OpenRequest<I>::handle_v2_get_id(int *result) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << __func__ << ": r=" << *result << dendl; if (*result == 0) { auto it = m_out_bl.cbegin(); *result = cls_client::get_id_finish(&it, &m_image_ctx->id); } if (*result < 0) { lderr(cct) << "failed to retrieve image id: " << cpp_strerror(*result) << dendl; send_close_image(*result); } else { send_v2_get_initial_metadata(); } return nullptr; } template <typename I> void OpenRequest<I>::send_v2_get_name() { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; librados::ObjectReadOperation op; cls_client::dir_get_name_start(&op, m_image_ctx->id); using klass = OpenRequest<I>; librados::AioCompletion *comp = create_rados_callback< klass, &klass::handle_v2_get_name>(this); m_out_bl.clear(); m_image_ctx->md_ctx.aio_operate(RBD_DIRECTORY, comp, &op, &m_out_bl); comp->release(); } template <typename I> Context *OpenRequest<I>::handle_v2_get_name(int *result) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << __func__ << ": r=" << *result << dendl; if (*result == 0) { auto it = m_out_bl.cbegin(); *result = cls_client::dir_get_name_finish(&it, &m_image_ctx->name); } if (*result < 0 && *result != -ENOENT) { lderr(cct) << "failed to retrieve name: " << cpp_strerror(*result) << dendl; send_close_image(*result); } else if (*result == -ENOENT) { // image does not exist in directory, look in the trash bin ldout(cct, 10) << "image id " << m_image_ctx->id << " does not exist in " << "rbd directory, searching in rbd trash..." 
<< dendl; send_v2_get_name_from_trash(); } else { send_v2_get_initial_metadata(); } return nullptr; } template <typename I> void OpenRequest<I>::send_v2_get_name_from_trash() { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; librados::ObjectReadOperation op; cls_client::trash_get_start(&op, m_image_ctx->id); using klass = OpenRequest<I>; librados::AioCompletion *comp = create_rados_callback< klass, &klass::handle_v2_get_name_from_trash>(this); m_out_bl.clear(); m_image_ctx->md_ctx.aio_operate(RBD_TRASH, comp, &op, &m_out_bl); comp->release(); } template <typename I> Context *OpenRequest<I>::handle_v2_get_name_from_trash(int *result) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << __func__ << ": r=" << *result << dendl; cls::rbd::TrashImageSpec trash_spec; if (*result == 0) { auto it = m_out_bl.cbegin(); *result = cls_client::trash_get_finish(&it, &trash_spec); m_image_ctx->name = trash_spec.name; } if (*result < 0) { if (*result == -EOPNOTSUPP) { *result = -ENOENT; } if (*result == -ENOENT) { ldout(cct, 5) << "failed to retrieve name for image id " << m_image_ctx->id << dendl; } else { lderr(cct) << "failed to retrieve name from trash: " << cpp_strerror(*result) << dendl; } send_close_image(*result); } else { send_v2_get_initial_metadata(); } return nullptr; } template <typename I> void OpenRequest<I>::send_v2_get_initial_metadata() { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; m_image_ctx->old_format = false; m_image_ctx->header_oid = util::header_name(m_image_ctx->id); librados::ObjectReadOperation op; cls_client::get_size_start(&op, CEPH_NOSNAP); cls_client::get_object_prefix_start(&op); cls_client::get_features_start(&op, true); using klass = OpenRequest<I>; librados::AioCompletion *comp = create_rados_callback< klass, &klass::handle_v2_get_initial_metadata>(this); m_out_bl.clear(); m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op, &m_out_bl); 
comp->release(); } template <typename I> Context *OpenRequest<I>::handle_v2_get_initial_metadata(int *result) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << __func__ << ": r=" << *result << dendl; auto it = m_out_bl.cbegin(); if (*result >= 0) { uint64_t size; *result = cls_client::get_size_finish(&it, &size, &m_image_ctx->order); } if (*result >= 0) { *result = cls_client::get_object_prefix_finish(&it, &m_image_ctx->object_prefix); } if (*result >= 0) { uint64_t incompatible_features; *result = cls_client::get_features_finish(&it, &m_image_ctx->features, &incompatible_features); } if (*result < 0) { lderr(cct) << "failed to retrieve initial metadata: " << cpp_strerror(*result) << dendl; send_close_image(*result); return nullptr; } if (m_image_ctx->test_features(RBD_FEATURE_STRIPINGV2)) { send_v2_get_stripe_unit_count(); } else { send_v2_get_create_timestamp(); } return nullptr; } template <typename I> void OpenRequest<I>::send_v2_get_stripe_unit_count() { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; librados::ObjectReadOperation op; cls_client::get_stripe_unit_count_start(&op); using klass = OpenRequest<I>; librados::AioCompletion *comp = create_rados_callback< klass, &klass::handle_v2_get_stripe_unit_count>(this); m_out_bl.clear(); m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op, &m_out_bl); comp->release(); } template <typename I> Context *OpenRequest<I>::handle_v2_get_stripe_unit_count(int *result) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << __func__ << ": r=" << *result << dendl; if (*result == 0) { auto it = m_out_bl.cbegin(); *result = cls_client::get_stripe_unit_count_finish( &it, &m_image_ctx->stripe_unit, &m_image_ctx->stripe_count); } if (*result == -ENOEXEC || *result == -EINVAL) { *result = 0; } if (*result < 0) { lderr(cct) << "failed to read striping metadata: " << cpp_strerror(*result) << dendl; send_close_image(*result); return nullptr; } 
send_v2_get_create_timestamp(); return nullptr; } template <typename I> void OpenRequest<I>::send_v2_get_create_timestamp() { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; librados::ObjectReadOperation op; cls_client::get_create_timestamp_start(&op); using klass = OpenRequest<I>; librados::AioCompletion *comp = create_rados_callback< klass, &klass::handle_v2_get_create_timestamp>(this); m_out_bl.clear(); m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op, &m_out_bl); comp->release(); } template <typename I> Context *OpenRequest<I>::handle_v2_get_create_timestamp(int *result) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; if (*result == 0) { auto it = m_out_bl.cbegin(); *result = cls_client::get_create_timestamp_finish(&it, &m_image_ctx->create_timestamp); } if (*result < 0 && *result != -EOPNOTSUPP) { lderr(cct) << "failed to retrieve create_timestamp: " << cpp_strerror(*result) << dendl; send_close_image(*result); return nullptr; } send_v2_get_access_modify_timestamp(); return nullptr; } template <typename I> void OpenRequest<I>::send_v2_get_access_modify_timestamp() { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; librados::ObjectReadOperation op; cls_client::get_access_timestamp_start(&op); cls_client::get_modify_timestamp_start(&op); //TODO: merge w/ create timestamp query after luminous EOLed using klass = OpenRequest<I>; librados::AioCompletion *comp = create_rados_callback< klass, &klass::handle_v2_get_access_modify_timestamp>(this); m_out_bl.clear(); m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op, &m_out_bl); comp->release(); } template <typename I> Context *OpenRequest<I>::handle_v2_get_access_modify_timestamp(int *result) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; if (*result == 0) { auto it = 
m_out_bl.cbegin(); *result = cls_client::get_access_timestamp_finish(&it, &m_image_ctx->access_timestamp); if (*result == 0) *result = cls_client::get_modify_timestamp_finish(&it, &m_image_ctx->modify_timestamp); } if (*result < 0 && *result != -EOPNOTSUPP) { lderr(cct) << "failed to retrieve access/modify_timestamp: " << cpp_strerror(*result) << dendl; send_close_image(*result); return nullptr; } send_v2_get_data_pool(); return nullptr; } template <typename I> void OpenRequest<I>::send_v2_get_data_pool() { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; librados::ObjectReadOperation op; cls_client::get_data_pool_start(&op); using klass = OpenRequest<I>; librados::AioCompletion *comp = create_rados_callback< klass, &klass::handle_v2_get_data_pool>(this); m_out_bl.clear(); m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op, &m_out_bl); comp->release(); } template <typename I> Context *OpenRequest<I>::handle_v2_get_data_pool(int *result) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; int64_t data_pool_id = -1; if (*result == 0) { auto it = m_out_bl.cbegin(); *result = cls_client::get_data_pool_finish(&it, &data_pool_id); } else if (*result == -EOPNOTSUPP) { *result = 0; } if (*result < 0) { lderr(cct) << "failed to read data pool: " << cpp_strerror(*result) << dendl; send_close_image(*result); return nullptr; } if (data_pool_id != -1) { *result = util::create_ioctx(m_image_ctx->md_ctx, "data pool", data_pool_id, {}, &m_image_ctx->data_ctx); if (*result < 0) { if (*result != -ENOENT) { send_close_image(*result); return nullptr; } m_image_ctx->data_ctx.close(); } else { m_image_ctx->rebuild_data_io_context(); } } else { data_pool_id = m_image_ctx->md_ctx.get_id(); } m_image_ctx->init_layout(data_pool_id); send_refresh(); return nullptr; } template <typename I> void OpenRequest<I>::send_refresh() { m_image_ctx->init(); CephContext *cct = 
m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; m_image_ctx->config_watcher = ConfigWatcher<I>::create(*m_image_ctx); m_image_ctx->config_watcher->init(); using klass = OpenRequest<I>; RefreshRequest<I> *req = RefreshRequest<I>::create( *m_image_ctx, false, m_skip_open_parent_image, create_context_callback<klass, &klass::handle_refresh>(this)); req->send(); } template <typename I> Context *OpenRequest<I>::handle_refresh(int *result) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << __func__ << ": r=" << *result << dendl; if (*result < 0) { lderr(cct) << "failed to refresh image: " << cpp_strerror(*result) << dendl; send_close_image(*result); return nullptr; } send_init_plugin_registry(); return nullptr; } template <typename I> void OpenRequest<I>::send_init_plugin_registry() { CephContext *cct = m_image_ctx->cct; auto plugins = m_image_ctx->config.template get_val<std::string>( "rbd_plugins"); ldout(cct, 10) << __func__ << ": plugins=" << plugins << dendl; auto ctx = create_context_callback< OpenRequest<I>, &OpenRequest<I>::handle_init_plugin_registry>(this); m_image_ctx->plugin_registry->init(plugins, ctx); } template <typename I> Context* OpenRequest<I>::handle_init_plugin_registry(int *result) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << __func__ << ": r=" << *result << dendl; if (*result < 0) { lderr(cct) << "failed to initialize plugin registry: " << cpp_strerror(*result) << dendl; send_close_image(*result); return nullptr; } return send_init_cache(result); } template <typename I> Context *OpenRequest<I>::send_init_cache(int *result) { if (!m_image_ctx->cache || m_image_ctx->child != nullptr || !m_image_ctx->data_ctx.is_valid()) { return send_register_watch(result); } CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; size_t max_dirty = m_image_ctx->config.template get_val<Option::size_t>( "rbd_cache_max_dirty"); auto writethrough_until_flush = m_image_ctx->config.template 
get_val<bool>( "rbd_cache_writethrough_until_flush"); auto cache_policy = m_image_ctx->config.template get_val<std::string>( "rbd_cache_policy"); if (cache_policy == "writearound") { auto cache = cache::WriteAroundObjectDispatch<I>::create( m_image_ctx, max_dirty, writethrough_until_flush); cache->init(); m_image_ctx->readahead.set_max_readahead_size(0); } else if (cache_policy == "writethrough" || cache_policy == "writeback") { if (cache_policy == "writethrough") { max_dirty = 0; } auto cache = cache::ObjectCacherObjectDispatch<I>::create( m_image_ctx, max_dirty, writethrough_until_flush); cache->init(); // readahead requires the object cacher cache m_image_ctx->readahead.set_trigger_requests( m_image_ctx->config.template get_val<uint64_t>("rbd_readahead_trigger_requests")); m_image_ctx->readahead.set_max_readahead_size( m_image_ctx->config.template get_val<Option::size_t>("rbd_readahead_max_bytes")); } return send_register_watch(result); } template <typename I> Context *OpenRequest<I>::send_register_watch(int *result) { if ((m_image_ctx->read_only_flags & IMAGE_READ_ONLY_FLAG_USER) != 0U) { return send_set_snap(result); } CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; using klass = OpenRequest<I>; Context *ctx = create_context_callback< klass, &klass::handle_register_watch>(this); m_image_ctx->register_watch(ctx); return nullptr; } template <typename I> Context *OpenRequest<I>::handle_register_watch(int *result) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; if (*result == -EPERM) { ldout(cct, 5) << "user does not have write permission" << dendl; send_close_image(*result); return nullptr; } else if (*result < 0) { lderr(cct) << "failed to register watch: " << cpp_strerror(*result) << dendl; send_close_image(*result); return nullptr; } return send_set_snap(result); } template <typename I> Context *OpenRequest<I>::send_set_snap(int *result) { if 
(m_image_ctx->snap_name.empty() && m_image_ctx->open_snap_id == CEPH_NOSNAP) { *result = 0; return finalize(*result); } CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; uint64_t snap_id = CEPH_NOSNAP; std::swap(m_image_ctx->open_snap_id, snap_id); if (snap_id == CEPH_NOSNAP) { std::shared_lock image_locker{m_image_ctx->image_lock}; snap_id = m_image_ctx->get_snap_id(m_image_ctx->snap_namespace, m_image_ctx->snap_name); } if (snap_id == CEPH_NOSNAP) { lderr(cct) << "failed to find snapshot " << m_image_ctx->snap_name << dendl; send_close_image(-ENOENT); return nullptr; } using klass = OpenRequest<I>; SetSnapRequest<I> *req = SetSnapRequest<I>::create( *m_image_ctx, snap_id, create_context_callback<klass, &klass::handle_set_snap>(this)); req->send(); return nullptr; } template <typename I> Context *OpenRequest<I>::handle_set_snap(int *result) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << __func__ << ": r=" << *result << dendl; if (*result < 0) { lderr(cct) << "failed to set image snapshot: " << cpp_strerror(*result) << dendl; send_close_image(*result); return nullptr; } return finalize(*result); } template <typename I> Context *OpenRequest<I>::finalize(int r) { if (r == 0) { auto io_scheduler_cfg = m_image_ctx->config.template get_val<std::string>("rbd_io_scheduler"); if (io_scheduler_cfg == "simple" && !m_image_ctx->read_only) { auto io_scheduler = io::SimpleSchedulerObjectDispatch<I>::create(m_image_ctx); io_scheduler->init(); } } return m_on_finish; } template <typename I> void OpenRequest<I>::send_close_image(int error_result) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; m_error_result = error_result; using klass = OpenRequest<I>; Context *ctx = create_context_callback<klass, &klass::handle_close_image>( this); CloseRequest<I> *req = CloseRequest<I>::create(m_image_ctx, ctx); req->send(); } template <typename I> Context *OpenRequest<I>::handle_close_image(int *result) 
{ CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << __func__ << ": r=" << *result << dendl; if (*result < 0) { lderr(cct) << "failed to close image: " << cpp_strerror(*result) << dendl; } if (m_error_result < 0) { *result = m_error_result; } return m_on_finish; } } // namespace image } // namespace librbd template class librbd::image::OpenRequest<librbd::ImageCtx>;
22,061
29.304945
88
cc
null
ceph-main/src/librbd/image/OpenRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IMAGE_OPEN_REQUEST_H #define CEPH_LIBRBD_IMAGE_OPEN_REQUEST_H #include "include/buffer.h" #include <map> #include <string> class Context; namespace librbd { class ImageCtx; namespace image { template <typename ImageCtxT = ImageCtx> class OpenRequest { public: static OpenRequest *create(ImageCtxT *image_ctx, uint64_t flags, Context *on_finish) { return new OpenRequest(image_ctx, flags, on_finish); } void send(); private: /** * @verbatim * * <start> * | * | (v1) * |-----> V1_DETECT_HEADER * | | * | \-------------------------------\ * | (v2) | * \-----> V2_DETECT_HEADER | * | | * v | * V2_GET_ID|NAME | * | | * v (skip if have name) | * V2_GET_NAME_FROM_TRASH | * | | * v | * V2_GET_INITIAL_METADATA | * | | * v | * V2_GET_STRIPE_UNIT_COUNT (skip if | * | disabled) | * v | * V2_GET_CREATE_TIMESTAMP | * | | * v | * V2_GET_ACCESS_MODIFY_TIMESTAMP | * | | * v | * V2_GET_DATA_POOL --------------> REFRESH * | * v * INIT_PLUGIN_REGISTRY * | * v * INIT_CACHE * | * v * REGISTER_WATCH (skip if * | read-only) * v * SET_SNAP (skip if no snap) * | * v * <finish> * ^ * (on error) | * * * * * * * > CLOSE ------------------------/ * * @endverbatim */ OpenRequest(ImageCtxT *image_ctx, uint64_t flags, Context *on_finish); ImageCtxT *m_image_ctx; bool m_skip_open_parent_image; Context *m_on_finish; bufferlist m_out_bl; int m_error_result; void send_v1_detect_header(); Context *handle_v1_detect_header(int *result); void send_v2_detect_header(); Context *handle_v2_detect_header(int *result); void send_v2_get_id(); Context *handle_v2_get_id(int *result); void send_v2_get_name(); Context *handle_v2_get_name(int *result); void send_v2_get_name_from_trash(); Context *handle_v2_get_name_from_trash(int *result); void send_v2_get_initial_metadata(); Context *handle_v2_get_initial_metadata(int *result); void send_v2_get_stripe_unit_count(); Context 
*handle_v2_get_stripe_unit_count(int *result); void send_v2_get_create_timestamp(); Context *handle_v2_get_create_timestamp(int *result); void send_v2_get_access_modify_timestamp(); Context *handle_v2_get_access_modify_timestamp(int *result); void send_v2_get_data_pool(); Context *handle_v2_get_data_pool(int *result); void send_refresh(); Context *handle_refresh(int *result); void send_init_plugin_registry(); Context* handle_init_plugin_registry(int *result); Context *send_init_cache(int *result); Context *send_register_watch(int *result); Context *handle_register_watch(int *result); Context *send_set_snap(int *result); Context *handle_set_snap(int *result); Context *finalize(int r); void send_close_image(int error_result); Context *handle_close_image(int *result); }; } // namespace image } // namespace librbd extern template class librbd::image::OpenRequest<librbd::ImageCtx>; #endif // CEPH_LIBRBD_IMAGE_OPEN_REQUEST_H
4,830
31.206667
75
h
null
ceph-main/src/librbd/image/PreRemoveRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/image/PreRemoveRequest.h" #include "common/dout.h" #include "common/errno.h" #include "cls/rbd/cls_rbd_types.h" #include "librbd/ExclusiveLock.h" #include "librbd/Utils.h" #include "librbd/exclusive_lock/StandardPolicy.h" #include "librbd/image/ListWatchersRequest.h" #include "librbd/journal/DisabledPolicy.h" #include "librbd/operation/SnapshotRemoveRequest.h" #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::image::PreRemoveRequest: " << this \ << " " << __func__ << ": " namespace librbd { namespace image { namespace { bool auto_delete_snapshot(const SnapInfo& snap_info) { auto snap_namespace_type = cls::rbd::get_snap_namespace_type( snap_info.snap_namespace); switch (snap_namespace_type) { case cls::rbd::SNAPSHOT_NAMESPACE_TYPE_TRASH: return true; default: return false; } } bool ignore_snapshot(const SnapInfo& snap_info) { auto snap_namespace_type = cls::rbd::get_snap_namespace_type( snap_info.snap_namespace); switch (snap_namespace_type) { case cls::rbd::SNAPSHOT_NAMESPACE_TYPE_MIRROR: return true; default: return false; } } } // anonymous namespace using util::create_context_callback; using util::create_rados_callback; template <typename I> void PreRemoveRequest<I>::send() { auto cct = m_image_ctx->cct; if (m_image_ctx->operations_disabled) { lderr(cct) << "image operations disabled due to unsupported op features" << dendl; finish(-EROFS); return; } acquire_exclusive_lock(); } template <typename I> void PreRemoveRequest<I>::acquire_exclusive_lock() { // lock for write for set_exclusive_lock_policy() std::unique_lock owner_locker{m_image_ctx->owner_lock}; if (m_image_ctx->exclusive_lock == nullptr) { owner_locker.unlock(); validate_image_removal(); return; } auto cct = m_image_ctx->cct; ldout(cct, 5) << dendl; // refuse to release exclusive lock when (in the midst of) removing // the image 
m_image_ctx->set_exclusive_lock_policy( new exclusive_lock::StandardPolicy<I>(m_image_ctx)); // do not attempt to open the journal when removing the image in case // it's corrupt if (m_image_ctx->test_features(RBD_FEATURE_JOURNALING)) { std::unique_lock image_locker{m_image_ctx->image_lock}; m_image_ctx->set_journal_policy(new journal::DisabledPolicy()); } m_exclusive_lock = m_image_ctx->exclusive_lock; auto ctx = create_context_callback< PreRemoveRequest<I>, &PreRemoveRequest<I>::handle_exclusive_lock>(this, m_exclusive_lock); m_exclusive_lock->acquire_lock(ctx); } template <typename I> void PreRemoveRequest<I>::handle_exclusive_lock(int r) { auto cct = m_image_ctx->cct; ldout(cct, 5) << "r=" << r << dendl; if (r < 0 || !m_image_ctx->exclusive_lock->is_lock_owner()) { if (!m_force) { lderr(cct) << "cannot obtain exclusive lock - not removing" << dendl; finish(-EBUSY); } else { ldout(cct, 5) << "cannot obtain exclusive lock - " << "proceeding due to force flag set" << dendl; shut_down_exclusive_lock(); } return; } validate_image_removal(); } template <typename I> void PreRemoveRequest<I>::shut_down_exclusive_lock() { std::shared_lock owner_locker{m_image_ctx->owner_lock}; if (m_image_ctx->exclusive_lock == nullptr) { owner_locker.unlock(); validate_image_removal(); return; } auto cct = m_image_ctx->cct; ldout(cct, 5) << dendl; auto ctx = create_context_callback< PreRemoveRequest<I>, &PreRemoveRequest<I>::handle_shut_down_exclusive_lock>(this); m_exclusive_lock = m_image_ctx->exclusive_lock; m_exclusive_lock->shut_down(ctx); } template <typename I> void PreRemoveRequest<I>::handle_shut_down_exclusive_lock(int r) { auto cct = m_image_ctx->cct; ldout(cct, 5) << "r=" << r << dendl; m_exclusive_lock->put(); m_exclusive_lock = nullptr; if (r < 0) { lderr(cct) << "error shutting down exclusive lock: " << cpp_strerror(r) << dendl; finish(r); return; } ceph_assert(m_image_ctx->exclusive_lock == nullptr); validate_image_removal(); } template <typename I> void 
PreRemoveRequest<I>::validate_image_removal() { auto cct = m_image_ctx->cct; ldout(cct, 5) << dendl; if (!m_image_ctx->ignore_migrating && m_image_ctx->test_features(RBD_FEATURE_MIGRATING)) { lderr(cct) << "image in migration state - not removing" << dendl; finish(-EBUSY); return; } check_image_snaps(); } template <typename I> void PreRemoveRequest<I>::check_image_snaps() { auto cct = m_image_ctx->cct; ldout(cct, 5) << dendl; m_image_ctx->image_lock.lock_shared(); for (auto& snap_info : m_image_ctx->snap_info) { if (auto_delete_snapshot(snap_info.second)) { m_snap_infos.insert(snap_info); } else if (!ignore_snapshot(snap_info.second)) { m_image_ctx->image_lock.unlock_shared(); ldout(cct, 5) << "image has snapshots - not removing" << dendl; finish(-ENOTEMPTY); return; } } m_image_ctx->image_lock.unlock_shared(); list_image_watchers(); } template <typename I> void PreRemoveRequest<I>::list_image_watchers() { auto cct = m_image_ctx->cct; ldout(cct, 5) << dendl; int flags = LIST_WATCHERS_FILTER_OUT_MY_INSTANCE | LIST_WATCHERS_FILTER_OUT_MIRROR_INSTANCES; auto ctx = create_context_callback< PreRemoveRequest<I>, &PreRemoveRequest<I>::handle_list_image_watchers>(this); auto req = ListWatchersRequest<I>::create(*m_image_ctx, flags, &m_watchers, ctx); req->send(); } template <typename I> void PreRemoveRequest<I>::handle_list_image_watchers(int r) { auto cct = m_image_ctx->cct; ldout(cct, 5) << "r=" << r << dendl; if (r < 0) { lderr(cct) << "error listing image watchers: " << cpp_strerror(r) << dendl; finish(r); return; } check_image_watchers(); } template <typename I> void PreRemoveRequest<I>::check_image_watchers() { auto cct = m_image_ctx->cct; ldout(cct, 5) << dendl; if (!m_watchers.empty()) { lderr(cct) << "image has watchers - not removing" << dendl; finish(-EBUSY); return; } check_group(); } template <typename I> void PreRemoveRequest<I>::check_group() { if (m_image_ctx->old_format) { finish(0); return; } auto cct = m_image_ctx->cct; ldout(cct, 5) << dendl; 
librados::ObjectReadOperation op; librbd::cls_client::image_group_get_start(&op); auto rados_completion = create_rados_callback< PreRemoveRequest<I>, &PreRemoveRequest<I>::handle_check_group>(this); m_out_bl.clear(); int r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, rados_completion, &op, &m_out_bl); ceph_assert(r == 0); rados_completion->release(); } template <typename I> void PreRemoveRequest<I>::handle_check_group(int r) { auto cct = m_image_ctx->cct; ldout(cct, 5) << "r=" << r << dendl; cls::rbd::GroupSpec s; if (r == 0) { auto it = m_out_bl.cbegin(); r = librbd::cls_client::image_group_get_finish(&it, &s); } if (r < 0 && r != -EOPNOTSUPP) { lderr(cct) << "error fetching group for image: " << cpp_strerror(r) << dendl; finish(r); return; } if (s.is_valid()) { lderr(cct) << "image is in a group - not removing" << dendl; finish(-EMLINK); return; } remove_snapshot(); } template <typename I> void PreRemoveRequest<I>::remove_snapshot() { if (m_snap_infos.empty()) { finish(0); return; } auto cct = m_image_ctx->cct; auto snap_id = m_snap_infos.begin()->first; auto& snap_info = m_snap_infos.begin()->second; ldout(cct, 20) << "snap_id=" << snap_id << ", " << "snap_name=" << snap_info.name << dendl; std::shared_lock owner_lock{m_image_ctx->owner_lock}; auto ctx = create_context_callback< PreRemoveRequest<I>, &PreRemoveRequest<I>::handle_remove_snapshot>(this); auto req = librbd::operation::SnapshotRemoveRequest<I>::create( *m_image_ctx, snap_info.snap_namespace, snap_info.name, snap_id, ctx); req->send(); } template <typename I> void PreRemoveRequest<I>::handle_remove_snapshot(int r) { auto cct = m_image_ctx->cct; ldout(cct, 5) << "r=" << r << dendl; if (r == -EBUSY) { ldout(cct, 5) << "skipping attached child" << dendl; if (m_ret_val == 0) { m_ret_val = -ECHILD; } } else if (r < 0 && r != -ENOENT) { auto snap_id = m_snap_infos.begin()->first; lderr(cct) << "failed to auto-prune snapshot " << snap_id << ": " << cpp_strerror(r) << dendl; finish(r); return; } 
ceph_assert(!m_snap_infos.empty()); m_snap_infos.erase(m_snap_infos.begin()); remove_snapshot(); } template <typename I> void PreRemoveRequest<I>::finish(int r) { auto cct = m_image_ctx->cct; ldout(cct, 5) << "r=" << r << dendl; if (m_ret_val == 0) { m_ret_val = r; } m_on_finish->complete(m_ret_val); delete this; } } // namespace image } // namespace librbd template class librbd::image::PreRemoveRequest<librbd::ImageCtx>;
9,251
25.510029
79
cc
null
ceph-main/src/librbd/image/PreRemoveRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IMAGE_PRE_REMOVE_REQUEST_H #define CEPH_LIBRBD_IMAGE_PRE_REMOVE_REQUEST_H #include "include/rados/librados.hpp" #include "include/buffer.h" #include "librbd/ImageCtx.h" #include <list> #include <map> class Context; namespace librbd { namespace image { template <typename ImageCtxT> class PreRemoveRequest { public: static PreRemoveRequest *create(ImageCtxT *image_ctx, bool force, Context *on_finish) { return new PreRemoveRequest(image_ctx, force, on_finish); } PreRemoveRequest(ImageCtxT *image_ctx, bool force, Context *on_finish) : m_image_ctx(image_ctx), m_force(force), m_on_finish(on_finish) { } void send(); private: /** * @verbatim * * <start> * | (skip if * v not needed) (error) * ACQUIRE EXCLUSIVE LOCK * * * * * * > SHUT DOWN EXCLUSIVE LOCK * | | * v | * CHECK IMAGE WATCHERS <------------------/ * | * v * CHECK GROUP * | * | /------\ * | | | * v v | * REMOVE SNAPS ----/ * | * v * <finish> * * @endverbatim */ ImageCtxT* m_image_ctx; bool m_force; Context* m_on_finish; decltype(m_image_ctx->exclusive_lock) m_exclusive_lock = nullptr; bufferlist m_out_bl; std::list<obj_watch_t> m_watchers; std::map<uint64_t, SnapInfo> m_snap_infos; int m_ret_val = 0; void acquire_exclusive_lock(); void handle_exclusive_lock(int r); void shut_down_exclusive_lock(); void handle_shut_down_exclusive_lock(int r); void validate_image_removal(); void check_image_snaps(); void list_image_watchers(); void handle_list_image_watchers(int r); void check_image_watchers(); void check_group(); void handle_check_group(int r); void remove_snapshot(); void handle_remove_snapshot(int r); void finish(int r); }; } // namespace image } // namespace librbd extern template class librbd::image::PreRemoveRequest<librbd::ImageCtx>; #endif // CEPH_LIBRBD_IMAGE_PRE_REMOVE_REQUEST_H
2,292
21.70297
72
h
null
ceph-main/src/librbd/image/RefreshParentRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/image/RefreshParentRequest.h" #include "include/rados/librados.hpp" #include "common/dout.h" #include "common/errno.h" #include "librbd/ImageCtx.h" #include "librbd/ImageState.h" #include "librbd/Utils.h" #include "librbd/asio/ContextWQ.h" #include "librbd/io/ObjectDispatcherInterface.h" #include "librbd/migration/OpenSourceImageRequest.h" #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::image::RefreshParentRequest: " namespace librbd { namespace image { using util::create_async_context_callback; using util::create_context_callback; template <typename I> RefreshParentRequest<I>::RefreshParentRequest( I &child_image_ctx, const ParentImageInfo &parent_md, const MigrationInfo &migration_info, Context *on_finish) : m_child_image_ctx(child_image_ctx), m_parent_md(parent_md), m_migration_info(migration_info), m_on_finish(on_finish), m_parent_image_ctx(nullptr), m_parent_snap_id(CEPH_NOSNAP), m_error_result(0) { } template <typename I> bool RefreshParentRequest<I>::is_refresh_required( I &child_image_ctx, const ParentImageInfo &parent_md, const MigrationInfo &migration_info) { ceph_assert(ceph_mutex_is_locked(child_image_ctx.image_lock)); return (is_open_required(child_image_ctx, parent_md, migration_info) || is_close_required(child_image_ctx, parent_md, migration_info)); } template <typename I> bool RefreshParentRequest<I>::is_close_required( I &child_image_ctx, const ParentImageInfo &parent_md, const MigrationInfo &migration_info) { return (child_image_ctx.parent != nullptr && !does_parent_exist(child_image_ctx, parent_md, migration_info)); } template <typename I> bool RefreshParentRequest<I>::is_open_required( I &child_image_ctx, const ParentImageInfo &parent_md, const MigrationInfo &migration_info) { return (does_parent_exist(child_image_ctx, parent_md, migration_info) && (child_image_ctx.parent == nullptr || 
child_image_ctx.parent->md_ctx.get_id() != parent_md.spec.pool_id || child_image_ctx.parent->md_ctx.get_namespace() != parent_md.spec.pool_namespace || child_image_ctx.parent->id != parent_md.spec.image_id || child_image_ctx.parent->snap_id != parent_md.spec.snap_id)); } template <typename I> bool RefreshParentRequest<I>::does_parent_exist( I &child_image_ctx, const ParentImageInfo &parent_md, const MigrationInfo &migration_info) { if (child_image_ctx.child != nullptr && child_image_ctx.child->migration_info.empty() && parent_md.overlap == 0) { // intermediate, non-migrating images should only open their parent if they // overlap return false; } return (parent_md.spec.pool_id > -1 && parent_md.overlap > 0) || !migration_info.empty(); } template <typename I> void RefreshParentRequest<I>::send() { if (is_open_required(m_child_image_ctx, m_parent_md, m_migration_info)) { send_open_parent(); } else { // parent will be closed (if necessary) during finalize send_complete(0); } } template <typename I> void RefreshParentRequest<I>::apply() { ceph_assert(ceph_mutex_is_wlocked(m_child_image_ctx.image_lock)); std::swap(m_child_image_ctx.parent, m_parent_image_ctx); } template <typename I> void RefreshParentRequest<I>::finalize(Context *on_finish) { CephContext *cct = m_child_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; m_on_finish = on_finish; if (m_parent_image_ctx != nullptr) { send_close_parent(); } else { send_complete(0); } } template <typename I> void RefreshParentRequest<I>::send_open_parent() { ceph_assert(m_parent_md.spec.pool_id >= 0); CephContext *cct = m_child_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; if (!m_migration_info.empty()) { auto ctx = create_async_context_callback( m_child_image_ctx, create_context_callback< RefreshParentRequest<I>, &RefreshParentRequest<I>::handle_open_parent, false>(this)); auto req = migration::OpenSourceImageRequest<I>::create( m_child_image_ctx.md_ctx, &m_child_image_ctx, 
m_parent_md.spec.snap_id, m_migration_info, &m_parent_image_ctx, ctx); req->send(); return; } librados::IoCtx parent_io_ctx; int r = util::create_ioctx(m_child_image_ctx.md_ctx, "parent image", m_parent_md.spec.pool_id, m_parent_md.spec.pool_namespace, &parent_io_ctx); if (r < 0) { send_complete(r); return; } m_parent_image_ctx = new I("", m_parent_md.spec.image_id, m_parent_md.spec.snap_id, parent_io_ctx, true); m_parent_image_ctx->child = &m_child_image_ctx; // set rados flags for reading the parent image if (m_child_image_ctx.config.template get_val<bool>("rbd_balance_parent_reads")) { m_parent_image_ctx->set_read_flag(librados::OPERATION_BALANCE_READS); } else if (m_child_image_ctx.config.template get_val<bool>("rbd_localize_parent_reads")) { m_parent_image_ctx->set_read_flag(librados::OPERATION_LOCALIZE_READS); } auto ctx = create_async_context_callback( m_child_image_ctx, create_context_callback< RefreshParentRequest<I>, &RefreshParentRequest<I>::handle_open_parent, false>(this)); m_parent_image_ctx->state->open(0U, ctx); } template <typename I> Context *RefreshParentRequest<I>::handle_open_parent(int *result) { CephContext *cct = m_child_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << " r=" << *result << dendl; save_result(result); if (*result < 0) { lderr(cct) << "failed to open parent image: " << cpp_strerror(*result) << dendl; // image already closed by open state machine m_parent_image_ctx = nullptr; } return m_on_finish; } template <typename I> void RefreshParentRequest<I>::send_close_parent() { ceph_assert(m_parent_image_ctx != nullptr); CephContext *cct = m_child_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; auto ctx = create_async_context_callback( m_child_image_ctx, create_context_callback< RefreshParentRequest<I>, &RefreshParentRequest<I>::handle_close_parent, false>(this)); m_parent_image_ctx->state->close(ctx); } template <typename I> Context *RefreshParentRequest<I>::handle_close_parent(int *result) { CephContext 
*cct = m_child_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << " r=" << *result << dendl; m_parent_image_ctx = nullptr; if (*result < 0) { lderr(cct) << "failed to close parent image: " << cpp_strerror(*result) << dendl; } send_reset_existence_cache(); return nullptr; } template <typename I> void RefreshParentRequest<I>::send_reset_existence_cache() { CephContext *cct = m_child_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; Context *ctx = create_async_context_callback( m_child_image_ctx, create_context_callback< RefreshParentRequest<I>, &RefreshParentRequest<I>::handle_reset_existence_cache, false>(this)); m_child_image_ctx.io_object_dispatcher->reset_existence_cache(ctx); } template <typename I> Context *RefreshParentRequest<I>::handle_reset_existence_cache(int *result) { CephContext *cct = m_child_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << " r=" << *result << dendl; if (*result < 0) { lderr(cct) << "failed to reset object existence cache: " << cpp_strerror(*result) << dendl; } if (m_error_result < 0) { // propagate errors from opening the image *result = m_error_result; } else { *result = 0; } return m_on_finish; } template <typename I> void RefreshParentRequest<I>::send_complete(int r) { CephContext *cct = m_child_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; m_on_finish->complete(r); } } // namespace image } // namespace librbd template class librbd::image::RefreshParentRequest<librbd::ImageCtx>;
8,121
32.15102
92
cc
null
ceph-main/src/librbd/image/RefreshParentRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IMAGE_REFRESH_PARENT_REQUEST_H #define CEPH_LIBRBD_IMAGE_REFRESH_PARENT_REQUEST_H #include "include/int_types.h" #include "librbd/Types.h" class Context; namespace librbd { class ImageCtx; namespace image { template <typename ImageCtxT = ImageCtx> class RefreshParentRequest { public: static RefreshParentRequest *create(ImageCtxT &child_image_ctx, const ParentImageInfo &parent_md, const MigrationInfo &migration_info, Context *on_finish) { return new RefreshParentRequest(child_image_ctx, parent_md, migration_info, on_finish); } static bool is_refresh_required(ImageCtxT &child_image_ctx, const ParentImageInfo &parent_md, const MigrationInfo &migration_info); void send(); void apply(); void finalize(Context *on_finish); private: /** * @verbatim * * <start> * | * | (open required) * |----------------> OPEN_PARENT * * * * * * * * * * * * * * * * | | * * | v (on error) * * \----------------> <apply> * * | * * | (close required) * * |-----------------> CLOSE_PARENT * * | | * * | v * * | RESET_EXISTENCE * * | | * * | v * * \-----------------> <finish> < * * * * * * @endverbatim */ RefreshParentRequest(ImageCtxT &child_image_ctx, const ParentImageInfo &parent_md, const MigrationInfo &migration_info, Context *on_finish); ImageCtxT &m_child_image_ctx; ParentImageInfo m_parent_md; MigrationInfo m_migration_info; Context *m_on_finish; ImageCtxT *m_parent_image_ctx; uint64_t m_parent_snap_id; int m_error_result; static bool is_close_required(ImageCtxT &child_image_ctx, const ParentImageInfo &parent_md, const MigrationInfo &migration_info); static bool is_open_required(ImageCtxT &child_image_ctx, const ParentImageInfo &parent_md, const MigrationInfo &migration_info); static bool does_parent_exist(ImageCtxT &child_image_ctx, const ParentImageInfo &parent_md, const MigrationInfo &migration_info); void send_open_parent(); Context *handle_open_parent(int *result); void 
send_close_parent(); Context *handle_close_parent(int *result); void send_reset_existence_cache(); Context *handle_reset_existence_cache(int *result); void send_complete(int r); void save_result(int *result) { if (m_error_result == 0 && *result < 0) { m_error_result = *result; } } }; } // namespace image } // namespace librbd extern template class librbd::image::RefreshParentRequest<librbd::ImageCtx>; #endif // CEPH_LIBRBD_IMAGE_REFRESH_PARENT_REQUEST_H
3,653
32.218182
80
h
null
ceph-main/src/librbd/image/RefreshRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "include/ceph_assert.h" #include "librbd/image/RefreshRequest.h" #include "common/dout.h" #include "common/errno.h" #include "cls/lock/cls_lock_client.h" #include "cls/rbd/cls_rbd_client.h" #include "librbd/ExclusiveLock.h" #include "librbd/ImageCtx.h" #include "librbd/ImageWatcher.h" #include "librbd/Journal.h" #include "librbd/ObjectMap.h" #include "librbd/Utils.h" #include "librbd/deep_copy/Utils.h" #include "librbd/image/GetMetadataRequest.h" #include "librbd/image/RefreshParentRequest.h" #include "librbd/io/AioCompletion.h" #include "librbd/io/ImageDispatchSpec.h" #include "librbd/io/ImageDispatcherInterface.h" #include "librbd/journal/Policy.h" #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::image::RefreshRequest: " namespace librbd { namespace image { using util::create_rados_callback; using util::create_async_context_callback; using util::create_context_callback; template <typename I> RefreshRequest<I>::RefreshRequest(I &image_ctx, bool acquiring_lock, bool skip_open_parent, Context *on_finish) : m_image_ctx(image_ctx), m_acquiring_lock(acquiring_lock), m_skip_open_parent_image(skip_open_parent), m_on_finish(create_async_context_callback(m_image_ctx, on_finish)), m_error_result(0), m_flush_aio(false), m_exclusive_lock(nullptr), m_object_map(nullptr), m_journal(nullptr), m_refresh_parent(nullptr) { m_pool_metadata_io_ctx.dup(image_ctx.md_ctx); m_pool_metadata_io_ctx.set_namespace(""); } template <typename I> RefreshRequest<I>::~RefreshRequest() { // these require state machine to close ceph_assert(m_exclusive_lock == nullptr); ceph_assert(m_object_map == nullptr); ceph_assert(m_journal == nullptr); ceph_assert(m_refresh_parent == nullptr); ceph_assert(!m_blocked_writes); } template <typename I> void RefreshRequest<I>::send() { if (m_image_ctx.old_format) { send_v1_read_header(); } else { 
send_v2_get_mutable_metadata(); } } template <typename I> void RefreshRequest<I>::send_get_migration_header() { if (m_image_ctx.ignore_migrating) { m_migration_spec = {}; if (m_image_ctx.old_format) { send_v1_get_snapshots(); } else { send_v2_get_metadata(); } return; } CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; librados::ObjectReadOperation op; cls_client::migration_get_start(&op); using klass = RefreshRequest<I>; librados::AioCompletion *comp = create_rados_callback<klass, &klass::handle_get_migration_header>(this); m_out_bl.clear(); m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op, &m_out_bl); comp->release(); } template <typename I> Context *RefreshRequest<I>::handle_get_migration_header(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; if (*result >= 0) { auto it = m_out_bl.cbegin(); *result = cls_client::migration_get_finish(&it, &m_migration_spec); } else if (*result == -ENOENT) { ldout(cct, 5) << this << " " << __func__ << ": no migration header found" << ", retrying" << dendl; send(); return nullptr; } if (*result < 0) { lderr(cct) << "failed to retrieve migration header: " << cpp_strerror(*result) << dendl; return m_on_finish; } switch(m_migration_spec.header_type) { case cls::rbd::MIGRATION_HEADER_TYPE_SRC: if (!m_read_only) { lderr(cct) << "image being migrated" << dendl; *result = -EROFS; return m_on_finish; } ldout(cct, 1) << this << " " << __func__ << ": migrating to: " << m_migration_spec << dendl; break; case cls::rbd::MIGRATION_HEADER_TYPE_DST: ldout(cct, 1) << this << " " << __func__ << ": migrating from: " << m_migration_spec << dendl; switch (m_migration_spec.state) { case cls::rbd::MIGRATION_STATE_PREPARING: ldout(cct, 5) << this << " " << __func__ << ": current migration state: " << m_migration_spec.state << ", retrying" << dendl; send(); return nullptr; case cls::rbd::MIGRATION_STATE_PREPARED: case 
cls::rbd::MIGRATION_STATE_EXECUTING: case cls::rbd::MIGRATION_STATE_EXECUTED: break; case cls::rbd::MIGRATION_STATE_ABORTING: if (!m_read_only) { lderr(cct) << this << " " << __func__ << ": migration is being aborted" << dendl; *result = -EROFS; return m_on_finish; } break; default: lderr(cct) << this << " " << __func__ << ": migration is in an " << "unexpected state" << dendl; *result = -EINVAL; return m_on_finish; } break; default: ldout(cct, 1) << this << " " << __func__ << ": migration type " << m_migration_spec.header_type << dendl; *result = -EBADMSG; return m_on_finish; } if (m_image_ctx.old_format) { send_v1_get_snapshots(); } else { send_v2_get_metadata(); } return nullptr; } template <typename I> void RefreshRequest<I>::send_v1_read_header() { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; librados::ObjectReadOperation op; op.read(0, 0, nullptr, nullptr); using klass = RefreshRequest<I>; librados::AioCompletion *comp = create_rados_callback< klass, &klass::handle_v1_read_header>(this); m_out_bl.clear(); int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op, &m_out_bl); ceph_assert(r == 0); comp->release(); } template <typename I> Context *RefreshRequest<I>::handle_v1_read_header(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": " << "r=" << *result << dendl; rbd_obj_header_ondisk v1_header; bool migrating = false; if (*result < 0) { return m_on_finish; } else if (m_out_bl.length() < sizeof(v1_header)) { lderr(cct) << "v1 header too small" << dendl; *result = -EIO; return m_on_finish; } else if (memcmp(RBD_HEADER_TEXT, m_out_bl.c_str(), sizeof(RBD_HEADER_TEXT)) != 0) { if (memcmp(RBD_MIGRATE_HEADER_TEXT, m_out_bl.c_str(), sizeof(RBD_MIGRATE_HEADER_TEXT)) == 0) { ldout(cct, 1) << this << " " << __func__ << ": migration v1 header detected" << dendl; migrating = true; } else { lderr(cct) << "unrecognized v1 header" << dendl; *result = -ENXIO; return 
m_on_finish; } } { std::shared_lock image_locker{m_image_ctx.image_lock}; m_read_only = m_image_ctx.read_only; m_read_only_flags = m_image_ctx.read_only_flags; } memcpy(&v1_header, m_out_bl.c_str(), sizeof(v1_header)); m_order = v1_header.options.order; m_size = v1_header.image_size; m_object_prefix = v1_header.block_name; if (migrating) { send_get_migration_header(); } else { m_migration_spec = {}; send_v1_get_snapshots(); } return nullptr; } template <typename I> void RefreshRequest<I>::send_v1_get_snapshots() { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; librados::ObjectReadOperation op; cls_client::old_snapshot_list_start(&op); using klass = RefreshRequest<I>; librados::AioCompletion *comp = create_rados_callback< klass, &klass::handle_v1_get_snapshots>(this); m_out_bl.clear(); int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op, &m_out_bl); ceph_assert(r == 0); comp->release(); } template <typename I> Context *RefreshRequest<I>::handle_v1_get_snapshots(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": " << "r=" << *result << dendl; std::vector<std::string> snap_names; std::vector<uint64_t> snap_sizes; if (*result >= 0) { auto it = m_out_bl.cbegin(); *result = cls_client::old_snapshot_list_finish(&it, &snap_names, &snap_sizes, &m_snapc); } if (*result < 0) { lderr(cct) << "failed to retrieve v1 snapshots: " << cpp_strerror(*result) << dendl; return m_on_finish; } if (!m_snapc.is_valid()) { lderr(cct) << "v1 image snap context is invalid" << dendl; *result = -EIO; return m_on_finish; } m_snap_infos.clear(); for (size_t i = 0; i < m_snapc.snaps.size(); ++i) { m_snap_infos.push_back({m_snapc.snaps[i], {cls::rbd::UserSnapshotNamespace{}}, snap_names[i], snap_sizes[i], {}, 0}); } send_v1_get_locks(); return nullptr; } template <typename I> void RefreshRequest<I>::send_v1_get_locks() { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << 
__func__ << dendl; librados::ObjectReadOperation op; rados::cls::lock::get_lock_info_start(&op, RBD_LOCK_NAME); using klass = RefreshRequest<I>; librados::AioCompletion *comp = create_rados_callback< klass, &klass::handle_v1_get_locks>(this); m_out_bl.clear(); int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op, &m_out_bl); ceph_assert(r == 0); comp->release(); } template <typename I> Context *RefreshRequest<I>::handle_v1_get_locks(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": " << "r=" << *result << dendl; if (*result >= 0) { auto it = m_out_bl.cbegin(); ClsLockType lock_type; *result = rados::cls::lock::get_lock_info_finish(&it, &m_lockers, &lock_type, &m_lock_tag); if (*result >= 0) { m_exclusive_locked = (lock_type == ClsLockType::EXCLUSIVE); } } if (*result < 0) { lderr(cct) << "failed to retrieve locks: " << cpp_strerror(*result) << dendl; return m_on_finish; } send_v1_apply(); return nullptr; } template <typename I> void RefreshRequest<I>::send_v1_apply() { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; // ensure we are not in a rados callback when applying updates using klass = RefreshRequest<I>; Context *ctx = create_context_callback< klass, &klass::handle_v1_apply>(this); m_image_ctx.op_work_queue->queue(ctx, 0); } template <typename I> Context *RefreshRequest<I>::handle_v1_apply(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; apply(); return send_flush_aio(); } template <typename I> void RefreshRequest<I>::send_v2_get_mutable_metadata() { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; uint64_t snap_id; { std::shared_lock image_locker{m_image_ctx.image_lock}; snap_id = m_image_ctx.snap_id; m_read_only = m_image_ctx.read_only; m_read_only_flags = m_image_ctx.read_only_flags; } // mask out the non-primary read-only flag since its state can change 
bool read_only = ( ((m_read_only_flags & ~IMAGE_READ_ONLY_FLAG_NON_PRIMARY) != 0) || (snap_id != CEPH_NOSNAP)); librados::ObjectReadOperation op; cls_client::get_size_start(&op, CEPH_NOSNAP); cls_client::get_features_start(&op, read_only); cls_client::get_flags_start(&op, CEPH_NOSNAP); cls_client::get_snapcontext_start(&op); rados::cls::lock::get_lock_info_start(&op, RBD_LOCK_NAME); using klass = RefreshRequest<I>; librados::AioCompletion *comp = create_rados_callback< klass, &klass::handle_v2_get_mutable_metadata>(this); m_out_bl.clear(); int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op, &m_out_bl); ceph_assert(r == 0); comp->release(); } template <typename I> Context *RefreshRequest<I>::handle_v2_get_mutable_metadata(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": " << "r=" << *result << dendl; auto it = m_out_bl.cbegin(); if (*result >= 0) { uint8_t order; *result = cls_client::get_size_finish(&it, &m_size, &order); } if (*result >= 0) { *result = cls_client::get_features_finish(&it, &m_features, &m_incompatible_features); } if (*result >= 0) { *result = cls_client::get_flags_finish(&it, &m_flags); } if (*result >= 0) { *result = cls_client::get_snapcontext_finish(&it, &m_snapc); } if (*result >= 0) { ClsLockType lock_type; *result = rados::cls::lock::get_lock_info_finish(&it, &m_lockers, &lock_type, &m_lock_tag); if (*result >= 0) { m_exclusive_locked = (lock_type == ClsLockType::EXCLUSIVE); } } if (*result < 0) { lderr(cct) << "failed to retrieve mutable metadata: " << cpp_strerror(*result) << dendl; return m_on_finish; } uint64_t unsupported = m_incompatible_features & ~RBD_FEATURES_ALL; if (unsupported != 0ULL) { lderr(cct) << "Image uses unsupported features: " << unsupported << dendl; *result = -ENOSYS; return m_on_finish; } if (!m_snapc.is_valid()) { lderr(cct) << "image snap context is invalid!" 
<< dendl; *result = -EIO; return m_on_finish; } if (m_acquiring_lock && (m_features & RBD_FEATURE_EXCLUSIVE_LOCK) == 0) { ldout(cct, 5) << "ignoring dynamically disabled exclusive lock" << dendl; m_features |= RBD_FEATURE_EXCLUSIVE_LOCK; m_incomplete_update = true; } else { m_incomplete_update = false; } if (((m_incompatible_features & RBD_FEATURE_NON_PRIMARY) != 0U) && ((m_read_only_flags & IMAGE_READ_ONLY_FLAG_NON_PRIMARY) == 0U) && ((m_image_ctx.read_only_mask & IMAGE_READ_ONLY_FLAG_NON_PRIMARY) != 0U)) { // implies we opened a non-primary image in R/W mode ldout(cct, 5) << "adding non-primary read-only image flag" << dendl; m_read_only_flags |= IMAGE_READ_ONLY_FLAG_NON_PRIMARY; } else if ((((m_incompatible_features & RBD_FEATURE_NON_PRIMARY) == 0U) || ((m_image_ctx.read_only_mask & IMAGE_READ_ONLY_FLAG_NON_PRIMARY) == 0U)) && ((m_read_only_flags & IMAGE_READ_ONLY_FLAG_NON_PRIMARY) != 0U)) { ldout(cct, 5) << "removing non-primary read-only image flag" << dendl; m_read_only_flags &= ~IMAGE_READ_ONLY_FLAG_NON_PRIMARY; } m_read_only = (m_read_only_flags != 0U); m_legacy_parent = false; send_v2_get_parent(); return nullptr; } template <typename I> void RefreshRequest<I>::send_v2_get_parent() { // NOTE: remove support when Mimic is EOLed CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": legacy=" << m_legacy_parent << dendl; librados::ObjectReadOperation op; if (!m_legacy_parent) { cls_client::parent_get_start(&op); cls_client::parent_overlap_get_start(&op, CEPH_NOSNAP); } else { cls_client::get_parent_start(&op, CEPH_NOSNAP); } auto aio_comp = create_rados_callback< RefreshRequest<I>, &RefreshRequest<I>::handle_v2_get_parent>(this); m_out_bl.clear(); m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, aio_comp, &op, &m_out_bl); aio_comp->release(); } template <typename I> Context *RefreshRequest<I>::handle_v2_get_parent(int *result) { // NOTE: remove support when Mimic is EOLed CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << 
this << " " << __func__ << ": r=" << *result << dendl; auto it = m_out_bl.cbegin(); if (!m_legacy_parent) { if (*result >= 0) { *result = cls_client::parent_get_finish(&it, &m_parent_md.spec); } std::optional<uint64_t> parent_overlap; if (*result >= 0) { *result = cls_client::parent_overlap_get_finish(&it, &parent_overlap); } if (*result >= 0) { if (parent_overlap) { m_parent_md.overlap = *parent_overlap; m_head_parent_overlap = true; } else { m_parent_md.overlap = 0; m_head_parent_overlap = false; } } } else if (*result >= 0) { *result = cls_client::get_parent_finish(&it, &m_parent_md.spec, &m_parent_md.overlap); m_head_parent_overlap = true; } if (*result == -EOPNOTSUPP && !m_legacy_parent) { ldout(cct, 10) << "retrying using legacy parent method" << dendl; m_legacy_parent = true; send_v2_get_parent(); return nullptr; } else if (*result < 0) { lderr(cct) << "failed to retrieve parent: " << cpp_strerror(*result) << dendl; return m_on_finish; } if ((m_features & RBD_FEATURE_MIGRATING) != 0) { ldout(cct, 1) << "migrating feature set" << dendl; send_get_migration_header(); } else { m_migration_spec = {}; send_v2_get_metadata(); } return nullptr; } template <typename I> void RefreshRequest<I>::send_v2_get_metadata() { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; auto ctx = create_context_callback< RefreshRequest<I>, &RefreshRequest<I>::handle_v2_get_metadata>(this); m_metadata.clear(); auto req = GetMetadataRequest<I>::create( m_image_ctx.md_ctx, m_image_ctx.header_oid, true, ImageCtx::METADATA_CONF_PREFIX, ImageCtx::METADATA_CONF_PREFIX, 0U, &m_metadata, ctx); req->send(); } template <typename I> Context *RefreshRequest<I>::handle_v2_get_metadata(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; if (*result < 0) { lderr(cct) << "failed to retrieve metadata: " << cpp_strerror(*result) << dendl; return m_on_finish; } send_v2_get_pool_metadata(); return 
nullptr; } template <typename I> void RefreshRequest<I>::send_v2_get_pool_metadata() { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; auto ctx = create_context_callback< RefreshRequest<I>, &RefreshRequest<I>::handle_v2_get_pool_metadata>(this); auto req = GetMetadataRequest<I>::create( m_pool_metadata_io_ctx, RBD_INFO, true, ImageCtx::METADATA_CONF_PREFIX, ImageCtx::METADATA_CONF_PREFIX, 0U, &m_metadata, ctx); req->send(); } template <typename I> Context *RefreshRequest<I>::handle_v2_get_pool_metadata(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; if (*result < 0) { lderr(cct) << "failed to retrieve pool metadata: " << cpp_strerror(*result) << dendl; return m_on_finish; } bool thread_safe = m_image_ctx.image_watcher->is_unregistered(); m_image_ctx.apply_metadata(m_metadata, thread_safe); send_v2_get_op_features(); return nullptr; } template <typename I> void RefreshRequest<I>::send_v2_get_op_features() { if ((m_features & RBD_FEATURE_OPERATIONS) == 0LL) { m_op_features = 0; send_v2_get_group(); return; } CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; librados::ObjectReadOperation op; cls_client::op_features_get_start(&op); librados::AioCompletion *comp = create_rados_callback< RefreshRequest<I>, &RefreshRequest<I>::handle_v2_get_op_features>(this); m_out_bl.clear(); int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op, &m_out_bl); ceph_assert(r == 0); comp->release(); } template <typename I> Context *RefreshRequest<I>::handle_v2_get_op_features(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": " << "r=" << *result << dendl; // -EOPNOTSUPP handler not required since feature bit implies OSD // supports the method if (*result >= 0) { auto it = m_out_bl.cbegin(); *result = cls_client::op_features_get_finish(&it, &m_op_features); } if (*result < 
0) { lderr(cct) << "failed to retrieve op features: " << cpp_strerror(*result) << dendl; return m_on_finish; } send_v2_get_group(); return nullptr; } template <typename I> void RefreshRequest<I>::send_v2_get_group() { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; librados::ObjectReadOperation op; cls_client::image_group_get_start(&op); using klass = RefreshRequest<I>; librados::AioCompletion *comp = create_rados_callback< klass, &klass::handle_v2_get_group>(this); m_out_bl.clear(); int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op, &m_out_bl); ceph_assert(r == 0); comp->release(); } template <typename I> Context *RefreshRequest<I>::handle_v2_get_group(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": " << "r=" << *result << dendl; if (*result >= 0) { auto it = m_out_bl.cbegin(); *result = cls_client::image_group_get_finish(&it, &m_group_spec); } if (*result == -EOPNOTSUPP) { m_group_spec = {}; } else if (*result < 0) { lderr(cct) << "failed to retrieve group: " << cpp_strerror(*result) << dendl; return m_on_finish; } m_legacy_snapshot = LEGACY_SNAPSHOT_DISABLED; send_v2_get_snapshots(); return nullptr; } template <typename I> void RefreshRequest<I>::send_v2_get_snapshots() { m_snap_infos.resize(m_snapc.snaps.size()); m_snap_flags.resize(m_snapc.snaps.size()); m_snap_parents.resize(m_snapc.snaps.size()); m_snap_protection.resize(m_snapc.snaps.size()); if (m_snapc.snaps.empty()) { send_v2_refresh_parent(); return; } CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; librados::ObjectReadOperation op; for (auto snap_id : m_snapc.snaps) { if (m_legacy_snapshot != LEGACY_SNAPSHOT_DISABLED) { /// NOTE: remove after Luminous is retired cls_client::get_snapshot_name_start(&op, snap_id); cls_client::get_size_start(&op, snap_id); if (m_legacy_snapshot != LEGACY_SNAPSHOT_ENABLED_NO_TIMESTAMP) { 
cls_client::get_snapshot_timestamp_start(&op, snap_id); } } else { cls_client::snapshot_get_start(&op, snap_id); } if (m_legacy_parent) { cls_client::get_parent_start(&op, snap_id); } else { cls_client::parent_overlap_get_start(&op, snap_id); } cls_client::get_flags_start(&op, snap_id); cls_client::get_protection_status_start(&op, snap_id); } using klass = RefreshRequest<I>; librados::AioCompletion *comp = create_rados_callback< klass, &klass::handle_v2_get_snapshots>(this); m_out_bl.clear(); int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op, &m_out_bl); ceph_assert(r == 0); comp->release(); } template <typename I> Context *RefreshRequest<I>::handle_v2_get_snapshots(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": " << "r=" << *result << dendl; auto it = m_out_bl.cbegin(); for (size_t i = 0; i < m_snapc.snaps.size(); ++i) { if (m_legacy_snapshot != LEGACY_SNAPSHOT_DISABLED) { /// NOTE: remove after Luminous is retired std::string snap_name; if (*result >= 0) { *result = cls_client::get_snapshot_name_finish(&it, &snap_name); } uint64_t snap_size; if (*result >= 0) { uint8_t order; *result = cls_client::get_size_finish(&it, &snap_size, &order); } utime_t snap_timestamp; if (*result >= 0 && m_legacy_snapshot != LEGACY_SNAPSHOT_ENABLED_NO_TIMESTAMP) { /// NOTE: remove after Jewel is retired *result = cls_client::get_snapshot_timestamp_finish(&it, &snap_timestamp); } if (*result >= 0) { m_snap_infos[i] = {m_snapc.snaps[i], {cls::rbd::UserSnapshotNamespace{}}, snap_name, snap_size, snap_timestamp, 0}; } } else if (*result >= 0) { *result = cls_client::snapshot_get_finish(&it, &m_snap_infos[i]); } if (*result >= 0) { if (m_legacy_parent) { *result = cls_client::get_parent_finish(&it, &m_snap_parents[i].spec, &m_snap_parents[i].overlap); } else { std::optional<uint64_t> parent_overlap; *result = cls_client::parent_overlap_get_finish(&it, &parent_overlap); if (*result >= 0) { if (parent_overlap && 
m_parent_md.spec.pool_id > -1) { m_snap_parents[i].spec = m_parent_md.spec; m_snap_parents[i].overlap = *parent_overlap; } else { m_snap_parents[i] = {}; } } } } if (*result >= 0) { *result = cls_client::get_flags_finish(&it, &m_snap_flags[i]); } if (*result >= 0) { *result = cls_client::get_protection_status_finish( &it, &m_snap_protection[i]); } if (*result < 0) { break; } } if (*result == -ENOENT && m_enoent_retries++ < MAX_ENOENT_RETRIES) { ldout(cct, 10) << "out-of-sync snapshot state detected, retrying" << dendl; send_v2_get_mutable_metadata(); return nullptr; } else if (m_legacy_snapshot == LEGACY_SNAPSHOT_DISABLED && *result == -EOPNOTSUPP) { ldout(cct, 10) << "retrying using legacy snapshot methods" << dendl; m_legacy_snapshot = LEGACY_SNAPSHOT_ENABLED; send_v2_get_snapshots(); return nullptr; } else if (m_legacy_snapshot == LEGACY_SNAPSHOT_ENABLED && *result == -EOPNOTSUPP) { ldout(cct, 10) << "retrying using legacy snapshot methods (jewel)" << dendl; m_legacy_snapshot = LEGACY_SNAPSHOT_ENABLED_NO_TIMESTAMP; send_v2_get_snapshots(); return nullptr; } else if (*result < 0) { lderr(cct) << "failed to retrieve snapshots: " << cpp_strerror(*result) << dendl; return m_on_finish; } send_v2_refresh_parent(); return nullptr; } template <typename I> void RefreshRequest<I>::send_v2_refresh_parent() { { std::shared_lock image_locker{m_image_ctx.image_lock}; ParentImageInfo parent_md; MigrationInfo migration_info; int r = get_parent_info(m_image_ctx.snap_id, &parent_md, &migration_info); if (!m_skip_open_parent_image && (r < 0 || RefreshParentRequest<I>::is_refresh_required(m_image_ctx, parent_md, migration_info))) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; using klass = RefreshRequest<I>; Context *ctx = create_context_callback< klass, &klass::handle_v2_refresh_parent>(this); m_refresh_parent = RefreshParentRequest<I>::create( m_image_ctx, parent_md, migration_info, ctx); } } if (m_refresh_parent != nullptr) { 
m_refresh_parent->send(); } else { send_v2_init_exclusive_lock(); } } template <typename I> Context *RefreshRequest<I>::handle_v2_refresh_parent(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; if (*result == -ENOENT && m_enoent_retries++ < MAX_ENOENT_RETRIES) { ldout(cct, 10) << "out-of-sync parent info detected, retrying" << dendl; ceph_assert(m_refresh_parent != nullptr); delete m_refresh_parent; m_refresh_parent = nullptr; send_v2_get_mutable_metadata(); return nullptr; } else if (*result < 0) { lderr(cct) << "failed to refresh parent image: " << cpp_strerror(*result) << dendl; save_result(result); send_v2_apply(); return nullptr; } send_v2_init_exclusive_lock(); return nullptr; } template <typename I> void RefreshRequest<I>::send_v2_init_exclusive_lock() { if ((m_features & RBD_FEATURE_EXCLUSIVE_LOCK) == 0 || m_read_only || !m_image_ctx.snap_name.empty() || m_image_ctx.exclusive_lock != nullptr) { send_v2_open_object_map(); return; } // implies exclusive lock dynamically enabled or image open in-progress CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; // TODO need safe shut down m_exclusive_lock = m_image_ctx.create_exclusive_lock(); using klass = RefreshRequest<I>; Context *ctx = create_context_callback< klass, &klass::handle_v2_init_exclusive_lock>(this); std::shared_lock owner_locker{m_image_ctx.owner_lock}; m_exclusive_lock->init(m_features, ctx); } template <typename I> Context *RefreshRequest<I>::handle_v2_init_exclusive_lock(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; if (*result < 0) { lderr(cct) << "failed to initialize exclusive lock: " << cpp_strerror(*result) << dendl; save_result(result); } // object map and journal will be opened when exclusive lock is // acquired (if features are enabled) send_v2_apply(); return nullptr; } template <typename I> void 
RefreshRequest<I>::send_v2_open_journal() { bool journal_disabled = ( (m_features & RBD_FEATURE_JOURNALING) == 0 || m_read_only || !m_image_ctx.snap_name.empty() || m_image_ctx.journal != nullptr || m_image_ctx.exclusive_lock == nullptr || !m_image_ctx.exclusive_lock->is_lock_owner()); bool journal_disabled_by_policy; { std::shared_lock image_locker{m_image_ctx.image_lock}; journal_disabled_by_policy = ( !journal_disabled && m_image_ctx.get_journal_policy()->journal_disabled()); } if (journal_disabled || journal_disabled_by_policy) { // journal dynamically enabled -- doesn't own exclusive lock if ((m_features & RBD_FEATURE_JOURNALING) != 0 && !journal_disabled_by_policy && m_image_ctx.exclusive_lock != nullptr && m_image_ctx.journal == nullptr) { auto ctx = new LambdaContext([this](int) { send_v2_block_writes(); }); m_image_ctx.exclusive_lock->set_require_lock( true, librbd::io::DIRECTION_BOTH, ctx); return; } send_v2_block_writes(); return; } // implies journal dynamically enabled since ExclusiveLock will init // the journal upon acquiring the lock CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; using klass = RefreshRequest<I>; Context *ctx = create_context_callback< klass, &klass::handle_v2_open_journal>(this); // TODO need safe close m_journal = m_image_ctx.create_journal(); m_journal->open(ctx); } template <typename I> Context *RefreshRequest<I>::handle_v2_open_journal(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; if (*result < 0) { lderr(cct) << "failed to initialize journal: " << cpp_strerror(*result) << dendl; save_result(result); } send_v2_block_writes(); return nullptr; } template <typename I> void RefreshRequest<I>::send_v2_block_writes() { bool disabled_journaling = false; { std::shared_lock image_locker{m_image_ctx.image_lock}; disabled_journaling = ((m_features & RBD_FEATURE_EXCLUSIVE_LOCK) != 0 && (m_features & RBD_FEATURE_JOURNALING) 
== 0 && m_image_ctx.journal != nullptr); } if (!disabled_journaling) { send_v2_apply(); return; } CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; // we need to block writes temporarily to avoid in-flight journal // writes m_blocked_writes = true; Context *ctx = create_context_callback< RefreshRequest<I>, &RefreshRequest<I>::handle_v2_block_writes>(this); std::shared_lock owner_locker{m_image_ctx.owner_lock}; m_image_ctx.io_image_dispatcher->block_writes(ctx); } template <typename I> Context *RefreshRequest<I>::handle_v2_block_writes(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; if (*result < 0) { lderr(cct) << "failed to block writes: " << cpp_strerror(*result) << dendl; save_result(result); } send_v2_apply(); return nullptr; } template <typename I> void RefreshRequest<I>::send_v2_open_object_map() { if ((m_features & RBD_FEATURE_OBJECT_MAP) == 0 || m_image_ctx.object_map != nullptr || (m_image_ctx.snap_name.empty() && (m_read_only || m_image_ctx.exclusive_lock == nullptr || !m_image_ctx.exclusive_lock->is_lock_owner()))) { send_v2_open_journal(); return; } // implies object map dynamically enabled or image open in-progress // since SetSnapRequest loads the object map for a snapshot and // ExclusiveLock loads the object map for HEAD CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; if (m_image_ctx.snap_name.empty()) { m_object_map = m_image_ctx.create_object_map(CEPH_NOSNAP); } else { for (size_t snap_idx = 0; snap_idx < m_snap_infos.size(); ++snap_idx) { if (m_snap_infos[snap_idx].name == m_image_ctx.snap_name) { m_object_map = m_image_ctx.create_object_map( m_snapc.snaps[snap_idx].val); break; } } if (m_object_map == nullptr) { lderr(cct) << "failed to locate snapshot: " << m_image_ctx.snap_name << dendl; send_v2_open_journal(); return; } } using klass = RefreshRequest<I>; Context *ctx = 
create_context_callback< klass, &klass::handle_v2_open_object_map>(this); m_object_map->open(ctx); } template <typename I> Context *RefreshRequest<I>::handle_v2_open_object_map(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; if (*result < 0) { lderr(cct) << "failed to open object map: " << cpp_strerror(*result) << dendl; m_object_map->put(); m_object_map = nullptr; if (*result != -EFBIG) { save_result(result); } } send_v2_open_journal(); return nullptr; } template <typename I> void RefreshRequest<I>::send_v2_apply() { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; // ensure we are not in a rados callback when applying updates using klass = RefreshRequest<I>; Context *ctx = create_context_callback< klass, &klass::handle_v2_apply>(this); m_image_ctx.op_work_queue->queue(ctx, 0); } template <typename I> Context *RefreshRequest<I>::handle_v2_apply(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; apply(); return send_v2_finalize_refresh_parent(); } template <typename I> Context *RefreshRequest<I>::send_v2_finalize_refresh_parent() { if (m_refresh_parent == nullptr) { return send_v2_shut_down_exclusive_lock(); } CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; using klass = RefreshRequest<I>; Context *ctx = create_context_callback< klass, &klass::handle_v2_finalize_refresh_parent>(this); m_refresh_parent->finalize(ctx); return nullptr; } template <typename I> Context *RefreshRequest<I>::handle_v2_finalize_refresh_parent(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; ceph_assert(m_refresh_parent != nullptr); delete m_refresh_parent; m_refresh_parent = nullptr; return send_v2_shut_down_exclusive_lock(); } template <typename I> Context *RefreshRequest<I>::send_v2_shut_down_exclusive_lock() { 
if (m_exclusive_lock == nullptr) { return send_v2_close_journal(); } CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; // exclusive lock feature was dynamically disabled. in-flight IO will be // flushed and in-flight requests will be canceled before releasing lock using klass = RefreshRequest<I>; Context *ctx = create_context_callback< klass, &klass::handle_v2_shut_down_exclusive_lock>(this); m_exclusive_lock->shut_down(ctx); return nullptr; } template <typename I> Context *RefreshRequest<I>::handle_v2_shut_down_exclusive_lock(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; if (*result < 0) { lderr(cct) << "failed to shut down exclusive lock: " << cpp_strerror(*result) << dendl; save_result(result); } { std::unique_lock owner_locker{m_image_ctx.owner_lock}; ceph_assert(m_image_ctx.exclusive_lock == nullptr); } ceph_assert(m_exclusive_lock != nullptr); m_exclusive_lock->put(); m_exclusive_lock = nullptr; return send_v2_close_journal(); } template <typename I> Context *RefreshRequest<I>::send_v2_close_journal() { if (m_journal == nullptr) { return send_v2_close_object_map(); } CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; // journal feature was dynamically disabled using klass = RefreshRequest<I>; Context *ctx = create_context_callback< klass, &klass::handle_v2_close_journal>(this); m_journal->close(ctx); return nullptr; } template <typename I> Context *RefreshRequest<I>::handle_v2_close_journal(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; if (*result < 0) { save_result(result); lderr(cct) << "failed to close journal: " << cpp_strerror(*result) << dendl; } ceph_assert(m_journal != nullptr); m_journal->put(); m_journal = nullptr; ceph_assert(m_blocked_writes); m_blocked_writes = false; m_image_ctx.io_image_dispatcher->unblock_writes(); 
return send_v2_close_object_map(); } template <typename I> Context *RefreshRequest<I>::send_v2_close_object_map() { if (m_object_map == nullptr) { return send_flush_aio(); } CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; // object map was dynamically disabled using klass = RefreshRequest<I>; Context *ctx = create_context_callback< klass, &klass::handle_v2_close_object_map>(this); m_object_map->close(ctx); return nullptr; } template <typename I> Context *RefreshRequest<I>::handle_v2_close_object_map(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; if (*result < 0) { lderr(cct) << "failed to close object map: " << cpp_strerror(*result) << dendl; } ceph_assert(m_object_map != nullptr); m_object_map->put(); m_object_map = nullptr; return send_flush_aio(); } template <typename I> Context *RefreshRequest<I>::send_flush_aio() { if (m_incomplete_update && m_error_result == 0) { // if this was a partial refresh, notify ImageState m_error_result = -ERESTART; } if (m_flush_aio) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; std::shared_lock owner_locker{m_image_ctx.owner_lock}; auto ctx = create_context_callback< RefreshRequest<I>, &RefreshRequest<I>::handle_flush_aio>(this); auto aio_comp = io::AioCompletion::create_and_start( ctx, util::get_image_ctx(&m_image_ctx), io::AIO_TYPE_FLUSH); auto req = io::ImageDispatchSpec::create_flush( m_image_ctx, io::IMAGE_DISPATCH_LAYER_REFRESH, aio_comp, io::FLUSH_SOURCE_REFRESH, {}); req->send(); return nullptr; } else if (m_error_result < 0) { // propagate saved error back to caller Context *ctx = create_context_callback< RefreshRequest<I>, &RefreshRequest<I>::handle_error>(this); m_image_ctx.op_work_queue->queue(ctx, 0); return nullptr; } return m_on_finish; } template <typename I> Context *RefreshRequest<I>::handle_flush_aio(int *result) { CephContext *cct = m_image_ctx.cct; 
ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; if (*result < 0) { lderr(cct) << "failed to flush pending AIO: " << cpp_strerror(*result) << dendl; } return handle_error(result); } template <typename I> Context *RefreshRequest<I>::handle_error(int *result) { if (m_error_result < 0) { *result = m_error_result; CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; } return m_on_finish; } template <typename I> void RefreshRequest<I>::apply() { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << dendl; std::scoped_lock locker{m_image_ctx.owner_lock, m_image_ctx.image_lock}; m_image_ctx.read_only_flags = m_read_only_flags; m_image_ctx.read_only = m_read_only; m_image_ctx.size = m_size; m_image_ctx.lockers = m_lockers; m_image_ctx.lock_tag = m_lock_tag; m_image_ctx.exclusive_locked = m_exclusive_locked; std::map<uint64_t, uint64_t> migration_reverse_snap_seq; if (m_image_ctx.old_format) { m_image_ctx.order = m_order; m_image_ctx.features = 0; m_image_ctx.flags = 0; m_image_ctx.op_features = 0; m_image_ctx.operations_disabled = false; m_image_ctx.object_prefix = std::move(m_object_prefix); m_image_ctx.init_layout(m_image_ctx.md_ctx.get_id()); } else { // HEAD revision doesn't have a defined overlap so it's only // applicable to snapshots if (!m_head_parent_overlap) { m_parent_md = {}; } m_image_ctx.features = m_features; m_image_ctx.flags = m_flags; m_image_ctx.op_features = m_op_features; m_image_ctx.operations_disabled = ( (m_op_features & ~RBD_OPERATION_FEATURES_ALL) != 0ULL); m_image_ctx.group_spec = m_group_spec; bool migration_info_valid; int r = get_migration_info(&m_image_ctx.parent_md, &m_image_ctx.migration_info, &migration_info_valid); ceph_assert(r == 0); // validated in refresh parent step if (migration_info_valid) { for (auto it : m_image_ctx.migration_info.snap_map) { migration_reverse_snap_seq[it.second.front()] = it.first; } } else { 
m_image_ctx.parent_md = m_parent_md; m_image_ctx.migration_info = {}; } librados::Rados rados(m_image_ctx.md_ctx); int8_t require_osd_release; r = rados.get_min_compatible_osd(&require_osd_release); if (r == 0 && require_osd_release >= CEPH_RELEASE_OCTOPUS) { m_image_ctx.enable_sparse_copyup = true; } } for (size_t i = 0; i < m_snapc.snaps.size(); ++i) { std::vector<librados::snap_t>::const_iterator it = std::find( m_image_ctx.snaps.begin(), m_image_ctx.snaps.end(), m_snapc.snaps[i].val); if (it == m_image_ctx.snaps.end()) { m_flush_aio = true; ldout(cct, 20) << "new snapshot id=" << m_snapc.snaps[i].val << " name=" << m_snap_infos[i].name << " size=" << m_snap_infos[i].image_size << dendl; } } m_image_ctx.snaps.clear(); m_image_ctx.snap_info.clear(); m_image_ctx.snap_ids.clear(); auto overlap = m_image_ctx.parent_md.overlap; for (size_t i = 0; i < m_snapc.snaps.size(); ++i) { uint64_t flags = m_image_ctx.old_format ? 0 : m_snap_flags[i]; uint8_t protection_status = m_image_ctx.old_format ? 
static_cast<uint8_t>(RBD_PROTECTION_STATUS_UNPROTECTED) : m_snap_protection[i]; ParentImageInfo parent; if (!m_image_ctx.old_format) { if (!m_image_ctx.migration_info.empty()) { parent = m_image_ctx.parent_md; auto it = migration_reverse_snap_seq.find(m_snapc.snaps[i].val); if (it != migration_reverse_snap_seq.end()) { parent.spec.snap_id = it->second; parent.overlap = m_snap_infos[i].image_size; } else { overlap = std::min(overlap, m_snap_infos[i].image_size); parent.overlap = overlap; } } else { parent = m_snap_parents[i]; } } m_image_ctx.add_snap(m_snap_infos[i].snapshot_namespace, m_snap_infos[i].name, m_snapc.snaps[i].val, m_snap_infos[i].image_size, parent, protection_status, flags, m_snap_infos[i].timestamp); } m_image_ctx.parent_md.overlap = std::min(overlap, m_image_ctx.size); m_image_ctx.snapc = m_snapc; if (m_image_ctx.snap_id != CEPH_NOSNAP && m_image_ctx.get_snap_id(m_image_ctx.snap_namespace, m_image_ctx.snap_name) != m_image_ctx.snap_id) { lderr(cct) << "tried to read from a snapshot that no longer exists: " << m_image_ctx.snap_name << dendl; m_image_ctx.snap_exists = false; } if (m_refresh_parent != nullptr) { m_refresh_parent->apply(); } if (m_image_ctx.data_ctx.is_valid()) { m_image_ctx.data_ctx.selfmanaged_snap_set_write_ctx(m_image_ctx.snapc.seq, m_image_ctx.snaps); m_image_ctx.rebuild_data_io_context(); } // handle dynamically enabled / disabled features if (m_image_ctx.exclusive_lock != nullptr && !m_image_ctx.test_features(RBD_FEATURE_EXCLUSIVE_LOCK, m_image_ctx.image_lock)) { // disabling exclusive lock will automatically handle closing // object map and journaling ceph_assert(m_exclusive_lock == nullptr); m_exclusive_lock = m_image_ctx.exclusive_lock; } else { if (m_exclusive_lock != nullptr) { ceph_assert(m_image_ctx.exclusive_lock == nullptr); std::swap(m_exclusive_lock, m_image_ctx.exclusive_lock); } if (!m_image_ctx.test_features(RBD_FEATURE_JOURNALING, m_image_ctx.image_lock)) { if (!m_image_ctx.clone_copy_on_read && 
m_image_ctx.journal != nullptr) { m_image_ctx.exclusive_lock->unset_require_lock(io::DIRECTION_READ); } std::swap(m_journal, m_image_ctx.journal); } else if (m_journal != nullptr) { std::swap(m_journal, m_image_ctx.journal); } if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP, m_image_ctx.image_lock) || m_object_map != nullptr) { std::swap(m_object_map, m_image_ctx.object_map); } } } template <typename I> int RefreshRequest<I>::get_parent_info(uint64_t snap_id, ParentImageInfo *parent_md, MigrationInfo *migration_info) { bool migration_info_valid; int r = get_migration_info(parent_md, migration_info, &migration_info_valid); if (r < 0) { return r; } if (migration_info_valid) { return 0; } else if (snap_id == CEPH_NOSNAP) { *parent_md = m_parent_md; *migration_info = {}; return 0; } else { for (size_t i = 0; i < m_snapc.snaps.size(); ++i) { if (m_snapc.snaps[i].val == snap_id) { *parent_md = m_snap_parents[i]; *migration_info = {}; return 0; } } } return -ENOENT; } template <typename I> int RefreshRequest<I>::get_migration_info(ParentImageInfo *parent_md, MigrationInfo *migration_info, bool* migration_info_valid) { CephContext *cct = m_image_ctx.cct; if (m_migration_spec.header_type != cls::rbd::MIGRATION_HEADER_TYPE_DST || (m_migration_spec.state != cls::rbd::MIGRATION_STATE_PREPARED && m_migration_spec.state != cls::rbd::MIGRATION_STATE_EXECUTING && m_migration_spec.state != cls::rbd::MIGRATION_STATE_ABORTING)) { if (m_migration_spec.header_type != cls::rbd::MIGRATION_HEADER_TYPE_SRC && m_migration_spec.pool_id != -1 && m_migration_spec.state != cls::rbd::MIGRATION_STATE_EXECUTED) { lderr(cct) << this << " " << __func__ << ": invalid migration spec" << dendl; return -EINVAL; } *migration_info_valid = false; return 0; } if (!m_migration_spec.source_spec.empty()) { // use special pool id just to indicate a parent (migration source image) // exists parent_md->spec.pool_id = std::numeric_limits<int64_t>::max(); parent_md->spec.pool_namespace = ""; 
parent_md->spec.image_id = ""; } else { parent_md->spec.pool_id = m_migration_spec.pool_id; parent_md->spec.pool_namespace = m_migration_spec.pool_namespace; parent_md->spec.image_id = m_migration_spec.image_id; } parent_md->spec.snap_id = CEPH_NOSNAP; parent_md->overlap = std::min(m_size, m_migration_spec.overlap); auto snap_seqs = m_migration_spec.snap_seqs; // If new snapshots have been created on destination image after // migration stared, map the source CEPH_NOSNAP to the earliest of // these snapshots. snapid_t snap_id = snap_seqs.empty() ? 0 : snap_seqs.rbegin()->second; auto it = std::upper_bound(m_snapc.snaps.rbegin(), m_snapc.snaps.rend(), snap_id); if (it != m_snapc.snaps.rend()) { snap_seqs[CEPH_NOSNAP] = *it; } else { snap_seqs[CEPH_NOSNAP] = CEPH_NOSNAP; } std::set<uint64_t> snap_ids; for (auto& it : snap_seqs) { snap_ids.insert(it.second); } uint64_t overlap = snap_ids.find(CEPH_NOSNAP) != snap_ids.end() ? parent_md->overlap : 0; for (size_t i = 0; i < m_snapc.snaps.size(); ++i) { if (snap_ids.find(m_snapc.snaps[i].val) != snap_ids.end()) { overlap = std::max(overlap, m_snap_infos[i].image_size); } } *migration_info = {m_migration_spec.pool_id, m_migration_spec.pool_namespace, m_migration_spec.image_name, m_migration_spec.image_id, m_migration_spec.source_spec, {}, overlap, m_migration_spec.flatten}; *migration_info_valid = true; deep_copy::util::compute_snap_map(m_image_ctx.cct, 0, CEPH_NOSNAP, {}, snap_seqs, &migration_info->snap_map); return 0; } } // namespace image } // namespace librbd template class librbd::image::RefreshRequest<librbd::ImageCtx>;
50,603
31.109137
82
cc
null
ceph-main/src/librbd/image/RefreshRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IMAGE_REFRESH_REQUEST_H #define CEPH_LIBRBD_IMAGE_REFRESH_REQUEST_H #include "include/int_types.h" #include "include/buffer.h" #include "include/utime.h" #include "common/snap_types.h" #include "cls/lock/cls_lock_types.h" #include "librbd/ImageCtx.h" #include "librbd/Types.h" #include <string> #include <vector> class Context; namespace librbd { class ImageCtx; namespace image { template<typename> class RefreshParentRequest; template<typename ImageCtxT = ImageCtx> class RefreshRequest { public: static constexpr int MAX_ENOENT_RETRIES = 10; static RefreshRequest *create(ImageCtxT &image_ctx, bool acquiring_lock, bool skip_open_parent, Context *on_finish) { return new RefreshRequest(image_ctx, acquiring_lock, skip_open_parent, on_finish); } RefreshRequest(ImageCtxT &image_ctx, bool acquiring_lock, bool skip_open_parent, Context *on_finish); ~RefreshRequest(); void send(); private: /** * @verbatim * * <start> < * * * * * * * * * * * * * * * * * * * * * * * * * * (ENOENT) * ^ | * * * | (v1) * * * |-----> V1_READ_HEADER -------------> GET_MIGRATION_HEADER (skip if not * * | | migrating) * * | (v2) v * * \-----> V2_GET_MUTABLE_METADATA V1_GET_SNAPSHOTS * * * | | * * * | -EOPNOTSUPP v * * * | * * * V1_GET_LOCKS * * * | * * | * * * v v * v * * * V2_GET_PARENT <apply> * * * | | * * v | * * * * * * GET_MIGRATION_HEADER (skip if not | * (ENOENT) | migrating) | * v | * * V2_GET_METADATA | * * | | * * v | * * V2_GET_POOL_METADATA | * * | | * * v (skip if not enabled) | * * V2_GET_OP_FEATURES | * * | | * * v | * * V2_GET_GROUP | * * | | * * | -EOPNOTSUPP | * * | * * * | * * | * * | * * v v * | * * * V2_GET_SNAPSHOTS (skip if no snaps) | * (ENOENT) | | * * v | * * * V2_REFRESH_PARENT (skip if no parent or | * (ENOENT) | refresh not needed) | * v | * V2_INIT_EXCLUSIVE_LOCK (skip if lock | * | active or disabled) | * v | * V2_OPEN_OBJECT_MAP (skip if map | * | active 
or disabled) | * v | * V2_OPEN_JOURNAL (skip if journal | * | active or disabled) | * v | * V2_BLOCK_WRITES (skip if journal not | * | disabled) | * v | * <apply> | * | | * v | * V2_FINALIZE_REFRESH_PARENT (skip if refresh | * | not needed) | * (error) v | * * * * * > V2_SHUT_DOWN_EXCLUSIVE_LOCK (skip if lock | * | active or enabled) | * v | * V2_CLOSE_JOURNAL (skip if journal inactive | * | or enabled) | * v | * V2_CLOSE_OBJECT_MAP (skip if map inactive | * | or enabled) | * | | * \-------------------\/--------------------/ * | * v * FLUSH (skip if no new * | snapshots) * v * <finish> * * @endverbatim */ enum LegacySnapshot { LEGACY_SNAPSHOT_DISABLED, LEGACY_SNAPSHOT_ENABLED, LEGACY_SNAPSHOT_ENABLED_NO_TIMESTAMP }; ImageCtxT &m_image_ctx; bool m_acquiring_lock; bool m_skip_open_parent_image; Context *m_on_finish; cls::rbd::MigrationSpec m_migration_spec; int m_error_result; bool m_flush_aio; decltype(m_image_ctx.exclusive_lock) m_exclusive_lock; decltype(m_image_ctx.object_map) m_object_map; decltype(m_image_ctx.journal) m_journal; RefreshParentRequest<ImageCtxT> *m_refresh_parent; bufferlist m_out_bl; bool m_legacy_parent = false; LegacySnapshot m_legacy_snapshot = LEGACY_SNAPSHOT_DISABLED; int m_enoent_retries = 0; uint8_t m_order = 0; uint64_t m_size = 0; uint64_t m_features = 0; uint64_t m_incompatible_features = 0; uint64_t m_flags = 0; uint64_t m_op_features = 0; uint32_t m_read_only_flags = 0U; bool m_read_only = false; librados::IoCtx m_pool_metadata_io_ctx; std::map<std::string, bufferlist> m_metadata; std::string m_object_prefix; ParentImageInfo m_parent_md; bool m_head_parent_overlap = false; cls::rbd::GroupSpec m_group_spec; ::SnapContext m_snapc; std::vector<cls::rbd::SnapshotInfo> m_snap_infos; std::vector<ParentImageInfo> m_snap_parents; std::vector<uint8_t> m_snap_protection; std::vector<uint64_t> m_snap_flags; std::map<rados::cls::lock::locker_id_t, rados::cls::lock::locker_info_t> m_lockers; std::string m_lock_tag; bool m_exclusive_locked = 
false; bool m_blocked_writes = false; bool m_incomplete_update = false; void send_get_migration_header(); Context *handle_get_migration_header(int *result); void send_v1_read_header(); Context *handle_v1_read_header(int *result); void send_v1_get_snapshots(); Context *handle_v1_get_snapshots(int *result); void send_v1_get_locks(); Context *handle_v1_get_locks(int *result); void send_v1_apply(); Context *handle_v1_apply(int *result); void send_v2_get_mutable_metadata(); Context *handle_v2_get_mutable_metadata(int *result); void send_v2_get_parent(); Context *handle_v2_get_parent(int *result); void send_v2_get_metadata(); Context *handle_v2_get_metadata(int *result); void send_v2_get_pool_metadata(); Context *handle_v2_get_pool_metadata(int *result); void send_v2_get_op_features(); Context *handle_v2_get_op_features(int *result); void send_v2_get_group(); Context *handle_v2_get_group(int *result); void send_v2_get_snapshots(); Context *handle_v2_get_snapshots(int *result); void send_v2_get_snapshots_legacy(); Context *handle_v2_get_snapshots_legacy(int *result); void send_v2_refresh_parent(); Context *handle_v2_refresh_parent(int *result); void send_v2_init_exclusive_lock(); Context *handle_v2_init_exclusive_lock(int *result); void send_v2_open_journal(); Context *handle_v2_open_journal(int *result); void send_v2_block_writes(); Context *handle_v2_block_writes(int *result); void send_v2_open_object_map(); Context *handle_v2_open_object_map(int *result); void send_v2_apply(); Context *handle_v2_apply(int *result); Context *send_v2_finalize_refresh_parent(); Context *handle_v2_finalize_refresh_parent(int *result); Context *send_v2_shut_down_exclusive_lock(); Context *handle_v2_shut_down_exclusive_lock(int *result); Context *send_v2_close_journal(); Context *handle_v2_close_journal(int *result); Context *send_v2_close_object_map(); Context *handle_v2_close_object_map(int *result); Context *send_flush_aio(); Context *handle_flush_aio(int *result); Context 
*handle_error(int *result); void save_result(int *result) { if (m_error_result == 0 && *result < 0) { m_error_result = *result; } } void apply(); int get_parent_info(uint64_t snap_id, ParentImageInfo *parent_md, MigrationInfo *migration_info); int get_migration_info(ParentImageInfo *parent_md, MigrationInfo *migration_info, bool* migration_info_valid); }; } // namespace image } // namespace librbd extern template class librbd::image::RefreshRequest<librbd::ImageCtx>; #endif // CEPH_LIBRBD_IMAGE_REFRESH_REQUEST_H
10,041
35.384058
79
h
null
ceph-main/src/librbd/image/RemoveRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/image/RemoveRequest.h" #include "common/dout.h" #include "common/errno.h" #include "librbd/internal.h" #include "librbd/ImageState.h" #include "librbd/Journal.h" #include "librbd/ObjectMap.h" #include "librbd/image/DetachChildRequest.h" #include "librbd/image/PreRemoveRequest.h" #include "librbd/journal/RemoveRequest.h" #include "librbd/journal/TypeTraits.h" #include "librbd/mirror/DisableRequest.h" #include "librbd/operation/TrimRequest.h" #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::image::RemoveRequest: " << this << " " \ << __func__ << ": " namespace librbd { namespace image { using librados::IoCtx; using util::create_context_callback; using util::create_async_context_callback; using util::create_rados_callback; template<typename I> RemoveRequest<I>::RemoveRequest(IoCtx &ioctx, const std::string &image_name, const std::string &image_id, bool force, bool from_trash_remove, ProgressContext &prog_ctx, ContextWQ *op_work_queue, Context *on_finish) : m_ioctx(ioctx), m_image_name(image_name), m_image_id(image_id), m_force(force), m_from_trash_remove(from_trash_remove), m_prog_ctx(prog_ctx), m_op_work_queue(op_work_queue), m_on_finish(on_finish) { m_cct = reinterpret_cast<CephContext *>(m_ioctx.cct()); } template<typename I> RemoveRequest<I>::RemoveRequest(IoCtx &ioctx, I *image_ctx, bool force, bool from_trash_remove, ProgressContext &prog_ctx, ContextWQ *op_work_queue, Context *on_finish) : m_ioctx(ioctx), m_image_name(image_ctx->name), m_image_id(image_ctx->id), m_image_ctx(image_ctx), m_force(force), m_from_trash_remove(from_trash_remove), m_prog_ctx(prog_ctx), m_op_work_queue(op_work_queue), m_on_finish(on_finish), m_cct(image_ctx->cct), m_header_oid(image_ctx->header_oid), m_old_format(image_ctx->old_format), m_unknown_format(false) { } template<typename I> void RemoveRequest<I>::send() { ldout(m_cct, 
20) << dendl; open_image(); } template<typename I> void RemoveRequest<I>::open_image() { if (m_image_ctx != nullptr) { pre_remove_image(); return; } m_image_ctx = I::create(m_image_id.empty() ? m_image_name : "", m_image_id, nullptr, m_ioctx, false); ldout(m_cct, 20) << dendl; using klass = RemoveRequest<I>; Context *ctx = create_context_callback<klass, &klass::handle_open_image>( this); m_image_ctx->state->open(OPEN_FLAG_SKIP_OPEN_PARENT, ctx); } template<typename I> void RemoveRequest<I>::handle_open_image(int r) { ldout(m_cct, 20) << "r=" << r << dendl; if (r < 0) { m_image_ctx = nullptr; if (r != -ENOENT) { lderr(m_cct) << "error opening image: " << cpp_strerror(r) << dendl; finish(r); return; } remove_image(); return; } m_image_id = m_image_ctx->id; m_image_name = m_image_ctx->name; m_header_oid = m_image_ctx->header_oid; m_old_format = m_image_ctx->old_format; m_unknown_format = false; pre_remove_image(); } template<typename I> void RemoveRequest<I>::pre_remove_image() { ldout(m_cct, 5) << dendl; auto ctx = create_context_callback< RemoveRequest<I>, &RemoveRequest<I>::handle_pre_remove_image>(this); auto req = PreRemoveRequest<I>::create(m_image_ctx, m_force, ctx); req->send(); } template<typename I> void RemoveRequest<I>::handle_pre_remove_image(int r) { ldout(m_cct, 5) << "r=" << r << dendl; if (r < 0) { if (r == -ECHILD) { r = -ENOTEMPTY; } send_close_image(r); return; } if (!m_image_ctx->data_ctx.is_valid()) { detach_child(); return; } trim_image(); } template<typename I> void RemoveRequest<I>::trim_image() { ldout(m_cct, 20) << dendl; using klass = RemoveRequest<I>; Context *ctx = create_async_context_callback( *m_image_ctx, create_context_callback< klass, &klass::handle_trim_image>(this)); std::shared_lock owner_lock{m_image_ctx->owner_lock}; auto req = librbd::operation::TrimRequest<I>::create( *m_image_ctx, ctx, m_image_ctx->size, 0, m_prog_ctx); req->send(); } template<typename I> void RemoveRequest<I>::handle_trim_image(int r) { ldout(m_cct, 20) << 
"r=" << r << dendl; if (r < 0) { lderr(m_cct) << "failed to remove some object(s): " << cpp_strerror(r) << dendl; send_close_image(r); return; } if (m_old_format) { send_close_image(r); return; } detach_child(); } template<typename I> void RemoveRequest<I>::detach_child() { ldout(m_cct, 20) << dendl; auto ctx = create_context_callback< RemoveRequest<I>, &RemoveRequest<I>::handle_detach_child>(this); auto req = DetachChildRequest<I>::create(*m_image_ctx, ctx); req->send(); } template<typename I> void RemoveRequest<I>::handle_detach_child(int r) { ldout(m_cct, 20) << "r=" << r << dendl; if (r < 0) { lderr(m_cct) << "failed to detach child from parent: " << cpp_strerror(r) << dendl; send_close_image(r); return; } send_disable_mirror(); } template<typename I> void RemoveRequest<I>::send_disable_mirror() { ldout(m_cct, 20) << dendl; using klass = RemoveRequest<I>; Context *ctx = create_context_callback< klass, &klass::handle_disable_mirror>(this); mirror::DisableRequest<I> *req = mirror::DisableRequest<I>::create(m_image_ctx, m_force, !m_force, ctx); req->send(); } template<typename I> void RemoveRequest<I>::handle_disable_mirror(int r) { ldout(m_cct, 20) << "r=" << r << dendl; if (r == -EOPNOTSUPP) { r = 0; } else if (r < 0) { lderr(m_cct) << "error disabling image mirroring: " << cpp_strerror(r) << dendl; } // one last chance to ensure all snapshots have been deleted m_image_ctx->image_lock.lock_shared(); if (!m_image_ctx->snap_info.empty()) { ldout(m_cct, 5) << "image has snapshots - not removing" << dendl; m_ret_val = -ENOTEMPTY; } m_image_ctx->image_lock.unlock_shared(); send_close_image(r); } template<typename I> void RemoveRequest<I>::send_close_image(int r) { ldout(m_cct, 20) << dendl; m_ret_val = r; using klass = RemoveRequest<I>; Context *ctx = create_context_callback< klass, &klass::handle_send_close_image>(this); m_image_ctx->state->close(ctx); } template<typename I> void RemoveRequest<I>::handle_send_close_image(int r) { ldout(m_cct, 20) << "r=" << r << 
dendl; if (r < 0) { lderr(m_cct) << "error encountered while closing image: " << cpp_strerror(r) << dendl; } m_image_ctx = nullptr; if (m_ret_val < 0) { r = m_ret_val; finish(r); return; } remove_header(); } template<typename I> void RemoveRequest<I>::remove_header() { ldout(m_cct, 20) << dendl; using klass = RemoveRequest<I>; librados::AioCompletion *rados_completion = create_rados_callback<klass, &klass::handle_remove_header>(this); int r = m_ioctx.aio_remove(m_header_oid, rados_completion); ceph_assert(r == 0); rados_completion->release(); } template<typename I> void RemoveRequest<I>::handle_remove_header(int r) { ldout(m_cct, 20) << "r=" << r << dendl; if (r < 0 && r != -ENOENT) { lderr(m_cct) << "error removing header: " << cpp_strerror(r) << dendl; m_ret_val = r; } remove_image(); } template<typename I> void RemoveRequest<I>::remove_header_v2() { ldout(m_cct, 20) << dendl; if (m_header_oid.empty()) { m_header_oid = util::header_name(m_image_id); } using klass = RemoveRequest<I>; librados::AioCompletion *rados_completion = create_rados_callback<klass, &klass::handle_remove_header_v2>(this); int r = m_ioctx.aio_remove(m_header_oid, rados_completion); ceph_assert(r == 0); rados_completion->release(); } template<typename I> void RemoveRequest<I>::handle_remove_header_v2(int r) { ldout(m_cct, 20) << "r=" << r << dendl; if (r < 0 && r != -ENOENT) { lderr(m_cct) << "error removing header: " << cpp_strerror(r) << dendl; finish(r); return; } send_journal_remove(); } template<typename I> void RemoveRequest<I>::send_journal_remove() { ldout(m_cct, 20) << dendl; using klass = RemoveRequest<I>; Context *ctx = create_context_callback< klass, &klass::handle_journal_remove>(this); typename journal::TypeTraits<I>::ContextWQ* context_wq; Journal<I>::get_work_queue(m_cct, &context_wq); journal::RemoveRequest<I> *req = journal::RemoveRequest<I>::create( m_ioctx, m_image_id, Journal<>::IMAGE_CLIENT_ID, context_wq, ctx); req->send(); } template<typename I> void 
RemoveRequest<I>::handle_journal_remove(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  // a missing journal object is fine -- the feature may never have been
  // enabled for this image
  if (r < 0 && r != -ENOENT) {
    lderr(m_cct) << "failed to remove image journal: " << cpp_strerror(r)
                 << dendl;
    finish(r);
    return;
  } else {
    r = 0;
  }

  send_object_map_remove();
}

// Asynchronously delete the image's object-map object (if any).
template<typename I>
void RemoveRequest<I>::send_object_map_remove() {
  ldout(m_cct, 20) << dendl;

  using klass = RemoveRequest<I>;
  librados::AioCompletion *rados_completion =
    create_rados_callback<klass, &klass::handle_object_map_remove>(this);
  int r = ObjectMap<>::aio_remove(m_ioctx, m_image_id, rados_completion);
  ceph_assert(r == 0);
  rados_completion->release();
}

template<typename I>
void RemoveRequest<I>::handle_object_map_remove(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r < 0 && r != -ENOENT) {
    // bug fix: previously logged "failed to remove image journal" -- this
    // handler completes the object-map removal, not the journal removal
    lderr(m_cct) << "failed to remove image object map: " << cpp_strerror(r)
                 << dendl;
    finish(r);
    return;
  } else {
    r = 0;
  }

  mirror_image_remove();
}

// Remove this image's entry from the pool's RBD_MIRRORING object.
template<typename I>
void RemoveRequest<I>::mirror_image_remove() {
  ldout(m_cct, 20) << dendl;

  librados::ObjectWriteOperation op;
  cls_client::mirror_image_remove(&op, m_image_id);

  using klass = RemoveRequest<I>;
  librados::AioCompletion *rados_completion =
    create_rados_callback<klass, &klass::handle_mirror_image_remove>(this);
  int r = m_ioctx.aio_operate(RBD_MIRRORING, rados_completion, &op);
  ceph_assert(r == 0);
  rados_completion->release();
}

template<typename I>
void RemoveRequest<I>::handle_mirror_image_remove(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  // -ENOENT (no mirroring record) and -EOPNOTSUPP (presumably a cluster
  // without the mirroring class methods -- confirm) are tolerated
  if (r < 0 && r != -ENOENT && r != -EOPNOTSUPP) {
    lderr(m_cct) << "failed to remove mirror image state: " << cpp_strerror(r)
                 << dendl;
    finish(r);
    return;
  }

  if (m_from_trash_remove) {
    // both the id object and the directory entry have been removed in
    // a previous call to trash_move.
finish(0); return; } remove_id_object(); } template<typename I> void RemoveRequest<I>::remove_image() { ldout(m_cct, 20) << dendl; if (m_old_format || m_unknown_format) { remove_v1_image(); } else { remove_v2_image(); } } template<typename I> void RemoveRequest<I>::remove_v1_image() { ldout(m_cct, 20) << dendl; Context *ctx = new LambdaContext([this] (int r) { r = tmap_rm(m_ioctx, m_image_name); handle_remove_v1_image(r); }); m_op_work_queue->queue(ctx, 0); } template<typename I> void RemoveRequest<I>::handle_remove_v1_image(int r) { ldout(m_cct, 20) << "r=" << r << dendl; m_old_format = (r == 0); if (r == 0 || (r < 0 && !m_unknown_format)) { if (r < 0 && r != -ENOENT) { lderr(m_cct) << "error removing image from v1 directory: " << cpp_strerror(r) << dendl; } m_on_finish->complete(r); delete this; return; } if (!m_old_format) { remove_v2_image(); } } template<typename I> void RemoveRequest<I>::remove_v2_image() { ldout(m_cct, 20) << dendl; if (m_image_id.empty()) { dir_get_image_id(); return; } else if (m_image_name.empty()) { dir_get_image_name(); return; } remove_header_v2(); return; } template<typename I> void RemoveRequest<I>::dir_get_image_id() { ldout(m_cct, 20) << dendl; librados::ObjectReadOperation op; librbd::cls_client::dir_get_id_start(&op, m_image_name); using klass = RemoveRequest<I>; librados::AioCompletion *rados_completion = create_rados_callback<klass, &klass::handle_dir_get_image_id>(this); m_out_bl.clear(); int r = m_ioctx.aio_operate(RBD_DIRECTORY, rados_completion, &op, &m_out_bl); ceph_assert(r == 0); rados_completion->release(); } template<typename I> void RemoveRequest<I>::handle_dir_get_image_id(int r) { ldout(m_cct, 20) << "r=" << r << dendl; if (r < 0 && r != -ENOENT) { lderr(m_cct) << "error fetching image id: " << cpp_strerror(r) << dendl; finish(r); return; } if (r == 0) { auto iter = m_out_bl.cbegin(); r = librbd::cls_client::dir_get_id_finish(&iter, &m_image_id); if (r < 0) { finish(r); return; } } remove_header_v2(); } 
template<typename I> void RemoveRequest<I>::dir_get_image_name() { ldout(m_cct, 20) << dendl; librados::ObjectReadOperation op; librbd::cls_client::dir_get_name_start(&op, m_image_id); using klass = RemoveRequest<I>; librados::AioCompletion *rados_completion = create_rados_callback<klass, &klass::handle_dir_get_image_name>(this); m_out_bl.clear(); int r = m_ioctx.aio_operate(RBD_DIRECTORY, rados_completion, &op, &m_out_bl); ceph_assert(r == 0); rados_completion->release(); } template<typename I> void RemoveRequest<I>::handle_dir_get_image_name(int r) { ldout(m_cct, 20) << "r=" << r << dendl; if (r < 0 && r != -ENOENT) { lderr(m_cct) << "error fetching image name: " << cpp_strerror(r) << dendl; finish(r); return; } if (r == 0) { auto iter = m_out_bl.cbegin(); r = librbd::cls_client::dir_get_name_finish(&iter, &m_image_name); if (r < 0) { finish(r); return; } } remove_header_v2(); } template<typename I> void RemoveRequest<I>::remove_id_object() { ldout(m_cct, 20) << dendl; using klass = RemoveRequest<I>; librados::AioCompletion *rados_completion = create_rados_callback<klass, &klass::handle_remove_id_object>(this); int r = m_ioctx.aio_remove(util::id_obj_name(m_image_name), rados_completion); ceph_assert(r == 0); rados_completion->release(); } template<typename I> void RemoveRequest<I>::handle_remove_id_object(int r) { ldout(m_cct, 20) << "r=" << r << dendl; if (r < 0 && r != -ENOENT) { lderr(m_cct) << "error removing id object: " << cpp_strerror(r) << dendl; finish(r); return; } dir_remove_image(); } template<typename I> void RemoveRequest<I>::dir_remove_image() { ldout(m_cct, 20) << dendl; librados::ObjectWriteOperation op; librbd::cls_client::dir_remove_image(&op, m_image_name, m_image_id); using klass = RemoveRequest<I>; librados::AioCompletion *rados_completion = create_rados_callback<klass, &klass::handle_dir_remove_image>(this); int r = m_ioctx.aio_operate(RBD_DIRECTORY, rados_completion, &op); ceph_assert(r == 0); rados_completion->release(); } 
template<typename I>
void RemoveRequest<I>::handle_dir_remove_image(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  // -ENOENT is not logged as an error (the entry is already gone), but note
  // that r is still propagated to finish() as-is -- NOTE(review): confirm
  // that callers tolerate a -ENOENT completion here
  if (r < 0 && r != -ENOENT) {
    lderr(m_cct) << "error removing image from v2 directory: "
                 << cpp_strerror(r) << dendl;
  }

  finish(r);
}

// Complete the request: invoke the user callback and self-destruct.
template<typename I>
void RemoveRequest<I>::finish(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  m_on_finish->complete(r);
  delete this;
}

} // namespace image
} // namespace librbd

template class librbd::image::RemoveRequest<librbd::ImageCtx>;
15,781
24.537217
80
cc
null
ceph-main/src/librbd/image/RemoveRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IMAGE_REMOVE_REQUEST_H #define CEPH_LIBRBD_IMAGE_REMOVE_REQUEST_H #include "include/rados/librados.hpp" #include "librbd/ImageCtx.h" #include "librbd/image/TypeTraits.h" #include "common/Timer.h" #include <list> class Context; namespace librbd { class ProgressContext; namespace image { template<typename ImageCtxT = ImageCtx> class RemoveRequest { private: // mock unit testing support typedef ::librbd::image::TypeTraits<ImageCtxT> TypeTraits; typedef typename TypeTraits::ContextWQ ContextWQ; public: static RemoveRequest *create(librados::IoCtx &ioctx, const std::string &image_name, const std::string &image_id, bool force, bool from_trash_remove, ProgressContext &prog_ctx, ContextWQ *op_work_queue, Context *on_finish) { return new RemoveRequest(ioctx, image_name, image_id, force, from_trash_remove, prog_ctx, op_work_queue, on_finish); } static RemoveRequest *create(librados::IoCtx &ioctx, ImageCtxT *image_ctx, bool force, bool from_trash_remove, ProgressContext &prog_ctx, ContextWQ *op_work_queue, Context *on_finish) { return new RemoveRequest(ioctx, image_ctx, force, from_trash_remove, prog_ctx, op_work_queue, on_finish); } void send(); private: /** * @verbatim * * <start> * | * v * (skip if already opened) OPEN IMAGE------------------\ * | | * v | * PRE REMOVE IMAGE * * * | * | * | * v * | * (skip if invalid data pool) TRIM IMAGE * * * * * | * | * | * v * | * DETACH CHILD * | * | * | * v * v * CLOSE IMAGE < * * * * | * | | * error v | * /------<--------\ REMOVE HEADER<--------------/ * | | / | * | |-------<-------/ | * | | v * | | REMOVE JOURNAL * | | / | * | |-------<-------/ | * | | v * v ^ REMOVE OBJECTMAP * | | / | * | |-------<-------/ | * | | v * | | REMOVE MIRROR IMAGE * | | / | * | |-------<-------/ | * | | v * | | REMOVE ID OBJECT * | | / | * | |-------<-------/ | * | | v * | | REMOVE IMAGE * | | / | * | \-------<-------/ | * | v * 
\------------------>------------<finish> * * @endverbatim */ RemoveRequest(librados::IoCtx &ioctx, const std::string &image_name, const std::string &image_id, bool force, bool from_trash_remove, ProgressContext &prog_ctx, ContextWQ *op_work_queue, Context *on_finish); RemoveRequest(librados::IoCtx &ioctx, ImageCtxT *image_ctx, bool force, bool from_trash_remove, ProgressContext &prog_ctx, ContextWQ *op_work_queue, Context *on_finish); librados::IoCtx &m_ioctx; std::string m_image_name; std::string m_image_id; ImageCtxT *m_image_ctx = nullptr; bool m_force; bool m_from_trash_remove; ProgressContext &m_prog_ctx; ContextWQ *m_op_work_queue; Context *m_on_finish; CephContext *m_cct; std::string m_header_oid; bool m_old_format = false; bool m_unknown_format = true; librados::IoCtx m_parent_io_ctx; decltype(m_image_ctx->exclusive_lock) m_exclusive_lock = nullptr; int m_ret_val = 0; bufferlist m_out_bl; std::list<obj_watch_t> m_watchers; std::map<uint64_t, SnapInfo> m_snap_infos; void open_image(); void handle_open_image(int r); void send_journal_remove(); void handle_journal_remove(int r); void send_object_map_remove(); void handle_object_map_remove(int r); void mirror_image_remove(); void handle_mirror_image_remove(int r); void pre_remove_image(); void handle_pre_remove_image(int r); void trim_image(); void handle_trim_image(int r); void detach_child(); void handle_detach_child(int r); void send_disable_mirror(); void handle_disable_mirror(int r); void send_close_image(int r); void handle_send_close_image(int r); void remove_header(); void handle_remove_header(int r); void remove_header_v2(); void handle_remove_header_v2(int r); void remove_image(); void remove_v1_image(); void handle_remove_v1_image(int r); void remove_v2_image(); void dir_get_image_id(); void handle_dir_get_image_id(int r); void dir_get_image_name(); void handle_dir_get_image_name(int r); void remove_id_object(); void handle_remove_id_object(int r); void dir_remove_image(); void 
handle_dir_remove_image(int r); void finish(int r); }; } // namespace image } // namespace librbd extern template class librbd::image::RemoveRequest<librbd::ImageCtx>; #endif // CEPH_LIBRBD_IMAGE_REMOVE_REQUEST_H
6,373
31.191919
80
h
null
ceph-main/src/librbd/image/SetFlagsRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/image/SetFlagsRequest.h" #include "common/dout.h" #include "common/errno.h" #include "cls/rbd/cls_rbd_client.h" #include "librbd/ImageCtx.h" #include "librbd/Utils.h" #include "include/ceph_assert.h" #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::image::SetFlagsRequest: " namespace librbd { namespace image { using util::create_context_callback; using util::create_rados_callback; template <typename I> SetFlagsRequest<I>::SetFlagsRequest(I *image_ctx, uint64_t flags, uint64_t mask, Context *on_finish) : m_image_ctx(image_ctx), m_flags(flags), m_mask(mask), m_on_finish(on_finish) { } template <typename I> void SetFlagsRequest<I>::send() { send_set_flags(); } template <typename I> void SetFlagsRequest<I>::send_set_flags() { CephContext *cct = m_image_ctx->cct; ldout(cct, 20) << __func__ << dendl; std::unique_lock image_locker{m_image_ctx->image_lock}; std::vector<uint64_t> snap_ids; snap_ids.push_back(CEPH_NOSNAP); for (auto it : m_image_ctx->snap_info) { snap_ids.push_back(it.first); } Context *ctx = create_context_callback< SetFlagsRequest<I>, &SetFlagsRequest<I>::handle_set_flags>(this); C_Gather *gather_ctx = new C_Gather(cct, ctx); for (auto snap_id : snap_ids) { librados::ObjectWriteOperation op; cls_client::set_flags(&op, snap_id, m_flags, m_mask); librados::AioCompletion *comp = create_rados_callback(gather_ctx->new_sub()); int r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op); ceph_assert(r == 0); comp->release(); } gather_ctx->activate(); } template <typename I> Context *SetFlagsRequest<I>::handle_set_flags(int *result) { CephContext *cct = m_image_ctx->cct; ldout(cct, 20) << __func__ << ": r=" << *result << dendl; if (*result < 0) { lderr(cct) << "set_flags failed: " << cpp_strerror(*result) << dendl; } return m_on_finish; } } // namespace image } // namespace librbd template 
class librbd::image::SetFlagsRequest<librbd::ImageCtx>;
2,169
26.468354
80
cc
null
ceph-main/src/librbd/image/SetFlagsRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IMAGE_SET_FLAGS_REQUEST_H #define CEPH_LIBRBD_IMAGE_SET_FLAGS_REQUEST_H #include "include/buffer.h" #include <map> #include <string> class Context; namespace librbd { class ImageCtx; namespace image { template <typename ImageCtxT = ImageCtx> class SetFlagsRequest { public: static SetFlagsRequest *create(ImageCtxT *image_ctx, uint64_t flags, uint64_t mask, Context *on_finish) { return new SetFlagsRequest(image_ctx, flags, mask, on_finish); } void send(); private: /** * @verbatim * * <start> * | . . . * v v . * SET_FLAGS . (for every snapshot) * | . . * v . . . * <finis> * * @endverbatim */ SetFlagsRequest(ImageCtxT *image_ctx, uint64_t flags, uint64_t mask, Context *on_finish); ImageCtxT *m_image_ctx; uint64_t m_flags; uint64_t m_mask; Context *m_on_finish; void send_set_flags(); Context *handle_set_flags(int *result); }; } // namespace image } // namespace librbd extern template class librbd::image::SetFlagsRequest<librbd::ImageCtx>; #endif // CEPH_LIBRBD_IMAGE_SET_FLAGS_REQUEST_H
1,235
18.935484
71
h
null
ceph-main/src/librbd/image/SetSnapRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/image/SetSnapRequest.h" #include "common/dout.h" #include "common/errno.h" #include "librbd/ExclusiveLock.h" #include "librbd/ImageCtx.h" #include "librbd/ObjectMap.h" #include "librbd/Utils.h" #include "librbd/image/RefreshParentRequest.h" #include "librbd/io/ImageDispatcherInterface.h" #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::image::SetSnapRequest: " namespace librbd { namespace image { using util::create_context_callback; template <typename I> SetSnapRequest<I>::SetSnapRequest(I &image_ctx, uint64_t snap_id, Context *on_finish) : m_image_ctx(image_ctx), m_snap_id(snap_id), m_on_finish(on_finish), m_exclusive_lock(nullptr), m_object_map(nullptr), m_refresh_parent(nullptr), m_writes_blocked(false) { } template <typename I> SetSnapRequest<I>::~SetSnapRequest() { ceph_assert(!m_writes_blocked); delete m_refresh_parent; if (m_object_map) { m_object_map->put(); } if (m_exclusive_lock) { m_exclusive_lock->put(); } } template <typename I> void SetSnapRequest<I>::send() { if (m_snap_id == CEPH_NOSNAP) { send_init_exclusive_lock(); } else { send_block_writes(); } } template <typename I> void SetSnapRequest<I>::send_init_exclusive_lock() { { std::shared_lock image_locker{m_image_ctx.image_lock}; if (m_image_ctx.exclusive_lock != nullptr) { ceph_assert(m_image_ctx.snap_id == CEPH_NOSNAP); send_complete(); return; } } if (m_image_ctx.read_only || !m_image_ctx.test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) { int r = 0; if (send_refresh_parent(&r) != nullptr) { send_complete(); } return; } CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << __func__ << dendl; m_exclusive_lock = ExclusiveLock<I>::create(m_image_ctx); using klass = SetSnapRequest<I>; Context *ctx = create_context_callback< klass, &klass::handle_init_exclusive_lock>(this); std::shared_lock owner_locker{m_image_ctx.owner_lock}; 
m_exclusive_lock->init(m_image_ctx.features, ctx); } template <typename I> Context *SetSnapRequest<I>::handle_init_exclusive_lock(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << __func__ << ": r=" << *result << dendl; if (*result < 0) { lderr(cct) << "failed to initialize exclusive lock: " << cpp_strerror(*result) << dendl; finalize(); return m_on_finish; } return send_refresh_parent(result); } template <typename I> void SetSnapRequest<I>::send_block_writes() { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << __func__ << dendl; m_writes_blocked = true; using klass = SetSnapRequest<I>; Context *ctx = create_context_callback< klass, &klass::handle_block_writes>(this); std::shared_lock owner_locker{m_image_ctx.owner_lock}; m_image_ctx.io_image_dispatcher->block_writes(ctx); } template <typename I> Context *SetSnapRequest<I>::handle_block_writes(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << __func__ << ": r=" << *result << dendl; if (*result < 0) { lderr(cct) << "failed to block writes: " << cpp_strerror(*result) << dendl; finalize(); return m_on_finish; } { std::shared_lock image_locker{m_image_ctx.image_lock}; auto it = m_image_ctx.snap_info.find(m_snap_id); if (it == m_image_ctx.snap_info.end()) { ldout(cct, 5) << "failed to locate snapshot '" << m_snap_id << "'" << dendl; *result = -ENOENT; finalize(); return m_on_finish; } } return send_shut_down_exclusive_lock(result); } template <typename I> Context *SetSnapRequest<I>::send_shut_down_exclusive_lock(int *result) { { std::shared_lock image_locker{m_image_ctx.image_lock}; m_exclusive_lock = m_image_ctx.exclusive_lock; } if (m_exclusive_lock == nullptr) { return send_refresh_parent(result); } CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << __func__ << dendl; using klass = SetSnapRequest<I>; Context *ctx = create_context_callback< klass, &klass::handle_shut_down_exclusive_lock>(this); m_exclusive_lock->shut_down(ctx); return nullptr; } template <typename I> 
Context *SetSnapRequest<I>::handle_shut_down_exclusive_lock(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << __func__ << ": r=" << *result << dendl; if (*result < 0) { lderr(cct) << "failed to shut down exclusive lock: " << cpp_strerror(*result) << dendl; finalize(); return m_on_finish; } return send_refresh_parent(result); } template <typename I> Context *SetSnapRequest<I>::send_refresh_parent(int *result) { CephContext *cct = m_image_ctx.cct; ParentImageInfo parent_md; bool refresh_parent; { std::shared_lock image_locker{m_image_ctx.image_lock}; const auto parent_info = m_image_ctx.get_parent_info(m_snap_id); if (parent_info == nullptr) { *result = -ENOENT; lderr(cct) << "failed to retrieve snapshot parent info" << dendl; finalize(); return m_on_finish; } parent_md = *parent_info; refresh_parent = RefreshParentRequest<I>::is_refresh_required( m_image_ctx, parent_md, m_image_ctx.migration_info); } if (!refresh_parent) { if (m_snap_id == CEPH_NOSNAP) { // object map is loaded when exclusive lock is acquired *result = apply(); finalize(); return m_on_finish; } else { // load snapshot object map return send_open_object_map(result); } } ldout(cct, 10) << __func__ << dendl; using klass = SetSnapRequest<I>; Context *ctx = create_context_callback< klass, &klass::handle_refresh_parent>(this); m_refresh_parent = RefreshParentRequest<I>::create(m_image_ctx, parent_md, m_image_ctx.migration_info, ctx); m_refresh_parent->send(); return nullptr; } template <typename I> Context *SetSnapRequest<I>::handle_refresh_parent(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << __func__ << ": r=" << *result << dendl; if (*result < 0) { lderr(cct) << "failed to refresh snapshot parent: " << cpp_strerror(*result) << dendl; finalize(); return m_on_finish; } if (m_snap_id == CEPH_NOSNAP) { // object map is loaded when exclusive lock is acquired *result = apply(); if (*result < 0) { finalize(); return m_on_finish; } return 
send_finalize_refresh_parent(result); } else { // load snapshot object map return send_open_object_map(result); } } template <typename I> Context *SetSnapRequest<I>::send_open_object_map(int *result) { if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP)) { *result = apply(); if (*result < 0) { finalize(); return m_on_finish; } return send_finalize_refresh_parent(result); } CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << __func__ << dendl; using klass = SetSnapRequest<I>; Context *ctx = create_context_callback< klass, &klass::handle_open_object_map>(this); m_object_map = ObjectMap<I>::create(m_image_ctx, m_snap_id); m_object_map->open(ctx); return nullptr; } template <typename I> Context *SetSnapRequest<I>::handle_open_object_map(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << __func__ << ": r=" << *result << dendl; if (*result < 0) { lderr(cct) << "failed to open object map: " << cpp_strerror(*result) << dendl; m_object_map->put(); m_object_map = nullptr; } *result = apply(); if (*result < 0) { finalize(); return m_on_finish; } return send_finalize_refresh_parent(result); } template <typename I> Context *SetSnapRequest<I>::send_finalize_refresh_parent(int *result) { if (m_refresh_parent == nullptr) { finalize(); return m_on_finish; } CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; using klass = SetSnapRequest<I>; Context *ctx = create_context_callback< klass, &klass::handle_finalize_refresh_parent>(this); m_refresh_parent->finalize(ctx); return nullptr; } template <typename I> Context *SetSnapRequest<I>::handle_finalize_refresh_parent(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; if (*result < 0) { lderr(cct) << "failed to close parent image: " << cpp_strerror(*result) << dendl; } finalize(); return m_on_finish; } template <typename I> int SetSnapRequest<I>::apply() { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) 
<< __func__ << dendl; std::scoped_lock locker{m_image_ctx.owner_lock, m_image_ctx.image_lock}; if (m_snap_id != CEPH_NOSNAP) { ceph_assert(m_image_ctx.exclusive_lock == nullptr); int r = m_image_ctx.snap_set(m_snap_id); if (r < 0) { return r; } } else { std::swap(m_image_ctx.exclusive_lock, m_exclusive_lock); m_image_ctx.snap_unset(); } if (m_refresh_parent != nullptr) { m_refresh_parent->apply(); } std::swap(m_object_map, m_image_ctx.object_map); return 0; } template <typename I> void SetSnapRequest<I>::finalize() { if (m_writes_blocked) { m_image_ctx.io_image_dispatcher->unblock_writes(); m_writes_blocked = false; } } template <typename I> void SetSnapRequest<I>::send_complete() { finalize(); m_on_finish->complete(0); delete this; } } // namespace image } // namespace librbd template class librbd::image::SetSnapRequest<librbd::ImageCtx>;
9,795
25.547425
80
cc
null
ceph-main/src/librbd/image/SetSnapRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IMAGE_SNAP_SET_REQUEST_H #define CEPH_LIBRBD_IMAGE_SNAP_SET_REQUEST_H #include "cls/rbd/cls_rbd_client.h" #include <string> class Context; namespace librbd { template <typename> class ExclusiveLock; class ImageCtx; template <typename> class ObjectMap; namespace image { template <typename> class RefreshParentRequest; template <typename ImageCtxT = ImageCtx> class SetSnapRequest { public: static SetSnapRequest *create(ImageCtxT &image_ctx, uint64_t snap_id, Context *on_finish) { return new SetSnapRequest(image_ctx, snap_id, on_finish); } ~SetSnapRequest(); void send(); private: /** * @verbatim * * <start> * | * | (set snap) * |-----------> BLOCK_WRITES * | | * | v * | SHUTDOWN_EXCLUSIVE_LOCK (skip if lock inactive * | | or disabled) * | v * | REFRESH_PARENT (skip if no parent * | | or refresh not needed) * | v * | OPEN_OBJECT_MAP (skip if map disabled) * | | * | v * | <apply> * | | * | v * | FINALIZE_REFRESH_PARENT (skip if no parent * | | or refresh not needed) * | v * | <finish> * | * \-----------> INIT_EXCLUSIVE_LOCK (skip if active or * | disabled) * v * REFRESH_PARENT (skip if no parent * | or refresh not needed) * v * <apply> * | * v * FINALIZE_REFRESH_PARENT (skip if no parent * | or refresh not needed) * v * <finish> * * @endverbatim */ SetSnapRequest(ImageCtxT &image_ctx, uint64_t snap_id, Context *on_finish); ImageCtxT &m_image_ctx; uint64_t m_snap_id; Context *m_on_finish; ExclusiveLock<ImageCtxT> *m_exclusive_lock; ObjectMap<ImageCtxT> *m_object_map; RefreshParentRequest<ImageCtxT> *m_refresh_parent; bool m_writes_blocked; void send_block_writes(); Context *handle_block_writes(int *result); void send_init_exclusive_lock(); Context *handle_init_exclusive_lock(int *result); Context *send_shut_down_exclusive_lock(int *result); Context *handle_shut_down_exclusive_lock(int *result); Context *send_refresh_parent(int *result); Context 
*handle_refresh_parent(int *result); Context *send_open_object_map(int *result); Context *handle_open_object_map(int *result); Context *send_finalize_refresh_parent(int *result); Context *handle_finalize_refresh_parent(int *result); int apply(); void finalize(); void send_complete(); }; } // namespace image } // namespace librbd extern template class librbd::image::SetSnapRequest<librbd::ImageCtx>; #endif // CEPH_LIBRBD_IMAGE_SNAP_SET_REQUEST_H
3,340
27.07563
77
h
null
ceph-main/src/librbd/image/TypeTraits.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IMAGE_TYPE_TRAITS_H #define CEPH_LIBRBD_IMAGE_TYPE_TRAITS_H namespace librbd { namespace asio { struct ContextWQ; } namespace image { template <typename ImageCtxT> struct TypeTraits { typedef asio::ContextWQ ContextWQ; }; } // namespace image } // namespace librbd #endif // CEPH_LIBRBD_IMAGE_TYPE_TRAITS_H
434
18.772727
70
h
null
ceph-main/src/librbd/image/Types.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef LIBRBD_IMAGE_TYPES_H #define LIBRBD_IMAGE_TYPES_H namespace librbd { namespace image { enum { CREATE_FLAG_SKIP_MIRROR_ENABLE = 1 << 0, CREATE_FLAG_FORCE_MIRROR_ENABLE = 1 << 1, CREATE_FLAG_MIRROR_ENABLE_MASK = (CREATE_FLAG_SKIP_MIRROR_ENABLE | CREATE_FLAG_FORCE_MIRROR_ENABLE), }; } // namespace image } // librbd #endif // LIBRBD_IMAGE_TYPES_H
499
22.809524
70
h
null
ceph-main/src/librbd/image/ValidatePoolRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/image/ValidatePoolRequest.h" #include "include/rados/librados.hpp" #include "include/ceph_assert.h" #include "common/dout.h" #include "common/errno.h" #include "librbd/ImageCtx.h" #include "librbd/Utils.h" #include "librbd/asio/ContextWQ.h" #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::image::ValidatePoolRequest: " \ << __func__ << ": " namespace librbd { namespace image { namespace { const std::string OVERWRITE_VALIDATED("overwrite validated"); const std::string VALIDATE("validate"); } // anonymous namespace using util::create_rados_callback; using util::create_context_callback; using util::create_async_context_callback; template <typename I> ValidatePoolRequest<I>::ValidatePoolRequest(librados::IoCtx& io_ctx, Context *on_finish) : m_cct(reinterpret_cast<CephContext*>(io_ctx.cct())), m_on_finish(on_finish) { // validation should occur in default namespace m_io_ctx.dup(io_ctx); m_io_ctx.set_namespace(""); } template <typename I> void ValidatePoolRequest<I>::send() { read_rbd_info(); } template <typename I> void ValidatePoolRequest<I>::read_rbd_info() { ldout(m_cct, 5) << dendl; auto comp = create_rados_callback< ValidatePoolRequest<I>, &ValidatePoolRequest<I>::handle_read_rbd_info>(this); librados::ObjectReadOperation op; op.read(0, 0, nullptr, nullptr); m_out_bl.clear(); int r = m_io_ctx.aio_operate(RBD_INFO, comp, &op, &m_out_bl); ceph_assert(r == 0); comp->release(); } template <typename I> void ValidatePoolRequest<I>::handle_read_rbd_info(int r) { ldout(m_cct, 5) << "r=" << r << dendl; if (r >= 0) { bufferlist validated_bl; validated_bl.append(OVERWRITE_VALIDATED); bufferlist validate_bl; validate_bl.append(VALIDATE); if (m_out_bl.contents_equal(validated_bl)) { // already validated pool finish(0); return; } else if (m_out_bl.contents_equal(validate_bl)) { // implies snapshot was already successfully 
created overwrite_rbd_info(); return; } } else if (r < 0 && r != -ENOENT) { lderr(m_cct) << "failed to read RBD info: " << cpp_strerror(r) << dendl; finish(r); return; } create_snapshot(); } template <typename I> void ValidatePoolRequest<I>::create_snapshot() { ldout(m_cct, 5) << dendl; // allocate a self-managed snapshot id if this a new pool to force // self-managed snapshot mode auto comp = create_rados_callback< ValidatePoolRequest<I>, &ValidatePoolRequest<I>::handle_create_snapshot>(this); m_io_ctx.aio_selfmanaged_snap_create(&m_snap_id, comp); comp->release(); } template <typename I> void ValidatePoolRequest<I>::handle_create_snapshot(int r) { ldout(m_cct, 5) << "r=" << r << dendl; if (r == -EINVAL) { lderr(m_cct) << "pool not configured for self-managed RBD snapshot support" << dendl; finish(r); return; } else if (r < 0) { lderr(m_cct) << "failed to allocate self-managed snapshot: " << cpp_strerror(r) << dendl; finish(r); return; } write_rbd_info(); } template <typename I> void ValidatePoolRequest<I>::write_rbd_info() { ldout(m_cct, 5) << dendl; bufferlist bl; bl.append(VALIDATE); librados::ObjectWriteOperation op; op.create(true); op.write(0, bl); auto comp = create_rados_callback< ValidatePoolRequest<I>, &ValidatePoolRequest<I>::handle_write_rbd_info>(this); int r = m_io_ctx.aio_operate(RBD_INFO, comp, &op); ceph_assert(r == 0); comp->release(); } template <typename I> void ValidatePoolRequest<I>::handle_write_rbd_info(int r) { ldout(m_cct, 5) << "r=" << r << dendl; if (r == -EOPNOTSUPP) { lderr(m_cct) << "pool missing required overwrite support" << dendl; m_ret_val = -EINVAL; } else if (r < 0 && r != -EEXIST) { lderr(m_cct) << "failed to write RBD info: " << cpp_strerror(r) << dendl; m_ret_val = r; } remove_snapshot(); } template <typename I> void ValidatePoolRequest<I>::remove_snapshot() { ldout(m_cct, 5) << dendl; auto comp = create_rados_callback< ValidatePoolRequest<I>, &ValidatePoolRequest<I>::handle_remove_snapshot>(this); 
m_io_ctx.aio_selfmanaged_snap_remove(m_snap_id, comp); comp->release(); } template <typename I> void ValidatePoolRequest<I>::handle_remove_snapshot(int r) { ldout(m_cct, 5) << "r=" << r << dendl; if (r < 0) { // not a fatal error lderr(m_cct) << "failed to remove validation snapshot: " << cpp_strerror(r) << dendl; } if (m_ret_val < 0) { finish(m_ret_val); return; } overwrite_rbd_info(); } template <typename I> void ValidatePoolRequest<I>::overwrite_rbd_info() { ldout(m_cct, 5) << dendl; bufferlist bl; bl.append(OVERWRITE_VALIDATED); librados::ObjectWriteOperation op; op.write(0, bl); auto comp = create_rados_callback< ValidatePoolRequest<I>, &ValidatePoolRequest<I>::handle_overwrite_rbd_info>(this); int r = m_io_ctx.aio_operate(RBD_INFO, comp, &op); ceph_assert(r == 0); comp->release(); } template <typename I> void ValidatePoolRequest<I>::handle_overwrite_rbd_info(int r) { ldout(m_cct, 5) << "r=" << r << dendl; if (r == -EOPNOTSUPP) { lderr(m_cct) << "pool missing required overwrite support" << dendl; finish(-EINVAL); return; } else if (r < 0) { lderr(m_cct) << "failed to validate overwrite support: " << cpp_strerror(r) << dendl; finish(r); return; } finish(0); } template <typename I> void ValidatePoolRequest<I>::finish(int r) { ldout(m_cct, 5) << "r=" << r << dendl; m_on_finish->complete(r); delete this; } } // namespace image } // namespace librbd template class librbd::image::ValidatePoolRequest<librbd::ImageCtx>;
5,952
24.331915
79
cc
null
ceph-main/src/librbd/image/ValidatePoolRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IMAGE_VALIDATE_POOL_REQUEST_H #define CEPH_LIBRBD_IMAGE_VALIDATE_POOL_REQUEST_H #include "include/common_fwd.h" #include "include/rados/librados.hpp" #include "include/buffer.h" class Context; namespace librbd { struct ImageCtx; namespace asio { struct ContextWQ; } namespace image { template <typename ImageCtxT> class ValidatePoolRequest { public: static ValidatePoolRequest* create(librados::IoCtx& io_ctx, Context *on_finish) { return new ValidatePoolRequest(io_ctx, on_finish); } ValidatePoolRequest(librados::IoCtx& io_ctx, Context *on_finish); void send(); private: /** * @verbatim * * <start> * | * v (overwrites validated) * READ RBD INFO . . . . . . . . . * | . . * | . (snapshots validated) . * | . . . . . . . . . . . * v . . * CREATE SNAPSHOT . . * | . . * v . . * WRITE RBD INFO . . * | . . * v . . * REMOVE SNAPSHOT . . * | . . * v . . * OVERWRITE RBD INFO < . . . . * | . * v . * <finish> < . . . . . . . . . .` * * @endverbatim */ librados::IoCtx m_io_ctx; CephContext* m_cct; Context* m_on_finish; int m_ret_val = 0; bufferlist m_out_bl; uint64_t m_snap_id = 0; void read_rbd_info(); void handle_read_rbd_info(int r); void create_snapshot(); void handle_create_snapshot(int r); void write_rbd_info(); void handle_write_rbd_info(int r); void remove_snapshot(); void handle_remove_snapshot(int r); void overwrite_rbd_info(); void handle_overwrite_rbd_info(int r); void finish(int r); }; } // namespace image } // namespace librbd extern template class librbd::image::ValidatePoolRequest<librbd::ImageCtx>; #endif // CEPH_LIBRBD_IMAGE_VALIDATE_POOL_REQUEST_H
2,228
22.712766
75
h
null
ceph-main/src/librbd/image_watcher/NotifyLockOwner.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/image_watcher/NotifyLockOwner.h" #include "common/errno.h" #include "librbd/ImageCtx.h" #include "librbd/Utils.h" #include "librbd/WatchNotifyTypes.h" #include "librbd/watcher/Notifier.h" #include <map> #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::image_watcher::NotifyLockOwner: " \ << this << " " << __func__ namespace librbd { namespace image_watcher { using namespace watch_notify; using util::create_context_callback; NotifyLockOwner::NotifyLockOwner(ImageCtx &image_ctx, watcher::Notifier &notifier, bufferlist &&bl, Context *on_finish) : m_image_ctx(image_ctx), m_notifier(notifier), m_bl(std::move(bl)), m_on_finish(on_finish) { } void NotifyLockOwner::send() { send_notify(); } void NotifyLockOwner::send_notify() { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << dendl; ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); m_notifier.notify(m_bl, &m_notify_response, create_context_callback< NotifyLockOwner, &NotifyLockOwner::handle_notify>(this)); } void NotifyLockOwner::handle_notify(int r) { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << ": r=" << r << dendl; if (r < 0 && r != -ETIMEDOUT) { lderr(cct) << ": lock owner notification failed: " << cpp_strerror(r) << dendl; finish(r); return; } bufferlist response; bool lock_owner_responded = false; for (auto &it : m_notify_response.acks) { if (it.second.length() > 0) { if (lock_owner_responded) { lderr(cct) << ": duplicate lock owners detected" << dendl; finish(-EINVAL); return; } lock_owner_responded = true; response = std::move(it.second); } } if (!lock_owner_responded) { ldout(cct, 1) << ": no lock owners detected" << dendl; finish(-ETIMEDOUT); return; } try { auto iter = response.cbegin(); ResponseMessage response_message; using ceph::decode; decode(response_message, iter); r = response_message.result; ldout(cct, 20) << " client 
responded with r=" << r << dendl; } catch (const buffer::error &err) { r = -EINVAL; } finish(r); } void NotifyLockOwner::finish(int r) { m_on_finish->complete(r); delete this; } } // namespace image_watcher } // namespace librbd
2,508
24.865979
74
cc
null
ceph-main/src/librbd/image_watcher/NotifyLockOwner.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IMAGE_WATCHER_NOTIFY_LOCK_OWNER_H #define CEPH_LIBRBD_IMAGE_WATCHER_NOTIFY_LOCK_OWNER_H #include "include/buffer.h" #include "librbd/watcher/Types.h" class Context; namespace librbd { struct ImageCtx; namespace watcher { class Notifier; } namespace image_watcher { class NotifyLockOwner { public: static NotifyLockOwner *create(ImageCtx &image_ctx, watcher::Notifier &notifier, bufferlist &&bl, Context *on_finish) { return new NotifyLockOwner(image_ctx, notifier, std::move(bl), on_finish); } NotifyLockOwner(ImageCtx &image_ctx, watcher::Notifier &notifier, bufferlist &&bl, Context *on_finish); void send(); private: ImageCtx &m_image_ctx; watcher::Notifier &m_notifier; bufferlist m_bl; watcher::NotifyResponse m_notify_response; Context *m_on_finish; void send_notify(); void handle_notify(int r); void finish(int r); }; } // namespace image_watcher } // namespace librbd #endif // CEPH_LIBRBD_IMAGE_WATCHER_NOTIFY_LOCK_OWNER_H
1,179
22.137255
78
h
null
ceph-main/src/librbd/io/AioCompletion.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/io/AioCompletion.h" #include <errno.h> #include "common/ceph_context.h" #include "common/dout.h" #include "common/errno.h" #include "common/perf_counters.h" #include "librbd/AsioEngine.h" #include "librbd/ImageCtx.h" #include "librbd/internal.h" #include "librbd/Journal.h" #include "librbd/Types.h" #include <boost/asio/dispatch.hpp> #include <boost/asio/post.hpp> #ifdef WITH_LTTNG #include "tracing/librbd.h" #else #define tracepoint(...) #endif #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::io::AioCompletion: " << this \ << " " << __func__ << ": " namespace librbd { namespace io { int AioCompletion::wait_for_complete() { tracepoint(librbd, aio_wait_for_complete_enter, this); { std::unique_lock<std::mutex> locker(lock); while (state != AIO_STATE_COMPLETE) { cond.wait(locker); } } tracepoint(librbd, aio_wait_for_complete_exit, 0); return 0; } void AioCompletion::finalize() { ceph_assert(ictx != nullptr); CephContext *cct = ictx->cct; // finalize any pending error results since we won't be // atomically incrementing rval anymore int err_r = error_rval; if (err_r < 0) { rval = err_r; } ssize_t r = rval; ldout(cct, 20) << "r=" << r << dendl; if (r >= 0 && aio_type == AIO_TYPE_READ) { read_result.assemble_result(cct); } } void AioCompletion::complete() { ceph_assert(ictx != nullptr); ssize_t r = rval; if ((aio_type == AIO_TYPE_CLOSE) || (aio_type == AIO_TYPE_OPEN && r < 0)) { ictx = nullptr; external_callback = false; } else { CephContext *cct = ictx->cct; tracepoint(librbd, aio_complete_enter, this, r); if (ictx->perfcounter != nullptr) { ceph::timespan elapsed = coarse_mono_clock::now() - start_time; switch (aio_type) { case AIO_TYPE_GENERIC: case AIO_TYPE_OPEN: break; case AIO_TYPE_READ: ictx->perfcounter->tinc(l_librbd_rd_latency, elapsed); break; case AIO_TYPE_WRITE: 
ictx->perfcounter->tinc(l_librbd_wr_latency, elapsed); break; case AIO_TYPE_DISCARD: ictx->perfcounter->tinc(l_librbd_discard_latency, elapsed); break; case AIO_TYPE_FLUSH: ictx->perfcounter->tinc(l_librbd_flush_latency, elapsed); break; case AIO_TYPE_WRITESAME: ictx->perfcounter->tinc(l_librbd_ws_latency, elapsed); break; case AIO_TYPE_COMPARE_AND_WRITE: ictx->perfcounter->tinc(l_librbd_cmp_latency, elapsed); break; default: lderr(cct) << "completed invalid aio_type: " << aio_type << dendl; break; } } } state = AIO_STATE_CALLBACK; if (complete_cb) { if (external_callback) { complete_external_callback(); } else { complete_cb(rbd_comp, complete_arg); complete_event_socket(); notify_callbacks_complete(); } } else { complete_event_socket(); notify_callbacks_complete(); } tracepoint(librbd, aio_complete_exit); } void AioCompletion::init_time(ImageCtx *i, aio_type_t t) { if (ictx == nullptr) { ictx = i; aio_type = t; start_time = coarse_mono_clock::now(); } } void AioCompletion::start_op() { ceph_assert(ictx != nullptr); if (aio_type == AIO_TYPE_OPEN || aio_type == AIO_TYPE_CLOSE) { // no need to track async open/close operations return; } ceph_assert(!async_op.started()); async_op.start_op(*ictx); } void AioCompletion::queue_complete() { uint32_t zero = 0; pending_count.compare_exchange_strong(zero, 1); ceph_assert(zero == 0); add_request(); // ensure completion fires in clean lock context boost::asio::post(ictx->asio_engine->get_api_strand(), [this]() { complete_request(0); }); } void AioCompletion::block(CephContext* cct) { ldout(cct, 20) << dendl; ceph_assert(!was_armed); get(); ++pending_count; } void AioCompletion::unblock(CephContext* cct) { ldout(cct, 20) << dendl; ceph_assert(was_armed); uint32_t previous_pending_count = pending_count--; ceph_assert(previous_pending_count > 0); if (previous_pending_count == 1) { queue_complete(); } put(); } void AioCompletion::fail(int r) { ceph_assert(ictx != nullptr); ceph_assert(r < 0); bool queue_required = true; if 
(aio_type == AIO_TYPE_CLOSE || aio_type == AIO_TYPE_OPEN) { // executing from a safe context and the ImageCtx has been destructed queue_required = false; } else { CephContext *cct = ictx->cct; lderr(cct) << cpp_strerror(r) << dendl; } ceph_assert(!was_armed); was_armed = true; rval = r; uint32_t previous_pending_count = pending_count.load(); if (previous_pending_count == 0) { if (queue_required) { queue_complete(); } else { complete(); } } } void AioCompletion::set_request_count(uint32_t count) { ceph_assert(ictx != nullptr); CephContext *cct = ictx->cct; ceph_assert(!was_armed); was_armed = true; ldout(cct, 20) << "pending=" << count << dendl; uint32_t previous_pending_count = pending_count.fetch_add(count); if (previous_pending_count == 0 && count == 0) { queue_complete(); } } void AioCompletion::complete_request(ssize_t r) { ceph_assert(ictx != nullptr); CephContext *cct = ictx->cct; if (r > 0) { rval += r; } else if (r < 0 && r != -EEXIST) { // might race w/ another thread setting an error code but // first one wins int zero = 0; error_rval.compare_exchange_strong(zero, r); } uint32_t previous_pending_count = pending_count--; ceph_assert(previous_pending_count > 0); auto pending_count = previous_pending_count - 1; ldout(cct, 20) << "cb=" << complete_cb << ", " << "pending=" << pending_count << dendl; if (pending_count == 0) { finalize(); complete(); } put(); } bool AioCompletion::is_complete() { tracepoint(librbd, aio_is_complete_enter, this); bool done = (this->state != AIO_STATE_PENDING); tracepoint(librbd, aio_is_complete_exit, done); return done; } ssize_t AioCompletion::get_return_value() { tracepoint(librbd, aio_get_return_value_enter, this); ssize_t r = rval; tracepoint(librbd, aio_get_return_value_exit, r); return r; } void AioCompletion::complete_external_callback() { get(); // ensure librbd external users never experience concurrent callbacks // from multiple librbd-internal threads. 
boost::asio::dispatch(ictx->asio_engine->get_api_strand(), [this]() { complete_cb(rbd_comp, complete_arg); complete_event_socket(); notify_callbacks_complete(); put(); }); } void AioCompletion::complete_event_socket() { if (ictx != nullptr && event_notify && ictx->event_socket.is_valid()) { ictx->event_socket_completions.push(this); ictx->event_socket.notify(); } } void AioCompletion::notify_callbacks_complete() { state = AIO_STATE_COMPLETE; { std::unique_lock<std::mutex> locker(lock); cond.notify_all(); } if (image_dispatcher_ctx != nullptr) { image_dispatcher_ctx->complete(rval); } // note: possible for image to be closed after op marked finished if (async_op.started()) { async_op.finish_op(); } } } // namespace io } // namespace librbd
7,328
23.844068
77
cc
null
ceph-main/src/librbd/io/AioCompletion.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_AIO_COMPLETION_H
#define CEPH_LIBRBD_IO_AIO_COMPLETION_H

#include "common/ceph_time.h"
#include "include/common_fwd.h"
#include "include/Context.h"
#include "include/utime.h"
#include "include/rbd/librbd.hpp"

#include "librbd/ImageCtx.h"
#include "librbd/io/AsyncOperation.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Types.h"

#include <atomic>
#include <condition_variable>
#include <mutex>

struct Context;

namespace librbd {
namespace io {

/**
 * AioCompletion is the overall completion for a single
 * rbd I/O request. It may be composed of many AioObjectRequests,
 * which each go to a single object.
 *
 * The retrying of individual requests is handled at a lower level,
 * so all AioCompletion cares about is the count of outstanding
 * requests. The number of expected individual requests should be
 * set initially using set_request_count() prior to issuing the
 * requests.  This ensures that the completion will not be completed
 * within the caller's thread of execution (instead via a librados
 * context or via a thread pool context for cache read hits).
 */
struct AioCompletion {
  typedef enum {
    AIO_STATE_PENDING = 0,   ///< sub-requests still outstanding
    AIO_STATE_CALLBACK,      ///< user callbacks being invoked
    AIO_STATE_COMPLETE,      ///< fully complete; waiters released
  } aio_state_t;

  // protects cond and the fields read by is_initialized()/is_started()
  mutable std::mutex lock;
  std::condition_variable cond;

  // user completion callback and its opaque argument
  callback_t complete_cb = nullptr;
  void *complete_arg = nullptr;
  rbd_completion_t rbd_comp = nullptr;

  /// note: only using atomic for built-in memory barrier
  std::atomic<aio_state_t> state{AIO_STATE_PENDING};

  // accumulated result; error_rval holds the first error reported by a
  // sub-request (see complete_request in the .cc)
  std::atomic<ssize_t> rval{0};
  std::atomic<int> error_rval{0};
  // intrusive reference count; object deletes itself when it drops to 0
  std::atomic<uint32_t> ref{1};
  std::atomic<uint32_t> pending_count{0};   ///< number of requests/blocks
  // guards against a double release() from the external API
  std::atomic<bool> released{false};

  ImageCtx *ictx = nullptr;
  coarse_mono_time start_time;
  aio_type_t aio_type = AIO_TYPE_NONE;

  ReadResult read_result;

  // tracks this request on the image's in-flight async op list
  AsyncOperation async_op;

  bool event_notify = false;      ///< deliver via the image's event socket
  bool was_armed = false;         ///< set_request_count()/fail() called
  bool external_callback = false;

  Context* image_dispatcher_ctx = nullptr;

  // Bridges the C-style completion callback to a member function pointer;
  // releases the completion after the member function returns.
  template <typename T, void (T::*MF)(int)>
  static void callback_adapter(completion_t cb, void *arg) {
    AioCompletion *comp = reinterpret_cast<AioCompletion *>(cb);
    T *t = reinterpret_cast<T *>(arg);
    (t->*MF)(comp->get_return_value());
    comp->release();
  }

  // Factory for external (librbd API) users supplying a raw callback.
  static AioCompletion *create(void *cb_arg, callback_t cb_complete,
                               rbd_completion_t rbd_comp) {
    AioCompletion *comp = new AioCompletion();
    comp->set_complete_cb(cb_arg, cb_complete);
    comp->rbd_comp = (rbd_comp != nullptr ? rbd_comp : comp);
    return comp;
  }

  // Factory for internal users: completion is routed to obj->*MF.
  template <typename T, void (T::*MF)(int) = &T::complete>
  static AioCompletion *create(T *obj) {
    AioCompletion *comp = new AioCompletion();
    comp->set_complete_cb(obj, &callback_adapter<T, MF>);
    comp->rbd_comp = comp;
    return comp;
  }

  // As create<T, MF>, but also associates the image/op type and starts
  // in-flight op tracking.
  template <typename T, void (T::*MF)(int) = &T::complete>
  static AioCompletion *create_and_start(T *obj, ImageCtx *image_ctx,
                                         aio_type_t type) {
    AioCompletion *comp = create<T, MF>(obj);
    comp->init_time(image_ctx, type);
    comp->start_op();
    return comp;
  }

  AioCompletion() {
  }

  ~AioCompletion() {
  }

  int wait_for_complete();

  void finalize();

  inline bool is_initialized(aio_type_t type) const {
    std::unique_lock<std::mutex> locker(lock);
    return ((ictx != nullptr) && (aio_type == type));
  }
  inline bool is_started() const {
    std::unique_lock<std::mutex> locker(lock);
    return async_op.started();
  }

  void block(CephContext* cct);
  void unblock(CephContext* cct);

  void init_time(ImageCtx *i, aio_type_t t);
  void start_op();
  void fail(int r);

  void complete();

  void set_complete_cb(void *cb_arg, callback_t cb) {
    complete_cb = cb;
    complete_arg = cb_arg;
  }

  void set_request_count(uint32_t num);
  // Takes a reference for one sub-request; paired with the put() inside
  // complete_request().  Only valid while pending_count > 0.
  void add_request() {
    ceph_assert(pending_count > 0);
    get();
  }
  void complete_request(ssize_t r);

  bool is_complete();

  ssize_t get_return_value();

  void get() {
    ceph_assert(ref > 0);
    ++ref;
  }
  // External API release: may only be called once per completion.
  void release() {
    bool previous_released = released.exchange(true);
    ceph_assert(!previous_released);
    put();
  }
  // Drops one reference; deletes the completion when the last one goes.
  void put() {
    uint32_t previous_ref = ref--;
    ceph_assert(previous_ref > 0);
    if (previous_ref == 1) {
      delete this;
    }
  }

  void set_event_notify(bool s) {
    event_notify = s;
  }

  void *get_arg() {
    return complete_arg;
  }

private:
  void queue_complete();
  void complete_external_callback();
  void complete_event_socket();
  void notify_callbacks_complete();
};

// Context adapter that feeds its completion code into one sub-request
// slot of an AioCompletion (reference taken on construction via
// add_request(), returned by complete_request()).
class C_AioRequest : public Context {
public:
  C_AioRequest(AioCompletion *completion) : m_completion(completion) {
    m_completion->add_request();
  }
  ~C_AioRequest() override {}
  void finish(int r) override {
    m_completion->complete_request(r);
  }
protected:
  AioCompletion *m_completion;
};

} // namespace io
} // namespace librbd

#endif // CEPH_LIBRBD_IO_AIO_COMPLETION_H
5,107
24.039216
74
h
null
ceph-main/src/librbd/io/AsyncOperation.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/io/AsyncOperation.h" #include "include/ceph_assert.h" #include "common/dout.h" #include "librbd/AsioEngine.h" #include "librbd/ImageCtx.h" #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::io::AsyncOperation: " namespace librbd { namespace io { namespace { struct C_CompleteFlushes : public Context { ImageCtx *image_ctx; std::list<Context *> flush_contexts; explicit C_CompleteFlushes(ImageCtx *image_ctx, std::list<Context *> &&flush_contexts) : image_ctx(image_ctx), flush_contexts(std::move(flush_contexts)) { } void finish(int r) override { std::shared_lock owner_locker{image_ctx->owner_lock}; while (!flush_contexts.empty()) { Context *flush_ctx = flush_contexts.front(); flush_contexts.pop_front(); ldout(image_ctx->cct, 20) << "completed flush: " << flush_ctx << dendl; flush_ctx->complete(0); } } }; } // anonymous namespace void AsyncOperation::start_op(ImageCtx &image_ctx) { ceph_assert(m_image_ctx == NULL); m_image_ctx = &image_ctx; ldout(m_image_ctx->cct, 20) << this << " " << __func__ << dendl; std::lock_guard l{m_image_ctx->async_ops_lock}; m_image_ctx->async_ops.push_front(&m_xlist_item); } void AsyncOperation::finish_op() { ldout(m_image_ctx->cct, 20) << this << " " << __func__ << dendl; { std::lock_guard l{m_image_ctx->async_ops_lock}; xlist<AsyncOperation *>::iterator iter(&m_xlist_item); ++iter; ceph_assert(m_xlist_item.remove_myself()); // linked list stored newest -> oldest ops if (!iter.end() && !m_flush_contexts.empty()) { ldout(m_image_ctx->cct, 20) << "moving flush contexts to previous op: " << *iter << dendl; (*iter)->m_flush_contexts.insert((*iter)->m_flush_contexts.end(), m_flush_contexts.begin(), m_flush_contexts.end()); return; } } if (!m_flush_contexts.empty()) { C_CompleteFlushes *ctx = new C_CompleteFlushes(m_image_ctx, std::move(m_flush_contexts)); m_image_ctx->asio_engine->post(ctx, 
0); } } void AsyncOperation::flush(Context* on_finish) { { std::lock_guard locker{m_image_ctx->async_ops_lock}; xlist<AsyncOperation *>::iterator iter(&m_xlist_item); ++iter; // linked list stored newest -> oldest ops if (!iter.end()) { (*iter)->m_flush_contexts.push_back(on_finish); return; } } m_image_ctx->asio_engine->post(on_finish, 0); } } // namespace io } // namespace librbd
2,761
28.073684
80
cc
null
ceph-main/src/librbd/io/AsyncOperation.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef LIBRBD_IO_ASYNC_OPERATION_H #define LIBRBD_IO_ASYNC_OPERATION_H #include "include/ceph_assert.h" #include "include/xlist.h" #include <list> class Context; namespace librbd { class ImageCtx; namespace io { class AsyncOperation { public: AsyncOperation() : m_image_ctx(NULL), m_xlist_item(this) { } ~AsyncOperation() { ceph_assert(!m_xlist_item.is_on_list()); } inline bool started() const { return m_xlist_item.is_on_list(); } void start_op(ImageCtx &image_ctx); void finish_op(); void flush(Context *on_finish); private: ImageCtx *m_image_ctx; xlist<AsyncOperation *>::item m_xlist_item; std::list<Context *> m_flush_contexts; }; } // namespace io } // namespace librbd #endif // LIBRBD_IO_ASYNC_OPERATION_H
871
15.45283
70
h
null
ceph-main/src/librbd/io/CopyupRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "librbd/io/CopyupRequest.h"
#include "include/neorados/RADOS.hpp"
#include "common/ceph_context.h"
#include "common/ceph_mutex.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/AsioEngine.h"
#include "librbd/AsyncObjectThrottle.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/asio/Utils.h"
#include "librbd/deep_copy/ObjectCopyRequest.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/io/ObjectRequest.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Utils.h"

#include <boost/lambda/bind.hpp>
#include <boost/lambda/construct.hpp>

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::CopyupRequest: " << this \
                           << " " << __func__ << ": " \
                           << data_object_name(m_image_ctx, m_object_no) << " "

namespace librbd {
namespace io {

using librbd::util::data_object_name;

namespace {

// Throttled sub-operation that updates the object map entry for this
// object at one snapshot id (index m_snap_id_idx into m_snap_ids).
// Returning 1 from send() tells the throttle the op completed inline.
template <typename I>
class C_UpdateObjectMap : public C_AsyncObjectThrottle<I> {
public:
  C_UpdateObjectMap(AsyncObjectThrottle<I> &throttle, I *image_ctx,
                    uint64_t object_no, uint8_t head_object_map_state,
                    const std::vector<uint64_t> *snap_ids,
                    bool first_snap_is_clean, const ZTracer::Trace &trace,
                    size_t snap_id_idx)
    : C_AsyncObjectThrottle<I>(throttle, *image_ctx), m_object_no(object_no),
      m_head_object_map_state(head_object_map_state), m_snap_ids(*snap_ids),
      m_first_snap_is_clean(first_snap_is_clean), m_trace(trace),
      m_snap_id_idx(snap_id_idx)
  {
  }

  int send() override {
    auto& image_ctx = this->m_image_ctx;
    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
    if (image_ctx.exclusive_lock == nullptr) {
      // exclusive lock lost -- nothing to update
      return 1;
    }
    ceph_assert(image_ctx.exclusive_lock->is_lock_owner());

    std::shared_lock image_locker{image_ctx.image_lock};
    if (image_ctx.object_map == nullptr) {
      return 1;
    }

    uint64_t snap_id = m_snap_ids[m_snap_id_idx];
    if (snap_id == CEPH_NOSNAP) {
      return update_head();
    } else {
      return update_snapshot(snap_id);
    }
  }

  // Update the HEAD revision's object map entry (non-idempotent update;
  // may be a no-op if the state already matches).
  int update_head() {
    auto& image_ctx = this->m_image_ctx;
    ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));

    bool sent = image_ctx.object_map->template aio_update<Context>(
      CEPH_NOSNAP, m_object_no, m_head_object_map_state, {}, m_trace, false,
      this);
    return (sent ? 0 : 1);
  }

  // Update the object map entry for the given snapshot id.
  int update_snapshot(uint64_t snap_id) {
    auto& image_ctx = this->m_image_ctx;
    ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));

    uint8_t state = OBJECT_EXISTS;
    if (image_ctx.test_features(RBD_FEATURE_FAST_DIFF, image_ctx.image_lock) &&
        (m_snap_id_idx > 0 || m_first_snap_is_clean)) {
      // first snapshot should be exists+dirty since it contains
      // the copyup data -- later snapshots inherit the data.
      state = OBJECT_EXISTS_CLEAN;
    }

    bool sent = image_ctx.object_map->template aio_update<Context>(
      snap_id, m_object_no, state, {}, m_trace, true, this);
    ceph_assert(sent);
    return 0;
  }

private:
  uint64_t m_object_no;
  uint8_t m_head_object_map_state;
  const std::vector<uint64_t> &m_snap_ids;
  bool m_first_snap_is_clean;
  const ZTracer::Trace &m_trace;
  size_t m_snap_id_idx;
};

} // anonymous namespace

template <typename I>
CopyupRequest<I>::CopyupRequest(I *ictx, uint64_t objectno,
                                Extents &&image_extents, ImageArea area,
                                const ZTracer::Trace &parent_trace)
  : m_image_ctx(ictx), m_object_no(objectno),
    m_image_extents(std::move(image_extents)), m_image_area(area),
    m_trace(librbd::util::create_trace(*m_image_ctx, "copy-up", parent_trace))
{
  ceph_assert(m_image_ctx->data_ctx.is_valid());
  // track this copyup as an in-flight async op on the image
  m_async_op.start_op(*librbd::util::get_image_ctx(m_image_ctx));
}

template <typename I>
CopyupRequest<I>::~CopyupRequest() {
  ceph_assert(m_pending_requests.empty());
  m_async_op.finish_op();
}

// Attach a write request to this in-flight copyup.  If appending is no
// longer permitted (copyup already past the read phase), the request is
// queued for restart instead (see complete_requests()).
template <typename I>
void CopyupRequest<I>::append_request(AbstractObjectWriteRequest<I> *req,
                                      const Extents& object_extents) {
  std::lock_guard locker{m_lock};

  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "object_request=" << req << ", "
                 << "append=" << m_append_request_permitted << dendl;
  if (m_append_request_permitted) {
    m_pending_requests.push_back(req);

    // remember which object ranges the write-ops cover so the parent
    // data they overwrite can be pruned in prepare_copyup_data()
    for (auto [offset, length] : object_extents) {
      if (length > 0) {
        m_write_object_extents.union_insert(offset, length);
      }
    }
  } else {
    m_restart_requests.push_back(req);
  }
}

template <typename I>
void CopyupRequest<I>::send() {
  read_from_parent();
}

// First state: read the object's backing data from the parent image (or
// divert to the deep-copy path when a migration is in progress).
template <typename I>
void CopyupRequest<I>::read_from_parent() {
  auto cct = m_image_ctx->cct;
  std::shared_lock image_locker{m_image_ctx->image_lock};

  if (m_image_ctx->parent == nullptr) {
    ldout(cct, 5) << "parent detached" << dendl;

    // simulate an empty parent read from a clean (posted) context
    m_image_ctx->asio_engine->post(
      [this]() { handle_read_from_parent(-ENOENT); });
    return;
  } else if (is_deep_copy()) {
    deep_copy();
    return;
  }

  auto comp = AioCompletion::create_and_start<
    CopyupRequest<I>,
    &CopyupRequest<I>::handle_read_from_parent>(
      this, librbd::util::get_image_ctx(m_image_ctx->parent), AIO_TYPE_READ);

  ldout(cct, 20) << "completion=" << comp
                 << " image_extents=" << m_image_extents
                 << " area=" << m_image_area << dendl;
  auto req = io::ImageDispatchSpec::create_read(
    *m_image_ctx->parent, io::IMAGE_DISPATCH_LAYER_INTERNAL_START, comp,
    std::move(m_image_extents), m_image_area,
    ReadResult{&m_copyup_extent_map, &m_copyup_data},
    m_image_ctx->parent->get_data_io_context(), 0, 0, m_trace);
  req->send();
}

// Processes the parent read result: freezes the request list, prepares
// the copyup payload, and decides whether any copyup work is required.
// NOTE: lock order is image_lock (shared) before m_lock throughout.
template <typename I>
void CopyupRequest<I>::handle_read_from_parent(int r) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "r=" << r << dendl;

  if (r < 0 && r != -ENOENT) {
    m_lock.lock();
    disable_append_requests();
    m_lock.unlock();

    lderr(cct) << "error reading from parent: " << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }

  convert_copyup_extent_map();

  m_image_ctx->image_lock.lock_shared();
  m_lock.lock();
  disable_append_requests();

  r = prepare_copyup_data();
  if (r < 0) {
    m_lock.unlock();
    m_image_ctx->image_lock.unlock_shared();

    lderr(m_image_ctx->cct) << "failed to prepare copyup data: "
                            << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }

  m_copyup_is_zero = m_copyup_data.is_zero();
  m_copyup_required = is_copyup_required();
  if (!m_copyup_required) {
    m_lock.unlock();
    m_image_ctx->image_lock.unlock_shared();

    ldout(cct, 20) << "no-op, skipping" << dendl;
    finish(0);
    return;
  }

  // copyup() will affect snapshots only if parent data is not all
  // zeros.
  if (!m_copyup_is_zero) {
    m_snap_ids.insert(m_snap_ids.end(), m_image_ctx->snaps.rbegin(),
                      m_image_ctx->snaps.rend());
  }

  m_lock.unlock();
  m_image_ctx->image_lock.unlock_shared();

  update_object_maps();
}

// Alternate first state used during image migration: deep-copy the
// object (including snapshot history) from the migration source.
template <typename I>
void CopyupRequest<I>::deep_copy() {
  auto cct = m_image_ctx->cct;
  ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock));
  ceph_assert(m_image_ctx->parent != nullptr);

  m_lock.lock();
  m_deep_copied = true;
  m_flatten = is_copyup_required() ? true : m_image_ctx->migration_info.flatten;
  m_lock.unlock();

  ldout(cct, 20) << "flatten=" << m_flatten << dendl;

  uint32_t flags = deep_copy::OBJECT_COPY_REQUEST_FLAG_MIGRATION;
  if (m_flatten) {
    flags |= deep_copy::OBJECT_COPY_REQUEST_FLAG_FLATTEN;
  }

  auto ctx = librbd::util::create_context_callback<
    CopyupRequest<I>, &CopyupRequest<I>::handle_deep_copy>(this);
  auto req = deep_copy::ObjectCopyRequest<I>::create(
    m_image_ctx->parent, m_image_ctx, 0, 0,
    m_image_ctx->migration_info.snap_map, m_object_no, flags, nullptr, ctx);

  req->send();
}

template <typename I>
void CopyupRequest<I>::handle_deep_copy(int r) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "r=" << r << dendl;

  m_image_ctx->image_lock.lock_shared();
  m_lock.lock();
  m_copyup_required = is_copyup_required();
  if (r == -ENOENT && !m_flatten && m_copyup_required) {
    m_lock.unlock();
    m_image_ctx->image_lock.unlock_shared();

    // object absent on the source but a write must land here: redo the
    // deep-copy in flatten mode so the object gets created
    ldout(cct, 10) << "restart deep-copy with flatten" << dendl;
    send();
    return;
  }

  disable_append_requests();

  if (r < 0 && r != -ENOENT) {
    m_lock.unlock();
    m_image_ctx->image_lock.unlock_shared();

    lderr(cct) << "error encountered during deep-copy: " << cpp_strerror(r)
               << dendl;
    finish(r);
    return;
  }

  if (!m_copyup_required && !is_update_object_map_required(r)) {
    m_lock.unlock();
    m_image_ctx->image_lock.unlock_shared();

    if (r == -ENOENT) {
      r = 0;
    }

    ldout(cct, 20) << "skipping" << dendl;
    finish(r);
    return;
  }

  // For deep-copy, copyup() will never affect snapshots.  However,
  // this state machine is responsible for updating object maps for
  // snapshots that have been created on destination image after
  // migration started.
  if (r != -ENOENT) {
    compute_deep_copy_snap_ids();
  }

  m_lock.unlock();
  m_image_ctx->image_lock.unlock_shared();

  update_object_maps();
}

// Second state: update object map entries for the HEAD revision and for
// each snapshot in m_snap_ids, throttled via AsyncObjectThrottle.
// Skipped entirely when the object map feature is disabled.
template <typename I>
void CopyupRequest<I>::update_object_maps() {
  std::shared_lock owner_locker{m_image_ctx->owner_lock};
  std::shared_lock image_locker{m_image_ctx->image_lock};
  if (m_image_ctx->object_map == nullptr) {
    image_locker.unlock();
    owner_locker.unlock();

    copyup();
    return;
  }

  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << dendl;

  bool copy_on_read = m_pending_requests.empty();
  uint8_t head_object_map_state = OBJECT_EXISTS;
  if (copy_on_read && !m_snap_ids.empty() &&
      m_image_ctx->test_features(RBD_FEATURE_FAST_DIFF,
                                 m_image_ctx->image_lock)) {
    // HEAD is non-dirty since data is tied to first snapshot
    head_object_map_state = OBJECT_EXISTS_CLEAN;
  }

  auto r_it = m_pending_requests.rbegin();
  if (r_it != m_pending_requests.rend()) {
    // last write-op determines the final object map state
    head_object_map_state = (*r_it)->get_pre_write_object_map_state();
  }

  if ((*m_image_ctx->object_map)[m_object_no] != head_object_map_state) {
    // (maybe) need to update the HEAD object map state
    m_snap_ids.push_back(CEPH_NOSNAP);
  }
  image_locker.unlock();

  ceph_assert(m_image_ctx->exclusive_lock->is_lock_owner());
  typename AsyncObjectThrottle<I>::ContextFactory context_factory(
    boost::lambda::bind(boost::lambda::new_ptr<C_UpdateObjectMap<I>>(),
                        boost::lambda::_1, m_image_ctx, m_object_no,
                        head_object_map_state, &m_snap_ids,
                        m_first_snap_is_clean, m_trace, boost::lambda::_2));
  auto ctx = librbd::util::create_context_callback<
    CopyupRequest<I>, &CopyupRequest<I>::handle_update_object_maps>(this);
  auto throttle = new AsyncObjectThrottle<I>(
    nullptr, *m_image_ctx, context_factory, ctx, nullptr, 0, m_snap_ids.size());
  throttle->start_ops(
    m_image_ctx->config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
}

template <typename I>
void CopyupRequest<I>::handle_update_object_maps(int r) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_image_ctx->cct) << "failed to update object map: "
                            << cpp_strerror(r) << dendl;

    finish(r);
    return;
  }

  copyup();
}

// Final state: issue the copyup (and any merged write-ops) to RADOS.
// Depending on snapshots and pending writes this may issue one op (a
// combined copyup+writes) or two (a snapshot-context-free copyup plus a
// separate write op) -- m_pending_copyups counts the ops issued.
template <typename I>
void CopyupRequest<I>::copyup() {
  auto cct = m_image_ctx->cct;
  m_image_ctx->image_lock.lock_shared();
  auto snapc = m_image_ctx->snapc;
  auto io_context = m_image_ctx->get_data_io_context();
  m_image_ctx->image_lock.unlock_shared();

  m_lock.lock();
  if (!m_copyup_required) {
    m_lock.unlock();

    ldout(cct, 20) << "skipping copyup" << dendl;
    finish(0);
    return;
  }

  ldout(cct, 20) << dendl;

  bool copy_on_read = m_pending_requests.empty() && !m_deep_copied;
  bool deep_copyup = !snapc.snaps.empty() && !m_copyup_is_zero;
  if (m_copyup_is_zero) {
    m_copyup_data.clear();
    m_copyup_extent_map.clear();
  }

  neorados::WriteOp copyup_op;
  neorados::WriteOp write_op;
  neorados::WriteOp* op;
  if (copy_on_read || deep_copyup) {
    // copyup-op will use its own request issued to the initial object revision
    op = &copyup_op;
    ++m_pending_copyups;
  } else {
    // copyup-op can be combined with the write-ops (if any)
    op = &write_op;
  }

  if (m_image_ctx->enable_sparse_copyup) {
    cls_client::sparse_copyup(op, m_copyup_extent_map, m_copyup_data);
  } else {
    // convert the sparse read back into a standard (thick) read
    Striper::StripedReadResult destriper;
    destriper.add_partial_sparse_result(
      cct, std::move(m_copyup_data), m_copyup_extent_map, 0,
      {{0, m_image_ctx->layout.object_size}});

    bufferlist thick_bl;
    destriper.assemble_result(cct, thick_bl, false);
    cls_client::copyup(op, thick_bl);
  }
  ObjectRequest<I>::add_write_hint(*m_image_ctx, op);

  if (!copy_on_read) {
    // merge all pending write ops into this single RADOS op
    for (auto req : m_pending_requests) {
      ldout(cct, 20) << "add_copyup_ops " << req << dendl;
      req->add_copyup_ops(&write_op);
    }

    if (write_op.size() > 0) {
      ++m_pending_copyups;
    }
  }
  m_lock.unlock();

  // issue librados ops at the end to simplify test cases
  auto object = neorados::Object{data_object_name(m_image_ctx, m_object_no)};
  if (copyup_op.size() > 0) {
    // send only the copyup request with a blank snapshot context so that
    // all snapshots are detected from the parent for this object.  If
    // this is a CoW request, a second request will be created for the
    // actual modification.
    ldout(cct, 20) << "copyup with empty snapshot context" << dendl;

    auto copyup_io_context = *io_context;
    copyup_io_context.write_snap_context({});

    m_image_ctx->rados_api.execute(
      object, copyup_io_context, std::move(copyup_op),
      librbd::asio::util::get_callback_adapter(
        [this](int r) { handle_copyup(r); }), nullptr,
        (this->m_trace.valid() ? this->m_trace.get_info() : nullptr));
  }

  if (write_op.size() > 0) {
    // compare-and-write doesn't add any write ops (copyup+cmpext+write
    // can't be executed in the same RADOS op because, unless the object
    // was already present in the clone, cmpext wouldn't see it)
    ldout(cct, 20) << (!deep_copyup && write_op.size() > 2 ?
                        "copyup + ops" : !deep_copyup ? "copyup" : "ops")
                   << " with current snapshot context" << dendl;

    m_image_ctx->rados_api.execute(
      object, *io_context, std::move(write_op),
      librbd::asio::util::get_callback_adapter(
        [this](int r) { handle_copyup(r); }), nullptr,
        (this->m_trace.valid() ? this->m_trace.get_info() : nullptr));
  }
}

// Handles completion of one of the (up to two) RADOS ops issued by
// copyup(); the first error reported wins (m_copyup_ret_val).
template <typename I>
void CopyupRequest<I>::handle_copyup(int r) {
  auto cct = m_image_ctx->cct;
  unsigned pending_copyups;
  int copyup_ret_val = r;
  {
    std::lock_guard locker{m_lock};
    ceph_assert(m_pending_copyups > 0);
    pending_copyups = --m_pending_copyups;
    if (m_copyup_ret_val < 0) {
      copyup_ret_val = m_copyup_ret_val;
    } else if (r < 0) {
      m_copyup_ret_val = r;
    }
  }

  ldout(cct, 20) << "r=" << r << ", " << "pending=" << pending_copyups << dendl;

  if (pending_copyups == 0) {
    if (copyup_ret_val < 0 && copyup_ret_val != -ENOENT) {
      lderr(cct) << "failed to copyup object: " << cpp_strerror(copyup_ret_val)
                 << dendl;
      complete_requests(false, copyup_ret_val);
    }

    finish(0);
  }
}

// Terminates the state machine; the request deletes itself here.
template <typename I>
void CopyupRequest<I>::finish(int r) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "r=" << r << dendl;

  complete_requests(true, r);
  delete this;
}

// Completes all attached write requests with r, and restarts any late
// arrivals (with -ERESTART when override_restart_retval is set, so they
// re-issue against the now-copied-up object).
template <typename I>
void CopyupRequest<I>::complete_requests(bool override_restart_retval, int r) {
  auto cct = m_image_ctx->cct;
  remove_from_list();

  while (!m_pending_requests.empty()) {
    auto it = m_pending_requests.begin();
    auto req = *it;
    ldout(cct, 20) << "completing request " << req << dendl;
    req->handle_copyup(r);
    m_pending_requests.erase(it);
  }

  if (override_restart_retval) {
    r = -ERESTART;
  }

  while (!m_restart_requests.empty()) {
    auto it = m_restart_requests.begin();
    auto req = *it;
    ldout(cct, 20) << "restarting request " << req << dendl;
    req->handle_copyup(r);
    m_restart_requests.erase(it);
  }
}

// After this point, newly arriving write requests go onto the restart
// list instead of being merged into the copyup (see append_request()).
template <typename I>
void CopyupRequest<I>::disable_append_requests() {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  m_append_request_permitted = false;
}

// Deregisters this request from the image's per-object copyup registry.
template <typename I>
void CopyupRequest<I>::remove_from_list() {
  std::lock_guard copyup_list_locker{m_image_ctx->copyup_list_lock};

  auto it = m_image_ctx->copyup_list.find(m_object_no);
  if (it != m_image_ctx->copyup_list.end()) {
    m_image_ctx->copyup_list.erase(it);
  }
}

// A copyup is needed when reading ahead (copy-on-read), when the parent
// data is non-zero, or when at least one pending write actually writes.
template <typename I>
bool CopyupRequest<I>::is_copyup_required() {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  bool copy_on_read = m_pending_requests.empty();
  if (copy_on_read) {
    // always force a copyup if CoR enabled
    return true;
  }

  if (!m_copyup_is_zero) {
    return true;
  }

  for (auto req : m_pending_requests) {
    if (!req->is_empty_write_op()) {
      return true;
    }
  }
  return false;
}

template <typename I>
bool CopyupRequest<I>::is_deep_copy() const {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock));
  return !m_image_ctx->migration_info.empty();
}

// Decides whether object maps still need updating after a deep-copy
// completed with result r.
template <typename I>
bool CopyupRequest<I>::is_update_object_map_required(int r) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock));

  if (r < 0) {
    return false;
  }

  if (m_image_ctx->object_map == nullptr) {
    return false;
  }

  if (m_image_ctx->migration_info.empty()) {
    // migration might have completed while IO was in-flight,
    // assume worst-case and perform an object map update
    return true;
  }

  auto it = m_image_ctx->migration_info.snap_map.find(CEPH_NOSNAP);
  ceph_assert(it != m_image_ctx->migration_info.snap_map.end());
  return it->second[0] != CEPH_NOSNAP;
}

// Builds m_snap_ids for the deep-copy path: snapshots already handled by
// the object deep-copy, or with no parent overlap for this object, are
// excluded.
template <typename I>
void CopyupRequest<I>::compute_deep_copy_snap_ids() {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock));

  // don't copy ids for the snaps updated by object deep copy or
  // that don't overlap
  std::set<uint64_t> deep_copied;
  for (auto &it : m_image_ctx->migration_info.snap_map) {
    if (it.first != CEPH_NOSNAP) {
      deep_copied.insert(it.second.front());
    }
  }
  ldout(m_image_ctx->cct, 15) << "deep_copied=" << deep_copied << dendl;

  std::copy_if(m_image_ctx->snaps.rbegin(), m_image_ctx->snaps.rend(),
               std::back_inserter(m_snap_ids),
               [this, cct=m_image_ctx->cct, &deep_copied](uint64_t snap_id) {
      if (deep_copied.count(snap_id)) {
        m_first_snap_is_clean = true;
        return false;
      }

      uint64_t raw_overlap = 0;
      uint64_t object_overlap = 0;
      int r = m_image_ctx->get_parent_overlap(snap_id, &raw_overlap);
      if (r < 0) {
        ldout(cct, 5) << "failed getting parent overlap for snap_id: "
                      << snap_id << ": " << cpp_strerror(r) << dendl;
      } else if (raw_overlap > 0) {
        auto [parent_extents, area] = util::object_to_area_extents(
            m_image_ctx, m_object_no, {{0, m_image_ctx->layout.object_size}});
        object_overlap = m_image_ctx->prune_parent_extents(parent_extents, area,
                                                           raw_overlap, false);
      }
      return object_overlap > 0;
    });
}

// Rewrites m_copyup_extent_map from image-area coordinates (as returned
// by the parent read) into object-local offsets.
template <typename I>
void CopyupRequest<I>::convert_copyup_extent_map() {
  auto cct = m_image_ctx->cct;

  Extents image_extent_map;
  image_extent_map.swap(m_copyup_extent_map);
  m_copyup_extent_map.reserve(image_extent_map.size());

  // convert the image-extent extent map to object-extents
  for (auto [image_offset, image_length] : image_extent_map) {
    striper::LightweightObjectExtents object_extents;
    util::area_to_object_extents(m_image_ctx, image_offset, image_length,
                                 m_image_area, 0, &object_extents);
    for (auto& object_extent : object_extents) {
      m_copyup_extent_map.emplace_back(
        object_extent.offset, object_extent.length);
    }
  }

  ldout(cct, 20) << "image_extents=" << image_extent_map << ", "
                 << "object_extents=" << m_copyup_extent_map << dendl;
}

// Assembles the copyup payload as a sparse bufferlist (pruning ranges
// already covered by pending write-ops where safe), lets the object
// dispatch layers transform it, then flattens it back into
// m_copyup_data / m_copyup_extent_map.  Returns 0 or a negative errno.
template <typename I>
int CopyupRequest<I>::prepare_copyup_data() {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock));
  auto cct = m_image_ctx->cct;

  SnapshotSparseBufferlist snapshot_sparse_bufferlist;
  auto& sparse_bufferlist = snapshot_sparse_bufferlist[0];

  bool copy_on_read = m_pending_requests.empty();
  bool maybe_deep_copyup = !m_image_ctx->snapc.snaps.empty();
  if (copy_on_read || maybe_deep_copyup) {
    // stand-alone copyup that will not be overwritten until HEAD revision
    ldout(cct, 20) << "processing full copy-up" << dendl;

    uint64_t buffer_offset = 0;
    for (auto [object_offset, object_length] : m_copyup_extent_map) {
      bufferlist sub_bl;
      sub_bl.substr_of(m_copyup_data, buffer_offset, object_length);
      buffer_offset += object_length;

      sparse_bufferlist.insert(
        object_offset, object_length,
        {SPARSE_EXTENT_STATE_DATA, object_length, std::move(sub_bl)});
    }
  } else {
    // copyup that will concurrently written to the HEAD revision with the
    // associated write-ops so only process partial extents
    uint64_t buffer_offset = 0;
    for (auto [object_offset, object_length] : m_copyup_extent_map) {
      interval_set<uint64_t> copyup_object_extents;
      copyup_object_extents.insert(object_offset, object_length);

      interval_set<uint64_t> intersection;
      intersection.intersection_of(copyup_object_extents,
                                   m_write_object_extents);

      // extract only portions of the parent copyup data that have not
      // been overwritten by write-ops
      copyup_object_extents.subtract(intersection);
      for (auto [copyup_offset, copyup_length] : copyup_object_extents) {
        bufferlist sub_bl;
        sub_bl.substr_of(
          m_copyup_data, buffer_offset + (copyup_offset - object_offset),
          copyup_length);
        ceph_assert(sub_bl.length() == copyup_length);

        sparse_bufferlist.insert(
          copyup_offset, copyup_length,
          {SPARSE_EXTENT_STATE_DATA, copyup_length, std::move(sub_bl)});
      }

      buffer_offset += object_length;
    }

    ldout(cct, 20) << "processing partial copy-up: " << sparse_bufferlist
                   << dendl;
  }

  // Let dispatch layers have a chance to process the data
  auto r = m_image_ctx->io_object_dispatcher->prepare_copyup(
    m_object_no, &snapshot_sparse_bufferlist);
  if (r < 0) {
    return r;
  }

  // Convert sparse extents back to extent map
  m_copyup_data.clear();
  m_copyup_extent_map.clear();
  m_copyup_extent_map.reserve(sparse_bufferlist.ext_count());
  for (auto& extent : sparse_bufferlist) {
    auto& sbe = extent.get_val();
    if (sbe.state == SPARSE_EXTENT_STATE_DATA) {
      m_copyup_extent_map.emplace_back(extent.get_off(), extent.get_len());
      m_copyup_data.append(sbe.bl);
    }
  }

  return 0;
}

} // namespace io
} // namespace librbd

template class librbd::io::CopyupRequest<librbd::ImageCtx>;
23,927
29.914729
85
cc
null
ceph-main/src/librbd/io/CopyupRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IO_COPYUP_REQUEST_H #define CEPH_LIBRBD_IO_COPYUP_REQUEST_H #include "include/int_types.h" #include "include/buffer.h" #include "include/interval_set.h" #include "common/ceph_mutex.h" #include "common/zipkin_trace.h" #include "librbd/io/AsyncOperation.h" #include "librbd/io/Types.h" #include <map> #include <string> #include <vector> namespace ZTracer { struct Trace; } namespace librbd { struct ImageCtx; namespace io { template <typename I> class AbstractObjectWriteRequest; template <typename ImageCtxT = librbd::ImageCtx> class CopyupRequest { public: static CopyupRequest* create(ImageCtxT *ictx, uint64_t objectno, Extents &&image_extents, ImageArea area, const ZTracer::Trace &parent_trace) { return new CopyupRequest(ictx, objectno, std::move(image_extents), area, parent_trace); } CopyupRequest(ImageCtxT *ictx, uint64_t objectno, Extents &&image_extents, ImageArea area, const ZTracer::Trace &parent_trace); ~CopyupRequest(); void append_request(AbstractObjectWriteRequest<ImageCtxT> *req, const Extents& object_extents); void send(); private: /** * Copyup requests go through the following state machine to read from the * parent image, update the object map, and copyup the object: * * * @verbatim * * <start> * | * /---------/ \---------\ * | | * v v * READ_FROM_PARENT DEEP_COPY * | | * \---------\ /---------/ * | * v (skip if not needed) * UPDATE_OBJECT_MAPS * | * v (skip if not needed) * COPYUP * | * v * <finish> * * @endverbatim * * The OBJECT_MAP state is skipped if the object map isn't enabled or if * an object map update isn't required. The COPYUP state is skipped if * no data was read from the parent *and* there are no additional ops. 
*/ typedef std::vector<AbstractObjectWriteRequest<ImageCtxT> *> WriteRequests; ImageCtxT *m_image_ctx; uint64_t m_object_no; Extents m_image_extents; ImageArea m_image_area; ZTracer::Trace m_trace; bool m_flatten = false; bool m_copyup_required = true; bool m_copyup_is_zero = true; bool m_deep_copied = false; Extents m_copyup_extent_map; ceph::bufferlist m_copyup_data; AsyncOperation m_async_op; std::vector<uint64_t> m_snap_ids; bool m_first_snap_is_clean = false; ceph::mutex m_lock = ceph::make_mutex("CopyupRequest", false); WriteRequests m_pending_requests; unsigned m_pending_copyups = 0; int m_copyup_ret_val = 0; WriteRequests m_restart_requests; bool m_append_request_permitted = true; interval_set<uint64_t> m_write_object_extents; void read_from_parent(); void handle_read_from_parent(int r); void deep_copy(); void handle_deep_copy(int r); void update_object_maps(); void handle_update_object_maps(int r); void copyup(); void handle_copyup(int r); void finish(int r); void complete_requests(bool override_restart_retval, int r); void disable_append_requests(); void remove_from_list(); bool is_copyup_required(); bool is_update_object_map_required(int r); bool is_deep_copy() const; void compute_deep_copy_snap_ids(); void convert_copyup_extent_map(); int prepare_copyup_data(); }; } // namespace io } // namespace librbd extern template class librbd::io::CopyupRequest<librbd::ImageCtx>; #endif // CEPH_LIBRBD_IO_COPYUP_REQUEST_H
3,844
25.335616
77
h
null
ceph-main/src/librbd/io/Dispatcher.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_IO_DISPATCHER_H
#define CEPH_LIBRBD_IO_DISPATCHER_H

#include "include/int_types.h"
#include "include/Context.h"
#include "common/ceph_mutex.h"
#include "common/dout.h"
#include "common/AsyncOpTracker.h"
#include "librbd/Utils.h"
#include "librbd/io/DispatcherInterface.h"
#include "librbd/io/Types.h"
#include <map>

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::Dispatcher: " << this \
                           << " " << __func__ << ": "

namespace librbd {
namespace io {

// Generic layered dispatcher: IO specs are walked through an ordered map of
// dispatch layers until one claims ("handles") the request.  Each layer is
// paired with an AsyncOpTracker so it can be shut down only once its
// in-flight ops have drained.
template <typename ImageCtxT, typename DispatchInterfaceT>
class Dispatcher : public DispatchInterfaceT {
public:
  typedef typename DispatchInterfaceT::Dispatch Dispatch;
  typedef typename DispatchInterfaceT::DispatchLayer DispatchLayer;
  typedef typename DispatchInterfaceT::DispatchSpec DispatchSpec;

  Dispatcher(ImageCtxT* image_ctx)
    : m_image_ctx(image_ctx),
      m_lock(ceph::make_shared_mutex(
        librbd::util::unique_lock_name("librbd::io::Dispatcher::lock",
                                       this))) {
  }

  virtual ~Dispatcher() {
    ceph_assert(m_dispatches.empty());
  }

  // Shut down every registered layer, chaining their completions onto
  // on_finish (see shut_down_dispatch below for how the chain is built).
  void shut_down(Context* on_finish) override {
    auto cct = m_image_ctx->cct;
    ldout(cct, 5) << dendl;

    std::map<DispatchLayer, DispatchMeta> dispatches;
    {
      std::unique_lock locker{m_lock};
      std::swap(dispatches, m_dispatches);
    }

    for (auto it : dispatches) {
      shut_down_dispatch(it.second, &on_finish);
    }
    on_finish->complete(0);
  }

  // Each layer type may be registered at most once (asserted).
  void register_dispatch(Dispatch* dispatch) override {
    auto cct = m_image_ctx->cct;
    auto type = dispatch->get_dispatch_layer();
    ldout(cct, 5) << "dispatch_layer=" << type << dendl;

    std::unique_lock locker{m_lock};
    auto result = m_dispatches.insert(
      {type, {dispatch, new AsyncOpTracker()}});
    ceph_assert(result.second);
  }

  bool exists(DispatchLayer dispatch_layer) override {
    std::unique_lock locker{m_lock};
    return m_dispatches.find(dispatch_layer) != m_dispatches.end();
  }

  void shut_down_dispatch(DispatchLayer dispatch_layer,
                          Context* on_finish) override {
    auto cct = m_image_ctx->cct;
    ldout(cct, 5) << "dispatch_layer=" << dispatch_layer << dendl;

    DispatchMeta dispatch_meta;
    {
      std::unique_lock locker{m_lock};
      auto it = m_dispatches.find(dispatch_layer);
      if (it == m_dispatches.end()) {
        on_finish->complete(0);
        return;
      }

      dispatch_meta = it->second;
      m_dispatches.erase(it);
    }

    shut_down_dispatch(dispatch_meta, &on_finish);
    on_finish->complete(0);
  }

  void send(DispatchSpec* dispatch_spec) {
    auto cct = m_image_ctx->cct;
    ldout(cct, 20) << "dispatch_spec=" << dispatch_spec << dendl;

    auto dispatch_layer = dispatch_spec->dispatch_layer;

    // apply the IO request to all layers -- this method will be re-invoked
    // by the dispatch layer if continuing / restarting the IO
    while (true) {
      m_lock.lock_shared();
      dispatch_layer = dispatch_spec->dispatch_layer;
      auto it = m_dispatches.upper_bound(dispatch_layer);
      if (it == m_dispatches.end()) {
        // the request is complete if handled by all layers
        dispatch_spec->dispatch_result = DISPATCH_RESULT_COMPLETE;
        m_lock.unlock_shared();
        break;
      }

      auto& dispatch_meta = it->second;
      auto dispatch = dispatch_meta.dispatch;
      auto async_op_tracker = dispatch_meta.async_op_tracker;
      dispatch_spec->dispatch_result = DISPATCH_RESULT_INVALID;

      // prevent recursive locking back into the dispatcher while handling IO
      async_op_tracker->start_op();
      m_lock.unlock_shared();

      // advance to next layer in case we skip or continue
      dispatch_spec->dispatch_layer = dispatch->get_dispatch_layer();

      bool handled = send_dispatch(dispatch, dispatch_spec);
      async_op_tracker->finish_op();

      // handled ops will resume when the dispatch ctx is invoked
      if (handled) {
        return;
      }
    }

    // skipped through to the last layer
    dispatch_spec->dispatcher_ctx.complete(0);
  }

protected:
  // a registered layer plus its in-flight-op tracker (both heap-allocated,
  // owned by this dispatcher and freed during shutdown)
  struct DispatchMeta {
    Dispatch* dispatch = nullptr;
    AsyncOpTracker* async_op_tracker = nullptr;

    DispatchMeta() {
    }
    DispatchMeta(Dispatch* dispatch, AsyncOpTracker* async_op_tracker)
      : dispatch(dispatch), async_op_tracker(async_op_tracker) {
    }
  };

  ImageCtxT* m_image_ctx;

  ceph::shared_mutex m_lock;
  std::map<DispatchLayer, DispatchMeta> m_dispatches;

  // subclass hook: route dispatch_spec to the layer; returns true if the
  // layer took ownership of completing the request
  virtual bool send_dispatch(Dispatch* dispatch,
                             DispatchSpec* dispatch_spec) = 0;

protected:
  // Context that walks the layers in order, invoking execute() on each until
  // one handles it; re-invoked (via complete) as each layer finishes.
  struct C_LayerIterator : public Context {
    Dispatcher* dispatcher;
    Context* on_finish;
    DispatchLayer dispatch_layer;

    C_LayerIterator(Dispatcher* dispatcher,
                    DispatchLayer start_layer,
                    Context* on_finish)
      : dispatcher(dispatcher), on_finish(on_finish),
        dispatch_layer(start_layer) {
    }

    void complete(int r) override {
      while (true) {
        dispatcher->m_lock.lock_shared();
        auto it = dispatcher->m_dispatches.upper_bound(dispatch_layer);
        if (it == dispatcher->m_dispatches.end()) {
          // visited all layers; fire finish() and self-destruct
          dispatcher->m_lock.unlock_shared();
          Context::complete(r);
          return;
        }

        auto& dispatch_meta = it->second;
        auto dispatch = dispatch_meta.dispatch;

        // prevent recursive locking back into the dispatcher while handling IO
        dispatch_meta.async_op_tracker->start_op();
        dispatcher->m_lock.unlock_shared();

        // next loop should start after current layer
        dispatch_layer = dispatch->get_dispatch_layer();

        auto handled = execute(dispatch, this);
        dispatch_meta.async_op_tracker->finish_op();

        if (handled) {
          break;
        }
      }
    }

    void finish(int r) override {
      // NOTE(review): r is intentionally dropped; callers always observe 0
      on_finish->complete(0);
    }

    virtual bool execute(Dispatch* dispatch, Context* on_finish) = 0;
  };

  struct C_InvalidateCache : public C_LayerIterator {
    C_InvalidateCache(Dispatcher* dispatcher, DispatchLayer start_layer,
                      Context* on_finish)
      : C_LayerIterator(dispatcher, start_layer, on_finish) {
    }

    bool execute(Dispatch* dispatch, Context* on_finish) override {
      return dispatch->invalidate_cache(on_finish);
    }
  };

private:
  // Prepend a shutdown sequence for one layer onto *on_finish:
  //   wait for in-flight ops -> layer->shut_down() -> delete layer/tracker
  //   -> original *on_finish
  void shut_down_dispatch(DispatchMeta& dispatch_meta,
                          Context** on_finish) {
    auto dispatch = dispatch_meta.dispatch;
    auto async_op_tracker = dispatch_meta.async_op_tracker;

    auto ctx = *on_finish;
    ctx = new LambdaContext(
      [dispatch, async_op_tracker, ctx](int r) {
        delete dispatch;
        delete async_op_tracker;
        ctx->complete(r);
      });
    ctx = new LambdaContext([dispatch, ctx](int r) {
        dispatch->shut_down(ctx);
      });
    *on_finish = new LambdaContext([async_op_tracker, ctx](int r) {
        async_op_tracker->wait_for_ops(ctx);
      });
  }
};

} // namespace io
} // namespace librbd

#undef dout_subsys
#undef dout_prefix
#define dout_prefix *_dout

#endif // CEPH_LIBRBD_IO_DISPATCHER_H
7,351
28.059289
92
h
null
ceph-main/src/librbd/io/DispatcherInterface.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IO_DISPATCHER_INTERFACE_H #define CEPH_LIBRBD_IO_DISPATCHER_INTERFACE_H #include "include/int_types.h" struct Context; namespace librbd { namespace io { template <typename DispatchT> struct DispatcherInterface { public: typedef DispatchT Dispatch; typedef typename DispatchT::DispatchLayer DispatchLayer; typedef typename DispatchT::DispatchSpec DispatchSpec; virtual ~DispatcherInterface() { } virtual void shut_down(Context* on_finish) = 0; virtual void register_dispatch(Dispatch* dispatch) = 0; virtual bool exists(DispatchLayer dispatch_layer) = 0; virtual void shut_down_dispatch(DispatchLayer dispatch_layer, Context* on_finish) = 0; virtual void send(DispatchSpec* dispatch_spec) = 0; }; } // namespace io } // namespace librbd #endif // CEPH_LIBRBD_IO_DISPATCHER_INTERFACE_H
967
24.473684
70
h
null
ceph-main/src/librbd/io/FlushTracker.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "librbd/io/FlushTracker.h"
#include "common/dout.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::FlushTracker: " << this \
                           << " " << __func__ << ": "

namespace librbd {
namespace io {

template <typename I>
FlushTracker<I>::FlushTracker(I* image_ctx)
  : m_image_ctx(image_ctx),
    m_lock(ceph::make_shared_mutex(
      util::unique_lock_name("librbd::io::FlushTracker::m_lock", this))) {
}

template <typename I>
FlushTracker<I>::~FlushTracker() {
  std::unique_lock locker{m_lock};
  ceph_assert(m_flush_contexts.empty());
}

// Fire every queued flush context with success; no further tracking occurs.
template <typename I>
void FlushTracker<I>::shut_down() {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << dendl;

  std::unique_lock locker{m_lock};
  Contexts flush_ctxs;
  for (auto& [flush_tid, ctxs] : m_flush_contexts) {
    flush_ctxs.insert(flush_ctxs.end(), ctxs.begin(), ctxs.end());
  }
  m_flush_contexts.clear();
  locker.unlock();

  // complete outside the lock to avoid re-entrant deadlock
  for (auto ctx : flush_ctxs) {
    ctx->complete(0);
  }
}

// Register an in-flight write identified by tid; returns the monotonically
// increasing flush tid assigned to it.  If tid was already registered the
// existing flush tid is reused (insert() is a no-op on duplicates).
template <typename I>
uint64_t FlushTracker<I>::start_io(uint64_t tid) {
  auto cct = m_image_ctx->cct;

  std::unique_lock locker{m_lock};
  auto [it, inserted] = m_tid_to_flush_tid.insert({tid, ++m_next_flush_tid});
  auto flush_tid = it->second;
  m_in_flight_flush_tids.insert(flush_tid);
  locker.unlock();

  ldout(cct, 20) << "tid=" << tid << ", flush_tid=" << flush_tid << dendl;
  return flush_tid;
}

// Mark the write identified by tid finished and complete any flush contexts
// whose flush tid now precedes every remaining in-flight write.
template <typename I>
void FlushTracker<I>::finish_io(uint64_t tid) {
  auto cct = m_image_ctx->cct;

  std::unique_lock locker{m_lock};
  auto tid_to_flush_tid_it = m_tid_to_flush_tid.find(tid);
  if (tid_to_flush_tid_it == m_tid_to_flush_tid.end()) {
    // unknown tid (e.g. finish without matching start) -- ignore
    return;
  }

  auto flush_tid = tid_to_flush_tid_it->second;
  m_tid_to_flush_tid.erase(tid_to_flush_tid_it);
  m_in_flight_flush_tids.erase(flush_tid);

  ldout(cct, 20) << "tid=" << tid << ", flush_tid=" << flush_tid << dendl;

  // std::set iterates in ascending order, so begin() is the oldest tid
  auto oldest_flush_tid = std::numeric_limits<uint64_t>::max();
  if (!m_in_flight_flush_tids.empty()) {
    oldest_flush_tid = *m_in_flight_flush_tids.begin();
  }

  // all flushes tagged before the oldest tid should be completed
  Contexts flush_ctxs;
  auto flush_contexts_it = m_flush_contexts.begin();
  while (flush_contexts_it != m_flush_contexts.end()) {
    if (flush_contexts_it->first >= oldest_flush_tid) {
      ldout(cct, 20) << "pending IOs: [" << m_in_flight_flush_tids << "], "
                     << "pending flushes=" << m_flush_contexts << dendl;
      break;
    }

    auto& ctxs = flush_contexts_it->second;
    flush_ctxs.insert(flush_ctxs.end(), ctxs.begin(), ctxs.end());
    flush_contexts_it = m_flush_contexts.erase(flush_contexts_it);
  }
  locker.unlock();

  if (!flush_ctxs.empty()) {
    ldout(cct, 20) << "completing flushes: " << flush_ctxs << dendl;
    for (auto ctx : flush_ctxs) {
      ctx->complete(0);
    }
  }
}

// Complete on_finish once every write currently in flight has finished.
// Completes immediately if nothing is in flight; otherwise the context is
// queued against the newest in-flight flush tid.
template <typename I>
void FlushTracker<I>::flush(Context* on_finish) {
  auto cct = m_image_ctx->cct;

  std::unique_lock locker{m_lock};
  if (m_in_flight_flush_tids.empty()) {
    locker.unlock();
    on_finish->complete(0);
    return;
  }

  // rbegin() == newest in-flight tid; the flush fires when everything at or
  // before it has drained (see finish_io)
  auto flush_tid = *m_in_flight_flush_tids.rbegin();
  m_flush_contexts[flush_tid].push_back(on_finish);
  ldout(cct, 20) << "flush_tid=" << flush_tid << ", ctx=" << on_finish << ", "
                 << "flush_contexts=" << m_flush_contexts << dendl;
}

} // namespace io
} // namespace librbd

template class librbd::io::FlushTracker<librbd::ImageCtx>;
3,639
27.661417
78
cc
null
ceph-main/src/librbd/io/FlushTracker.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IO_FLUSH_TRACKER_H #define CEPH_LIBRBD_IO_FLUSH_TRACKER_H #include "include/int_types.h" #include "common/ceph_mutex.h" #include <atomic> #include <list> #include <map> #include <set> #include <unordered_map> struct Context; namespace librbd { struct ImageCtx; namespace io { struct AioCompletion; template <typename ImageCtxT> class FlushTracker { public: FlushTracker(ImageCtxT* image_ctx); ~FlushTracker(); void shut_down(); uint64_t start_io(uint64_t tid); void finish_io(uint64_t tid); void flush(Context* on_finish); private: typedef std::list<Context*> Contexts; typedef std::map<uint64_t, Contexts> FlushContexts; typedef std::set<uint64_t> Tids; typedef std::unordered_map<uint64_t, uint64_t> TidToFlushTid; ImageCtxT* m_image_ctx; std::atomic<uint32_t> m_next_flush_tid{0}; mutable ceph::shared_mutex m_lock; TidToFlushTid m_tid_to_flush_tid; Tids m_in_flight_flush_tids; FlushContexts m_flush_contexts; }; } // namespace io } // namespace librbd extern template class librbd::io::FlushTracker<librbd::ImageCtx>; #endif // CEPH_LIBRBD_IO_FLUSH_TRACKER_H
1,234
18.919355
70
h
null
ceph-main/src/librbd/io/ImageDispatch.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "librbd/io/ImageDispatch.h"
#include "common/dout.h"
#include "librbd/ImageCtx.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageRequest.h"
#include "librbd/io/ObjectDispatcherInterface.h"

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::ImageDispatch: " << this << " " \
                           << __func__ << ": "

namespace librbd {
namespace io {

namespace {

// ensure the AioCompletion's async-op bookkeeping has been started exactly
// once before the request enters the core layer
void start_in_flight_io(AioCompletion* aio_comp) {
  // TODO remove AsyncOperation from AioCompletion
  if (!aio_comp->async_op.started()) {
    aio_comp->start_op();
  }
}

// derive the target image area (crypto header vs. data) from the per-request
// dispatch flags
ImageArea get_area(const std::atomic<uint32_t>* image_dispatch_flags) {
  return (*image_dispatch_flags & IMAGE_DISPATCH_FLAG_CRYPTO_HEADER ?
          ImageArea::CRYPTO_HEADER : ImageArea::DATA);
}

} // anonymous namespace

// This is the terminal (core) dispatch layer: each hook marks the request
// DISPATCH_RESULT_COMPLETE and hands it off to ImageRequest for execution.

template <typename I>
void ImageDispatch<I>::shut_down(Context* on_finish) {
  on_finish->complete(0);
}

template <typename I>
bool ImageDispatch<I>::read(
    AioCompletion* aio_comp, Extents &&image_extents,
    ReadResult &&read_result, IOContext io_context, int op_flags,
    int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
    std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  auto area = get_area(image_dispatch_flags);
  ldout(cct, 20) << "image_extents=" << image_extents
                 << " area=" << area << dendl;

  start_in_flight_io(aio_comp);

  *dispatch_result = DISPATCH_RESULT_COMPLETE;
  ImageRequest<I>::aio_read(m_image_ctx, aio_comp, std::move(image_extents),
                            area, std::move(read_result), io_context, op_flags,
                            read_flags, parent_trace);
  return true;
}

template <typename I>
bool ImageDispatch<I>::write(
    AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
    int op_flags, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  auto area = get_area(image_dispatch_flags);
  ldout(cct, 20) << "image_extents=" << image_extents
                 << " area=" << area << dendl;

  start_in_flight_io(aio_comp);

  *dispatch_result = DISPATCH_RESULT_COMPLETE;
  ImageRequest<I>::aio_write(m_image_ctx, aio_comp, std::move(image_extents),
                             area, std::move(bl), op_flags, parent_trace);
  return true;
}

template <typename I>
bool ImageDispatch<I>::discard(
    AioCompletion* aio_comp, Extents &&image_extents,
    uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  auto area = get_area(image_dispatch_flags);
  ldout(cct, 20) << "image_extents=" << image_extents
                 << " area=" << area << dendl;

  start_in_flight_io(aio_comp);

  *dispatch_result = DISPATCH_RESULT_COMPLETE;
  ImageRequest<I>::aio_discard(m_image_ctx, aio_comp,
                               std::move(image_extents), area,
                               discard_granularity_bytes, parent_trace);
  return true;
}

template <typename I>
bool ImageDispatch<I>::write_same(
    AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
    int op_flags, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  auto area = get_area(image_dispatch_flags);
  ldout(cct, 20) << "image_extents=" << image_extents
                 << " area=" << area << dendl;

  start_in_flight_io(aio_comp);

  *dispatch_result = DISPATCH_RESULT_COMPLETE;
  ImageRequest<I>::aio_writesame(m_image_ctx, aio_comp,
                                 std::move(image_extents), area,
                                 std::move(bl), op_flags, parent_trace);
  return true;
}

template <typename I>
bool ImageDispatch<I>::compare_and_write(
    AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&cmp_bl,
    bufferlist &&bl, uint64_t *mismatch_offset, int op_flags,
    const ZTracer::Trace &parent_trace, uint64_t tid,
    std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  auto area = get_area(image_dispatch_flags);
  ldout(cct, 20) << "image_extents=" << image_extents
                 << " area=" << area << dendl;

  start_in_flight_io(aio_comp);

  *dispatch_result = DISPATCH_RESULT_COMPLETE;
  ImageRequest<I>::aio_compare_and_write(m_image_ctx, aio_comp,
                                         std::move(image_extents), area,
                                         std::move(cmp_bl), std::move(bl),
                                         mismatch_offset, op_flags,
                                         parent_trace);
  return true;
}

template <typename I>
bool ImageDispatch<I>::flush(
    AioCompletion* aio_comp, FlushSource flush_source,
    const ZTracer::Trace &parent_trace, uint64_t tid,
    std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << dendl;

  start_in_flight_io(aio_comp);

  *dispatch_result = DISPATCH_RESULT_COMPLETE;
  ImageRequest<I>::aio_flush(m_image_ctx, aio_comp, flush_source,
                             parent_trace);
  return true;
}

template <typename I>
bool ImageDispatch<I>::list_snaps(
    AioCompletion* aio_comp, Extents&& image_extents, SnapIds&& snap_ids,
    int list_snaps_flags, SnapshotDelta* snapshot_delta,
    const ZTracer::Trace &parent_trace, uint64_t tid,
    std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  auto area = get_area(image_dispatch_flags);
  ldout(cct, 20) << "image_extents=" << image_extents
                 << " area=" << area << dendl;

  start_in_flight_io(aio_comp);

  *dispatch_result = DISPATCH_RESULT_COMPLETE;
  // NOTE(review): req is stack-allocated, so send() presumably queues all
  // async work before returning -- confirm against ImageListSnapsRequest
  ImageListSnapsRequest<I> req(*m_image_ctx, aio_comp,
                               std::move(image_extents), area,
                               std::move(snap_ids), list_snaps_flags,
                               snapshot_delta, parent_trace);
  req.send();
  return true;
}

template <typename I>
bool ImageDispatch<I>::invalidate_cache(Context* on_finish) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << dendl;

  // delegate cache invalidation down to the object dispatch layers
  std::shared_lock owner_lock{m_image_ctx->owner_lock};
  m_image_ctx->io_object_dispatcher->invalidate_cache(on_finish);
  return true;
}

} // namespace io
} // namespace librbd

template class librbd::io::ImageDispatch<librbd::ImageCtx>;
7,057
34.114428
80
cc
null
ceph-main/src/librbd/io/ImageDispatch.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_IO_IMAGE_DISPATCH_H
#define CEPH_LIBRBD_IO_IMAGE_DISPATCH_H

#include "librbd/io/ImageDispatchInterface.h"
#include "include/int_types.h"
#include "include/buffer.h"
#include "common/zipkin_trace.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Types.h"

struct Context;

namespace librbd {

struct ImageCtx;

namespace io {

struct AioCompletion;

// Core (terminal) image dispatch layer: forwards each request to
// ImageRequest and marks it complete (see ImageDispatch.cc).
template <typename ImageCtxT>
class ImageDispatch : public ImageDispatchInterface {
public:
  ImageDispatch(ImageCtxT* image_ctx) : m_image_ctx(image_ctx) {
  }

  ImageDispatchLayer get_dispatch_layer() const override {
    return IMAGE_DISPATCH_LAYER_CORE;
  }

  void shut_down(Context* on_finish) override;

  bool read(
      AioCompletion* aio_comp, Extents &&image_extents,
      ReadResult &&read_result, IOContext io_context, int op_flags,
      int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool write(
      AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool discard(
      AioCompletion* aio_comp, Extents &&image_extents,
      uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool write_same(
      AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool compare_and_write(
      AioCompletion* aio_comp, Extents &&image_extents,
      bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
      int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool flush(
      AioCompletion* aio_comp, FlushSource flush_source,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool list_snaps(
      AioCompletion* aio_comp, Extents&& image_extents, SnapIds&& snap_ids,
      int list_snaps_flags, SnapshotDelta* snapshot_delta,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;

  bool invalidate_cache(Context* on_finish) override;

private:
  ImageCtxT* m_image_ctx;

};

} // namespace io
} // namespace librbd

extern template class librbd::io::ImageDispatch<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_IO_IMAGE_DISPATCH_H
33.854167
77
h
null
ceph-main/src/librbd/io/ImageDispatchInterface.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_IO_IMAGE_DISPATCH_INTERFACE_H
#define CEPH_LIBRBD_IO_IMAGE_DISPATCH_INTERFACE_H

#include "include/int_types.h"
#include "include/buffer.h"
#include "common/zipkin_trace.h"
#include "librbd/Types.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Types.h"
#include <atomic>

struct Context;

namespace librbd {
namespace io {

struct AioCompletion;
struct ImageDispatchSpec;

// Interface implemented by every image-level dispatch layer.  Each hook
// returns true if the layer handled (took ownership of completing) the
// request, false to pass it to the next layer.
struct ImageDispatchInterface {
  typedef ImageDispatchLayer DispatchLayer;
  typedef ImageDispatchSpec DispatchSpec;

  virtual ~ImageDispatchInterface() {
  }

  virtual ImageDispatchLayer get_dispatch_layer() const = 0;

  virtual void shut_down(Context* on_finish) = 0;

  virtual bool read(
      AioCompletion* aio_comp, Extents &&image_extents,
      ReadResult &&read_result, IOContext io_context, int op_flags,
      int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) = 0;
  virtual bool write(
      AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) = 0;
  virtual bool discard(
      AioCompletion* aio_comp, Extents &&image_extents,
      uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) = 0;
  virtual bool write_same(
      AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) = 0;
  virtual bool compare_and_write(
      AioCompletion* aio_comp, Extents &&image_extents,
      bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
      int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) = 0;
  virtual bool flush(
      AioCompletion* aio_comp, FlushSource flush_source,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) = 0;
  virtual bool list_snaps(
      AioCompletion* aio_comp, Extents&& image_extents, SnapIds&& snap_ids,
      int list_snaps_flags, SnapshotDelta* snapshot_delta,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) = 0;

  virtual bool invalidate_cache(Context* on_finish) = 0;
};

} // namespace io
} // namespace librbd

#endif // CEPH_LIBRBD_IO_IMAGE_DISPATCH_INTERFACE_H
3,262
36.079545
77
h
null
ceph-main/src/librbd/io/ImageDispatchSpec.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/io/ImageDispatchSpec.h" #include "librbd/ImageCtx.h" #include "librbd/io/AioCompletion.h" #include "librbd/io/ImageRequest.h" #include "librbd/io/ImageDispatcherInterface.h" #include <boost/variant.hpp> namespace librbd { namespace io { void ImageDispatchSpec::C_Dispatcher::complete(int r) { switch (image_dispatch_spec->dispatch_result) { case DISPATCH_RESULT_RESTART: ceph_assert(image_dispatch_spec->dispatch_layer != 0); image_dispatch_spec->dispatch_layer = static_cast<ImageDispatchLayer>( image_dispatch_spec->dispatch_layer - 1); [[fallthrough]]; case DISPATCH_RESULT_CONTINUE: if (r < 0) { // bubble dispatch failure through AioCompletion image_dispatch_spec->dispatch_result = DISPATCH_RESULT_COMPLETE; image_dispatch_spec->fail(r); return; } image_dispatch_spec->send(); break; case DISPATCH_RESULT_COMPLETE: finish(r); break; case DISPATCH_RESULT_INVALID: ceph_abort(); break; } } void ImageDispatchSpec::C_Dispatcher::finish(int r) { delete image_dispatch_spec; } void ImageDispatchSpec::send() { image_dispatcher->send(this); } void ImageDispatchSpec::fail(int r) { dispatch_result = DISPATCH_RESULT_COMPLETE; aio_comp->fail(r); } } // namespace io } // namespace librbd
1,403
24.527273
74
cc
null
ceph-main/src/librbd/io/ImageDispatchSpec.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_IO_IMAGE_DISPATCH_SPEC_H
#define CEPH_LIBRBD_IO_IMAGE_DISPATCH_SPEC_H

#include "include/int_types.h"
#include "include/buffer.h"
#include "include/Context.h"
#include "common/zipkin_trace.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/Types.h"
#include "librbd/io/ReadResult.h"
#include <boost/variant/variant.hpp>
#include <atomic>

namespace librbd {

class ImageCtx;

namespace io {

struct ImageDispatcherInterface;

// A single image-level IO request as it travels through the dispatch
// layers; created via the static create_*() factories and self-deleting
// once dispatched to completion (see ImageDispatchSpec.cc).
class ImageDispatchSpec {
private:
  // helper to avoid extra heap allocation per object IO
  struct C_Dispatcher : public Context {
    ImageDispatchSpec* image_dispatch_spec;

    C_Dispatcher(ImageDispatchSpec* image_dispatch_spec)
      : image_dispatch_spec(image_dispatch_spec) {
    }

    void complete(int r) override;
    void finish(int r) override;
  };

public:
  // per-operation payloads stored in the Request variant below

  struct Read {
    ReadResult read_result;
    int read_flags;

    Read(ReadResult &&read_result, int read_flags)
      : read_result(std::move(read_result)), read_flags(read_flags) {
    }
  };

  struct Discard {
    uint32_t discard_granularity_bytes;

    Discard(uint32_t discard_granularity_bytes)
      : discard_granularity_bytes(discard_granularity_bytes) {
    }
  };

  struct Write {
    bufferlist bl;

    Write(bufferlist&& bl) : bl(std::move(bl)) {
    }
  };

  struct WriteSame {
    bufferlist bl;

    WriteSame(bufferlist&& bl) : bl(std::move(bl)) {
    }
  };

  struct CompareAndWrite {
    bufferlist cmp_bl;
    bufferlist bl;
    uint64_t *mismatch_offset;

    CompareAndWrite(bufferlist&& cmp_bl, bufferlist&& bl,
                    uint64_t *mismatch_offset)
      : cmp_bl(std::move(cmp_bl)), bl(std::move(bl)),
        mismatch_offset(mismatch_offset) {
    }
  };

  struct Flush {
    FlushSource flush_source;

    Flush(FlushSource flush_source) : flush_source(flush_source) {
    }
  };

  struct ListSnaps {
    SnapIds snap_ids;
    int list_snaps_flags;
    SnapshotDelta* snapshot_delta;

    ListSnaps(SnapIds&& snap_ids, int list_snaps_flags,
              SnapshotDelta* snapshot_delta)
      : snap_ids(std::move(snap_ids)), list_snaps_flags(list_snaps_flags),
        snapshot_delta(snapshot_delta) {
    }
  };

  typedef boost::variant<Read,
                         Discard,
                         Write,
                         WriteSame,
                         CompareAndWrite,
                         Flush,
                         ListSnaps> Request;

  C_Dispatcher dispatcher_ctx;

  ImageDispatcherInterface* image_dispatcher;
  // the layer most recently visited; the dispatcher resumes after it
  ImageDispatchLayer dispatch_layer;
  std::atomic<uint32_t> image_dispatch_flags = 0;
  DispatchResult dispatch_result = DISPATCH_RESULT_INVALID;

  AioCompletion* aio_comp;
  Extents image_extents;
  Request request;
  IOContext io_context;
  int op_flags;
  ZTracer::Trace parent_trace;
  uint64_t tid = 0;

  template <typename ImageCtxT = ImageCtx>
  static ImageDispatchSpec* create_read(
      ImageCtxT &image_ctx, ImageDispatchLayer image_dispatch_layer,
      AioCompletion *aio_comp, Extents &&image_extents, ImageArea area,
      ReadResult &&read_result, IOContext io_context, int op_flags,
      int read_flags, const ZTracer::Trace &parent_trace) {
    return new ImageDispatchSpec(image_ctx.io_image_dispatcher,
                                 image_dispatch_layer, aio_comp,
                                 std::move(image_extents), area,
                                 Read{std::move(read_result), read_flags},
                                 io_context, op_flags, parent_trace);
  }

  template <typename ImageCtxT = ImageCtx>
  static ImageDispatchSpec* create_discard(
      ImageCtxT &image_ctx, ImageDispatchLayer image_dispatch_layer,
      AioCompletion *aio_comp, Extents &&image_extents, ImageArea area,
      uint32_t discard_granularity_bytes,
      const ZTracer::Trace &parent_trace) {
    return new ImageDispatchSpec(image_ctx.io_image_dispatcher,
                                 image_dispatch_layer, aio_comp,
                                 std::move(image_extents), area,
                                 Discard{discard_granularity_bytes},
                                 {}, 0, parent_trace);
  }

  template <typename ImageCtxT = ImageCtx>
  static ImageDispatchSpec* create_write(
      ImageCtxT &image_ctx, ImageDispatchLayer image_dispatch_layer,
      AioCompletion *aio_comp, Extents &&image_extents, ImageArea area,
      bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace) {
    return new ImageDispatchSpec(image_ctx.io_image_dispatcher,
                                 image_dispatch_layer, aio_comp,
                                 std::move(image_extents), area,
                                 Write{std::move(bl)}, {}, op_flags,
                                 parent_trace);
  }

  template <typename ImageCtxT = ImageCtx>
  static ImageDispatchSpec* create_write_same(
      ImageCtxT &image_ctx, ImageDispatchLayer image_dispatch_layer,
      AioCompletion *aio_comp, Extents &&image_extents, ImageArea area,
      bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace) {
    return new ImageDispatchSpec(image_ctx.io_image_dispatcher,
                                 image_dispatch_layer, aio_comp,
                                 std::move(image_extents), area,
                                 WriteSame{std::move(bl)}, {}, op_flags,
                                 parent_trace);
  }

  template <typename ImageCtxT = ImageCtx>
  static ImageDispatchSpec* create_compare_and_write(
      ImageCtxT &image_ctx, ImageDispatchLayer image_dispatch_layer,
      AioCompletion *aio_comp, Extents &&image_extents, ImageArea area,
      bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
      int op_flags, const ZTracer::Trace &parent_trace) {
    return new ImageDispatchSpec(image_ctx.io_image_dispatcher,
                                 image_dispatch_layer, aio_comp,
                                 std::move(image_extents), area,
                                 CompareAndWrite{std::move(cmp_bl),
                                                 std::move(bl),
                                                 mismatch_offset},
                                 {}, op_flags, parent_trace);
  }

  template <typename ImageCtxT = ImageCtx>
  static ImageDispatchSpec* create_flush(
      ImageCtxT &image_ctx, ImageDispatchLayer image_dispatch_layer,
      AioCompletion *aio_comp, FlushSource flush_source,
      const ZTracer::Trace &parent_trace) {
    return new ImageDispatchSpec(image_ctx.io_image_dispatcher,
                                 image_dispatch_layer, aio_comp, {},
                                 ImageArea::DATA /* dummy for {} */,
                                 Flush{flush_source}, {}, 0, parent_trace);
  }

  template <typename ImageCtxT = ImageCtx>
  static ImageDispatchSpec* create_list_snaps(
      ImageCtxT &image_ctx, ImageDispatchLayer image_dispatch_layer,
      AioCompletion *aio_comp, Extents &&image_extents, ImageArea area,
      SnapIds&& snap_ids, int list_snaps_flags,
      SnapshotDelta* snapshot_delta, const ZTracer::Trace &parent_trace) {
    return new ImageDispatchSpec(image_ctx.io_image_dispatcher,
                                 image_dispatch_layer, aio_comp,
                                 std::move(image_extents), area,
                                 ListSnaps{std::move(snap_ids),
                                           list_snaps_flags, snapshot_delta},
                                 {}, 0, parent_trace);
  }

  ~ImageDispatchSpec() {
    // drop the reference taken in the constructor
    aio_comp->put();
  }

  void send();
  void fail(int r);

private:
  struct SendVisitor;
  struct IsWriteOpVisitor;
  struct TokenRequestedVisitor;

  ImageDispatchSpec(ImageDispatcherInterface* image_dispatcher,
                    ImageDispatchLayer image_dispatch_layer,
                    AioCompletion* aio_comp, Extents&& image_extents,
                    ImageArea area, Request&& request,
                    IOContext io_context, int op_flags,
                    const ZTracer::Trace& parent_trace)
    : dispatcher_ctx(this), image_dispatcher(image_dispatcher),
      dispatch_layer(image_dispatch_layer), aio_comp(aio_comp),
      image_extents(std::move(image_extents)), request(std::move(request)),
      io_context(io_context), op_flags(op_flags),
      parent_trace(parent_trace) {
    ceph_assert(aio_comp->image_dispatcher_ctx == nullptr);
    aio_comp->image_dispatcher_ctx = &dispatcher_ctx;
    aio_comp->get();

    // the crypto-header area is encoded as a dispatch flag rather than
    // stored directly
    switch (area) {
    case ImageArea::DATA:
      break;
    case ImageArea::CRYPTO_HEADER:
      image_dispatch_flags |= IMAGE_DISPATCH_FLAG_CRYPTO_HEADER;
      break;
    default:
      ceph_abort();
    }
  }
};

} // namespace io
} // namespace librbd

#endif // CEPH_LIBRBD_IO_IMAGE_DISPATCH_SPEC_H
8,864
33.764706
79
h
null
ceph-main/src/librbd/io/ImageDispatcher.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "librbd/io/ImageDispatcher.h"
#include "include/Context.h"
#include "common/AsyncOpTracker.h"
#include "common/dout.h"
#include "librbd/ImageCtx.h"
#include "librbd/crypto/CryptoImageDispatch.h"
#include "librbd/io/ImageDispatch.h"
#include "librbd/io/ImageDispatchInterface.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/QueueImageDispatch.h"
#include "librbd/io/QosImageDispatch.h"
#include "librbd/io/RefreshImageDispatch.h"
#include "librbd/io/Utils.h"
#include "librbd/io/WriteBlockImageDispatch.h"
#include <boost/variant.hpp>

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::ImageDispatcher: " << this \
                           << " " << __func__ << ": "

namespace librbd {
namespace io {

// Visitor that unpacks an ImageDispatchSpec's request variant and invokes
// the matching method on a single dispatch layer, forwarding the spec's
// bookkeeping fields (tid, flags, dispatch_result, contexts) by pointer so
// the layer can update them in place.  Returns the layer's "handled" flag.
template <typename I>
struct ImageDispatcher<I>::SendVisitor : public boost::static_visitor<bool> {
  ImageDispatchInterface* image_dispatch;
  ImageDispatchSpec* image_dispatch_spec;

  SendVisitor(ImageDispatchInterface* image_dispatch,
              ImageDispatchSpec* image_dispatch_spec)
    : image_dispatch(image_dispatch),
      image_dispatch_spec(image_dispatch_spec) {
  }

  bool operator()(ImageDispatchSpec::Read& read) const {
    return image_dispatch->read(
      image_dispatch_spec->aio_comp,
      std::move(image_dispatch_spec->image_extents),
      std::move(read.read_result), image_dispatch_spec->io_context,
      image_dispatch_spec->op_flags, read.read_flags,
      image_dispatch_spec->parent_trace, image_dispatch_spec->tid,
      &image_dispatch_spec->image_dispatch_flags,
      &image_dispatch_spec->dispatch_result,
      &image_dispatch_spec->aio_comp->image_dispatcher_ctx,
      &image_dispatch_spec->dispatcher_ctx);
  }

  bool operator()(ImageDispatchSpec::Discard& discard) const {
    return image_dispatch->discard(
      image_dispatch_spec->aio_comp,
      std::move(image_dispatch_spec->image_extents),
      discard.discard_granularity_bytes,
      image_dispatch_spec->parent_trace, image_dispatch_spec->tid,
      &image_dispatch_spec->image_dispatch_flags,
      &image_dispatch_spec->dispatch_result,
      &image_dispatch_spec->aio_comp->image_dispatcher_ctx,
      &image_dispatch_spec->dispatcher_ctx);
  }

  bool operator()(ImageDispatchSpec::Write& write) const {
    return image_dispatch->write(
      image_dispatch_spec->aio_comp,
      std::move(image_dispatch_spec->image_extents), std::move(write.bl),
      image_dispatch_spec->op_flags, image_dispatch_spec->parent_trace,
      image_dispatch_spec->tid,
      &image_dispatch_spec->image_dispatch_flags,
      &image_dispatch_spec->dispatch_result,
      &image_dispatch_spec->aio_comp->image_dispatcher_ctx,
      &image_dispatch_spec->dispatcher_ctx);
  }

  bool operator()(ImageDispatchSpec::WriteSame& write_same) const {
    return image_dispatch->write_same(
      image_dispatch_spec->aio_comp,
      std::move(image_dispatch_spec->image_extents),
      std::move(write_same.bl),
      image_dispatch_spec->op_flags, image_dispatch_spec->parent_trace,
      image_dispatch_spec->tid,
      &image_dispatch_spec->image_dispatch_flags,
      &image_dispatch_spec->dispatch_result,
      &image_dispatch_spec->aio_comp->image_dispatcher_ctx,
      &image_dispatch_spec->dispatcher_ctx);
  }

  bool operator()(
      ImageDispatchSpec::CompareAndWrite& compare_and_write) const {
    return image_dispatch->compare_and_write(
      image_dispatch_spec->aio_comp,
      std::move(image_dispatch_spec->image_extents),
      std::move(compare_and_write.cmp_bl), std::move(compare_and_write.bl),
      compare_and_write.mismatch_offset,
      image_dispatch_spec->op_flags, image_dispatch_spec->parent_trace,
      image_dispatch_spec->tid,
      &image_dispatch_spec->image_dispatch_flags,
      &image_dispatch_spec->dispatch_result,
      &image_dispatch_spec->aio_comp->image_dispatcher_ctx,
      &image_dispatch_spec->dispatcher_ctx);
  }

  bool operator()(ImageDispatchSpec::Flush& flush) const {
    return image_dispatch->flush(
      image_dispatch_spec->aio_comp, flush.flush_source,
      image_dispatch_spec->parent_trace, image_dispatch_spec->tid,
      &image_dispatch_spec->image_dispatch_flags,
      &image_dispatch_spec->dispatch_result,
      &image_dispatch_spec->aio_comp->image_dispatcher_ctx,
      &image_dispatch_spec->dispatcher_ctx);
  }

  bool operator()(ImageDispatchSpec::ListSnaps& list_snaps) const {
    return image_dispatch->list_snaps(
      image_dispatch_spec->aio_comp,
      std::move(image_dispatch_spec->image_extents),
      std::move(list_snaps.snap_ids), list_snaps.list_snaps_flags,
      list_snaps.snapshot_delta, image_dispatch_spec->parent_trace,
      image_dispatch_spec->tid,
      &image_dispatch_spec->image_dispatch_flags,
      &image_dispatch_spec->dispatch_result,
      &image_dispatch_spec->aio_comp->image_dispatcher_ctx,
      &image_dispatch_spec->dispatcher_ctx);
  }
};

// Visitor run once per request (on first dispatch, when tid is assigned).
// Clips extents to the image/area bounds and rejects writes against
// snapshots or read-only images.  Returns true when the request was fully
// handled (failed) here and must not be dispatched further.
template <typename I>
struct ImageDispatcher<I>::PreprocessVisitor
  : public boost::static_visitor<bool> {
  ImageDispatcher<I>* image_dispatcher;
  ImageDispatchSpec* image_dispatch_spec;

  PreprocessVisitor(ImageDispatcher<I>* image_dispatcher,
                    ImageDispatchSpec* image_dispatch_spec)
    : image_dispatcher(image_dispatcher),
      image_dispatch_spec(image_dispatch_spec) {
  }

  // Clip the spec's extents to the applicable image area; on failure the
  // spec is failed and true is returned (request consumed).
  bool clip_request() const {
    auto area = (image_dispatch_spec->image_dispatch_flags &
                   IMAGE_DISPATCH_FLAG_CRYPTO_HEADER ?
                 ImageArea::CRYPTO_HEADER : ImageArea::DATA);
    int r = util::clip_request(image_dispatcher->m_image_ctx,
                               &image_dispatch_spec->image_extents, area);
    if (r < 0) {
      image_dispatch_spec->fail(r);
      return true;
    }
    return false;
  }

  bool operator()(ImageDispatchSpec::Read& read) const {
    // callers may opt out of clipping (e.g. to read past the current size)
    if ((read.read_flags & READ_FLAG_DISABLE_CLIPPING) != 0) {
      return false;
    }
    return clip_request();
  }

  bool operator()(ImageDispatchSpec::Flush&) const {
    return clip_request();
  }

  bool operator()(ImageDispatchSpec::ListSnaps&) const {
    // no clipping nor read-only check for snapshot diffs
    return false;
  }

  // All remaining (write-type) requests: clip, then refuse if the image is
  // opened at a snapshot or is read-only.
  template <typename T>
  bool operator()(T&) const {
    if (clip_request()) {
      return true;
    }

    std::shared_lock image_locker{image_dispatcher->m_image_ctx->image_lock};
    if (image_dispatcher->m_image_ctx->snap_id != CEPH_NOSNAP ||
        image_dispatcher->m_image_ctx->read_only) {
      image_dispatch_spec->fail(-EROFS);
      return true;
    }
    return false;
  }
};

// Registers the built-in dispatch layers (core, queue, QoS, refresh,
// write-block); pointers to the QoS and write-block layers are retained for
// the pass-through methods below.
template <typename I>
ImageDispatcher<I>::ImageDispatcher(I* image_ctx)
  : Dispatcher<I, ImageDispatcherInterface>(image_ctx) {
  // configure the core image dispatch handler on startup
  auto image_dispatch = new ImageDispatch(image_ctx);
  this->register_dispatch(image_dispatch);

  auto queue_image_dispatch = new QueueImageDispatch(image_ctx);
  this->register_dispatch(queue_image_dispatch);

  m_qos_image_dispatch = new QosImageDispatch<I>(image_ctx);
  this->register_dispatch(m_qos_image_dispatch);

  auto refresh_image_dispatch = new RefreshImageDispatch(image_ctx);
  this->register_dispatch(refresh_image_dispatch);

  m_write_block_dispatch = new WriteBlockImageDispatch<I>(image_ctx);
  this->register_dispatch(m_write_block_dispatch);
}

// Propagates a cache-invalidation request through all layers via the base
// class's C_InvalidateCache helper (starting below LAYER_NONE).
template <typename I>
void ImageDispatcher<I>::invalidate_cache(Context* on_finish) {
  auto image_ctx = this->m_image_ctx;
  auto cct = image_ctx->cct;
  ldout(cct, 5) << dendl;

  auto ctx = new C_InvalidateCache(
    this, IMAGE_DISPATCH_LAYER_NONE, on_finish);
  ctx->complete(0);
}

// Shuts down the dispatcher after first flushing in-flight async ops
// (read-ahead / copy-on-read) that are not tracked by the dispatch layers.
// Note on_finish is completed with 0 regardless of the flush result.
template <typename I>
void ImageDispatcher<I>::shut_down(Context* on_finish) {
  // TODO ensure all IOs are executed via a dispatcher
  // ensure read-ahead / copy-on-read ops are finished since they are
  // currently outside dispatcher tracking
  auto async_op = new AsyncOperation();

  // chained callbacks: flush completes -> base-class shut_down runs ->
  // async op is released and the caller's context fires
  on_finish = new LambdaContext([async_op, on_finish](int r) {
      async_op->finish_op();
      delete async_op;
      on_finish->complete(0);
    });
  on_finish = new LambdaContext([this, on_finish](int r) {
      Dispatcher<I, ImageDispatcherInterface>::shut_down(on_finish);
    });
  async_op->start_op(*this->m_image_ctx);
  async_op->flush(on_finish);
}

// QoS pass-throughs to the QosImageDispatch layer.
template <typename I>
void ImageDispatcher<I>::apply_qos_schedule_tick_min(uint64_t tick) {
  m_qos_image_dispatch->apply_qos_schedule_tick_min(tick);
}

template <typename I>
void ImageDispatcher<I>::apply_qos_limit(uint64_t flag, uint64_t limit,
                                         uint64_t burst,
                                         uint64_t burst_seconds) {
  m_qos_image_dispatch->apply_qos_limit(flag, limit, burst, burst_seconds);
}

template <typename I>
void ImageDispatcher<I>::apply_qos_exclude_ops(uint64_t exclude_ops) {
  m_qos_image_dispatch->apply_qos_exclude_ops(exclude_ops);
}

// Write-block pass-throughs to the WriteBlockImageDispatch layer.
template <typename I>
bool ImageDispatcher<I>::writes_blocked() const {
  return m_write_block_dispatch->writes_blocked();
}

template <typename I>
int ImageDispatcher<I>::block_writes() {
  return m_write_block_dispatch->block_writes();
}

template <typename I>
void ImageDispatcher<I>::block_writes(Context *on_blocked) {
  m_write_block_dispatch->block_writes(on_blocked);
}

template <typename I>
void ImageDispatcher<I>::unblock_writes() {
  m_write_block_dispatch->unblock_writes();
}

template <typename I>
void ImageDispatcher<I>::wait_on_writes_unblocked(Context *on_unblocked) {
  m_write_block_dispatch->wait_on_writes_unblocked(on_unblocked);
}

// Delegates extent remapping to the crypto layer when one is registered;
// without encryption the extents are already physical (DATA area only).
template <typename I>
void ImageDispatcher<I>::remap_to_physical(Extents& image_extents,
                                           ImageArea area) {
  std::shared_lock locker{this->m_lock};
  auto it = this->m_dispatches.find(IMAGE_DISPATCH_LAYER_CRYPTO);
  if (it == this->m_dispatches.end()) {
    ceph_assert(area == ImageArea::DATA);
    return;
  }
  auto crypto_image_dispatch = static_cast<crypto::CryptoImageDispatch*>(
    it->second.dispatch);
  crypto_image_dispatch->remap_to_physical(image_extents, area);
}

// Inverse of remap_to_physical; returns which logical area the (possibly
// remapped) extents belong to.
template <typename I>
ImageArea ImageDispatcher<I>::remap_to_logical(Extents& image_extents) {
  std::shared_lock locker{this->m_lock};
  auto it = this->m_dispatches.find(IMAGE_DISPATCH_LAYER_CRYPTO);
  if (it == this->m_dispatches.end()) {
    return ImageArea::DATA;
  }
  auto crypto_image_dispatch = static_cast<crypto::CryptoImageDispatch*>(
    it->second.dispatch);
  return crypto_image_dispatch->remap_to_logical(image_extents);
}

// Routes a spec to one dispatch layer.  On the very first dispatch
// (tid == 0) a tid is assigned and preprocessing (clipping / read-only
// checks) runs; a true return from preprocess means the request was
// already failed and consumed.
template <typename I>
bool ImageDispatcher<I>::send_dispatch(
    ImageDispatchInterface* image_dispatch,
    ImageDispatchSpec* image_dispatch_spec) {
  if (image_dispatch_spec->tid == 0) {
    image_dispatch_spec->tid = ++m_next_tid;

    bool finished = preprocess(image_dispatch_spec);
    if (finished) {
      return true;
    }
  }

  return boost::apply_visitor(
    SendVisitor{image_dispatch, image_dispatch_spec},
    image_dispatch_spec->request);
}

// Applies PreprocessVisitor to the spec's request variant.
template <typename I>
bool ImageDispatcher<I>::preprocess(
    ImageDispatchSpec* image_dispatch_spec) {
  return boost::apply_visitor(
    PreprocessVisitor{this, image_dispatch_spec},
    image_dispatch_spec->request);
}

} // namespace io
} // namespace librbd

template class librbd::io::ImageDispatcher<librbd::ImageCtx>;
11,280
33.710769
82
cc
null
ceph-main/src/librbd/io/ImageDispatcher.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_IO_IMAGE_DISPATCHER_H
#define CEPH_LIBRBD_IO_IMAGE_DISPATCHER_H

#include "include/int_types.h"
#include "common/ceph_mutex.h"
#include "librbd/io/Dispatcher.h"
#include "librbd/io/ImageDispatchInterface.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "librbd/io/Types.h"

#include <atomic>
#include <map>

struct Context;

namespace librbd {

struct ImageCtx;

namespace io {

template <typename> struct QosImageDispatch;
template <typename> struct WriteBlockImageDispatch;

// Concrete image-level dispatcher: routes ImageDispatchSpec requests
// through the stack of registered ImageDispatchInterface layers and
// exposes QoS / write-blocking controls implemented by specific layers
// (see ImageDispatcher.cc for the implementation).
template <typename ImageCtxT = ImageCtx>
class ImageDispatcher : public Dispatcher<ImageCtxT, ImageDispatcherInterface> {
public:
  ImageDispatcher(ImageCtxT* image_ctx);

  void invalidate_cache(Context* on_finish) override;

  void shut_down(Context* on_finish) override;

  // QoS controls (forwarded to the QoS dispatch layer)
  void apply_qos_schedule_tick_min(uint64_t tick) override;
  void apply_qos_limit(uint64_t flag, uint64_t limit, uint64_t burst,
                       uint64_t burst_seconds) override;
  void apply_qos_exclude_ops(uint64_t exclude_ops) override;

  // write blocking (forwarded to the write-block dispatch layer)
  bool writes_blocked() const override;
  int block_writes() override;
  void block_writes(Context *on_blocked) override;

  void unblock_writes() override;
  void wait_on_writes_unblocked(Context *on_unblocked) override;

  // logical <-> physical extent remapping (delegated to the crypto layer
  // when encryption is loaded)
  void remap_to_physical(Extents& image_extents, ImageArea area) override;
  ImageArea remap_to_logical(Extents& image_extents) override;

protected:
  bool send_dispatch(
      ImageDispatchInterface* image_dispatch,
      ImageDispatchSpec* image_dispatch_spec) override;

private:
  struct SendVisitor;
  struct PreprocessVisitor;

  using typename Dispatcher<ImageCtxT, ImageDispatcherInterface>::C_InvalidateCache;

  // monotonically-increasing id assigned to each request on first dispatch
  std::atomic<uint64_t> m_next_tid{0};

  // retained pointers to specific registered layers for the pass-through
  // methods above
  QosImageDispatch<ImageCtxT>* m_qos_image_dispatch = nullptr;
  WriteBlockImageDispatch<ImageCtxT>* m_write_block_dispatch = nullptr;

  bool preprocess(ImageDispatchSpec* image_dispatch_spec);

};

} // namespace io
} // namespace librbd

extern template class librbd::io::ImageDispatcher<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_IO_IMAGE_DISPATCHER_H
2,206
27.294872
84
h
null
ceph-main/src/librbd/io/ImageDispatcherInterface.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IO_IMAGE_DISPATCHER_INTERFACE_H #define CEPH_LIBRBD_IO_IMAGE_DISPATCHER_INTERFACE_H #include "include/int_types.h" #include "librbd/io/DispatcherInterface.h" #include "librbd/io/ImageDispatchInterface.h" #include "librbd/io/Types.h" struct Context; namespace librbd { namespace io { struct ImageDispatcherInterface : public DispatcherInterface<ImageDispatchInterface> { public: virtual void apply_qos_schedule_tick_min(uint64_t tick) = 0; virtual void apply_qos_limit(uint64_t flag, uint64_t limit, uint64_t burst, uint64_t burst_seconds) = 0; virtual void apply_qos_exclude_ops(uint64_t exclude_ops) = 0; virtual bool writes_blocked() const = 0; virtual int block_writes() = 0; virtual void block_writes(Context *on_blocked) = 0; virtual void unblock_writes() = 0; virtual void wait_on_writes_unblocked(Context *on_unblocked) = 0; virtual void invalidate_cache(Context* on_finish) = 0; virtual void remap_to_physical(Extents& image_extents, ImageArea area) = 0; virtual ImageArea remap_to_logical(Extents& image_extents) = 0; }; } // namespace io } // namespace librbd #endif // CEPH_LIBRBD_IO_IMAGE_DISPATCHER_INTERFACE_H
1,308
30.166667
77
h
null
ceph-main/src/librbd/io/ImageRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/io/ImageRequest.h" #include "librbd/ImageCtx.h" #include "librbd/internal.h" #include "librbd/Journal.h" #include "librbd/Types.h" #include "librbd/Utils.h" #include "librbd/asio/ContextWQ.h" #include "librbd/io/AioCompletion.h" #include "librbd/io/AsyncOperation.h" #include "librbd/io/ObjectDispatchInterface.h" #include "librbd/io/ObjectDispatchSpec.h" #include "librbd/io/ObjectDispatcherInterface.h" #include "librbd/io/Utils.h" #include "librbd/journal/Types.h" #include "include/rados/librados.hpp" #include "common/errno.h" #include "common/perf_counters.h" #include "osdc/Striper.h" #include <algorithm> #include <functional> #include <map> #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::io::ImageRequest: " << __func__ << ": " namespace librbd { namespace io { using librbd::util::data_object_name; using librbd::util::get_image_ctx; namespace { template <typename I> struct C_AssembleSnapshotDeltas : public C_AioRequest { I* image_ctx; SnapshotDelta* snapshot_delta; ceph::mutex lock = ceph::make_mutex( "librbd::io::C_AssembleSnapshotDeltas::lock", false); std::map<uint64_t, SnapshotDelta> object_snapshot_delta; C_AssembleSnapshotDeltas(I* image_ctx, AioCompletion* aio_comp, SnapshotDelta* snapshot_delta) : C_AioRequest(aio_comp), image_ctx(image_ctx), snapshot_delta(snapshot_delta) { } SnapshotDelta* get_snapshot_delta(uint64_t object_no) { std::unique_lock locker{lock}; return &object_snapshot_delta[object_no]; } void finish(int r) override { auto cct = image_ctx->cct; if (r < 0) { lderr(cct) << "C_AssembleSnapshotDeltas: list snaps failed: " << cpp_strerror(r) << dendl; C_AioRequest::finish(r); return; } std::unique_lock locker{lock}; *snapshot_delta = {}; for (auto& [object_no, object_snapshot_delta] : object_snapshot_delta) { SnapshotDelta image_snapshot_delta; object_to_image_intervals(object_no, 
object_snapshot_delta, &image_snapshot_delta, snapshot_delta); ldout(cct, 20) << "object_no=" << object_no << ", " << "object_snapshot_delta=" << object_snapshot_delta << ", " << "image_snapshot_delta=" << image_snapshot_delta << dendl; } ldout(cct, 20) << "snapshot_delta=" << *snapshot_delta << dendl; C_AioRequest::finish(0); } void object_to_image_intervals( uint64_t object_no, const SnapshotDelta& object_snapshot_delta, SnapshotDelta* image_snapshot_delta, SnapshotDelta* assembled_image_snapshot_delta) { for (auto& [key, object_extents] : object_snapshot_delta) { for (auto& object_extent : object_extents) { auto [image_extents, _] = io::util::object_to_area_extents( image_ctx, object_no, {{object_extent.get_off(), object_extent.get_len()}}); auto& intervals = (*image_snapshot_delta)[key]; auto& assembled_intervals = (*assembled_image_snapshot_delta)[key]; for (auto [image_offset, image_length] : image_extents) { SparseExtent sparse_extent{object_extent.get_val().state, image_length}; intervals.insert(image_offset, image_length, sparse_extent); assembled_intervals.insert(image_offset, image_length, sparse_extent); } } } } }; template <typename I> struct C_RBD_Readahead : public Context { I *ictx; uint64_t object_no; io::ReadExtents extents; C_RBD_Readahead(I *ictx, uint64_t object_no, uint64_t offset, uint64_t length) : ictx(ictx), object_no(object_no), extents({{offset, length}}) { ictx->readahead.inc_pending(); } void finish(int r) override { ceph_assert(extents.size() == 1); auto& extent = extents.front(); ldout(ictx->cct, 20) << "C_RBD_Readahead on " << data_object_name(ictx, object_no) << ": " << extent.offset << "~" << extent.length << dendl; ictx->readahead.dec_pending(); } }; template <typename I> void readahead(I *ictx, const Extents& image_extents, IOContext io_context) { uint64_t total_bytes = 0; for (auto& image_extent : image_extents) { total_bytes += image_extent.second; } ictx->image_lock.lock_shared(); auto total_bytes_read = 
ictx->total_bytes_read.fetch_add(total_bytes); bool abort = ( ictx->readahead_disable_after_bytes != 0 && total_bytes_read > ictx->readahead_disable_after_bytes); if (abort) { ictx->image_lock.unlock_shared(); return; } uint64_t data_size = ictx->get_area_size(ImageArea::DATA); ictx->image_lock.unlock_shared(); auto readahead_extent = ictx->readahead.update(image_extents, data_size); uint64_t readahead_offset = readahead_extent.first; uint64_t readahead_length = readahead_extent.second; if (readahead_length > 0) { ldout(ictx->cct, 20) << "(readahead logical) " << readahead_offset << "~" << readahead_length << dendl; LightweightObjectExtents readahead_object_extents; io::util::area_to_object_extents(ictx, readahead_offset, readahead_length, ImageArea::DATA, 0, &readahead_object_extents); for (auto& object_extent : readahead_object_extents) { ldout(ictx->cct, 20) << "(readahead) " << data_object_name(ictx, object_extent.object_no) << " " << object_extent.offset << "~" << object_extent.length << dendl; auto req_comp = new C_RBD_Readahead<I>(ictx, object_extent.object_no, object_extent.offset, object_extent.length); auto req = io::ObjectDispatchSpec::create_read( ictx, io::OBJECT_DISPATCH_LAYER_NONE, object_extent.object_no, &req_comp->extents, io_context, 0, 0, {}, nullptr, req_comp); req->send(); } ictx->perfcounter->inc(l_librbd_readahead); ictx->perfcounter->inc(l_librbd_readahead_bytes, readahead_length); } } template <typename I> struct C_UpdateTimestamp : public Context { public: I& m_image_ctx; bool m_modify; // if modify set to 'true', modify timestamp is updated, // access timestamp otherwise AsyncOperation m_async_op; C_UpdateTimestamp(I& ictx, bool m) : m_image_ctx(ictx), m_modify(m) { m_async_op.start_op(*get_image_ctx(&m_image_ctx)); } ~C_UpdateTimestamp() override { m_async_op.finish_op(); } void send() { librados::ObjectWriteOperation op; if (m_modify) { cls_client::set_modify_timestamp(&op); } else { cls_client::set_access_timestamp(&op); } auto comp = 
librbd::util::create_rados_callback(this); int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op); ceph_assert(r == 0); comp->release(); } void finish(int r) override { // ignore errors updating timestamp } }; bool should_update_timestamp(const utime_t& now, const utime_t& timestamp, uint64_t interval) { return (interval && (static_cast<uint64_t>(now.sec()) >= interval + timestamp)); } } // anonymous namespace #undef dout_prefix #define dout_prefix *_dout << "librbd::io::ImageRequest: " << this \ << " " << __func__ << ": " template <typename I> void ImageRequest<I>::aio_read(I *ictx, AioCompletion *c, Extents &&image_extents, ImageArea area, ReadResult &&read_result, IOContext io_context, int op_flags, int read_flags, const ZTracer::Trace &parent_trace) { ImageReadRequest<I> req(*ictx, c, std::move(image_extents), area, std::move(read_result), io_context, op_flags, read_flags, parent_trace); req.send(); } template <typename I> void ImageRequest<I>::aio_write(I *ictx, AioCompletion *c, Extents &&image_extents, ImageArea area, bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace) { ImageWriteRequest<I> req(*ictx, c, std::move(image_extents), area, std::move(bl), op_flags, parent_trace); req.send(); } template <typename I> void ImageRequest<I>::aio_discard(I *ictx, AioCompletion *c, Extents &&image_extents, ImageArea area, uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace) { ImageDiscardRequest<I> req(*ictx, c, std::move(image_extents), area, discard_granularity_bytes, parent_trace); req.send(); } template <typename I> void ImageRequest<I>::aio_flush(I *ictx, AioCompletion *c, FlushSource flush_source, const ZTracer::Trace &parent_trace) { ImageFlushRequest<I> req(*ictx, c, flush_source, parent_trace); req.send(); } template <typename I> void ImageRequest<I>::aio_writesame(I *ictx, AioCompletion *c, Extents &&image_extents, ImageArea area, bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace) { 
ImageWriteSameRequest<I> req(*ictx, c, std::move(image_extents), area, std::move(bl), op_flags, parent_trace); req.send(); } template <typename I> void ImageRequest<I>::aio_compare_and_write(I *ictx, AioCompletion *c, Extents &&image_extents, ImageArea area, bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset, int op_flags, const ZTracer::Trace &parent_trace) { ImageCompareAndWriteRequest<I> req(*ictx, c, std::move(image_extents), area, std::move(cmp_bl), std::move(bl), mismatch_offset, op_flags, parent_trace); req.send(); } template <typename I> void ImageRequest<I>::send() { I &image_ctx = this->m_image_ctx; ceph_assert(m_aio_comp->is_initialized(get_aio_type())); ceph_assert(m_aio_comp->is_started()); CephContext *cct = image_ctx.cct; AioCompletion *aio_comp = this->m_aio_comp; ldout(cct, 20) << get_request_type() << ": ictx=" << &image_ctx << ", " << "completion=" << aio_comp << dendl; update_timestamp(); send_request(); } template <typename I> void ImageRequest<I>::update_timestamp() { bool modify = (get_aio_type() != AIO_TYPE_READ); uint64_t update_interval; if (modify) { update_interval = m_image_ctx.mtime_update_interval; } else { update_interval = m_image_ctx.atime_update_interval; } if (update_interval == 0) { return; } utime_t (I::*get_timestamp_fn)() const; void (I::*set_timestamp_fn)(utime_t); if (modify) { get_timestamp_fn = &I::get_modify_timestamp; set_timestamp_fn = &I::set_modify_timestamp; } else { get_timestamp_fn = &I::get_access_timestamp; set_timestamp_fn = &I::set_access_timestamp; } utime_t ts = ceph_clock_now(); { std::shared_lock timestamp_locker{m_image_ctx.timestamp_lock}; if(!should_update_timestamp(ts, std::invoke(get_timestamp_fn, m_image_ctx), update_interval)) { return; } } { std::unique_lock timestamp_locker{m_image_ctx.timestamp_lock}; bool update = should_update_timestamp( ts, std::invoke(get_timestamp_fn, m_image_ctx), update_interval); if (!update) { return; } std::invoke(set_timestamp_fn, m_image_ctx, ts); } // 
TODO we fire and forget this outside the IO path to prevent // potential race conditions with librbd client IO callbacks // between different threads (e.g. librados and object cacher) ldout(m_image_ctx.cct, 10) << get_request_type() << dendl; auto req = new C_UpdateTimestamp<I>(m_image_ctx, modify); req->send(); } template <typename I> ImageReadRequest<I>::ImageReadRequest(I &image_ctx, AioCompletion *aio_comp, Extents &&image_extents, ImageArea area, ReadResult &&read_result, IOContext io_context, int op_flags, int read_flags, const ZTracer::Trace &parent_trace) : ImageRequest<I>(image_ctx, aio_comp, std::move(image_extents), area, "read", parent_trace), m_io_context(io_context), m_op_flags(op_flags), m_read_flags(read_flags) { aio_comp->read_result = std::move(read_result); } template <typename I> void ImageReadRequest<I>::send_request() { I &image_ctx = this->m_image_ctx; CephContext *cct = image_ctx.cct; auto &image_extents = this->m_image_extents; if (this->m_image_area == ImageArea::DATA && image_ctx.cache && image_ctx.readahead_max_bytes > 0 && !(m_op_flags & LIBRADOS_OP_FLAG_FADVISE_RANDOM)) { readahead(get_image_ctx(&image_ctx), image_extents, m_io_context); } // map image extents to object extents LightweightObjectExtents object_extents; uint64_t buffer_ofs = 0; for (auto &extent : image_extents) { if (extent.second == 0) { continue; } util::area_to_object_extents(&image_ctx, extent.first, extent.second, this->m_image_area, buffer_ofs, &object_extents); buffer_ofs += extent.second; } AioCompletion *aio_comp = this->m_aio_comp; aio_comp->read_result.set_image_extents(image_extents); // issue the requests aio_comp->set_request_count(object_extents.size()); for (auto &oe : object_extents) { ldout(cct, 20) << data_object_name(&image_ctx, oe.object_no) << " " << oe.offset << "~" << oe.length << " from " << oe.buffer_extents << dendl; auto req_comp = new io::ReadResult::C_ObjectReadRequest( aio_comp, {{oe.offset, oe.length, std::move(oe.buffer_extents)}}); auto 
req = ObjectDispatchSpec::create_read( &image_ctx, OBJECT_DISPATCH_LAYER_NONE, oe.object_no, &req_comp->extents, m_io_context, m_op_flags, m_read_flags, this->m_trace, nullptr, req_comp); req->send(); } image_ctx.perfcounter->inc(l_librbd_rd); image_ctx.perfcounter->inc(l_librbd_rd_bytes, buffer_ofs); } template <typename I> void AbstractImageWriteRequest<I>::send_request() { I &image_ctx = this->m_image_ctx; bool journaling = false; AioCompletion *aio_comp = this->m_aio_comp; { // prevent image size from changing between computing clip and recording // pending async operation std::shared_lock image_locker{image_ctx.image_lock}; journaling = (image_ctx.journal != nullptr && image_ctx.journal->is_journal_appending()); } uint64_t clip_len = 0; LightweightObjectExtents object_extents; for (auto &extent : this->m_image_extents) { if (extent.second == 0) { continue; } // map to object extents io::util::area_to_object_extents(&image_ctx, extent.first, extent.second, this->m_image_area, clip_len, &object_extents); clip_len += extent.second; } int ret = prune_object_extents(&object_extents); if (ret < 0) { aio_comp->fail(ret); return; } // reflect changes in object_extents back to m_image_extents if (ret == 1) { this->m_image_extents.clear(); for (auto& object_extent : object_extents) { auto [image_extents, _] = io::util::object_to_area_extents( &image_ctx, object_extent.object_no, {{object_extent.offset, object_extent.length}}); this->m_image_extents.insert(this->m_image_extents.end(), image_extents.begin(), image_extents.end()); } } aio_comp->set_request_count(object_extents.size()); if (!object_extents.empty()) { uint64_t journal_tid = 0; if (journaling) { // in-flight ops are flushed prior to closing the journal ceph_assert(image_ctx.journal != NULL); journal_tid = append_journal_event(m_synchronous); } // it's very important that IOContext is captured here instead of // e.g. 
at the API layer so that an up-to-date snap context is used // when owning the exclusive lock send_object_requests(object_extents, image_ctx.get_data_io_context(), journal_tid); } update_stats(clip_len); } template <typename I> void AbstractImageWriteRequest<I>::send_object_requests( const LightweightObjectExtents &object_extents, IOContext io_context, uint64_t journal_tid) { I &image_ctx = this->m_image_ctx; CephContext *cct = image_ctx.cct; AioCompletion *aio_comp = this->m_aio_comp; bool single_extent = (object_extents.size() == 1); for (auto& oe : object_extents) { ldout(cct, 20) << data_object_name(&image_ctx, oe.object_no) << " " << oe.offset << "~" << oe.length << " from " << oe.buffer_extents << dendl; C_AioRequest *req_comp = new C_AioRequest(aio_comp); auto request = create_object_request(oe, io_context, journal_tid, single_extent, req_comp); request->send(); } } template <typename I> void ImageWriteRequest<I>::assemble_extent( const LightweightObjectExtent &object_extent, bufferlist *bl) { for (auto q = object_extent.buffer_extents.begin(); q != object_extent.buffer_extents.end(); ++q) { bufferlist sub_bl; sub_bl.substr_of(m_bl, q->first, q->second); bl->claim_append(sub_bl); } } template <typename I> uint64_t ImageWriteRequest<I>::append_journal_event(bool synchronous) { I &image_ctx = this->m_image_ctx; uint64_t tid = 0; uint64_t buffer_offset = 0; ceph_assert(!this->m_image_extents.empty()); for (auto &extent : this->m_image_extents) { bufferlist sub_bl; sub_bl.substr_of(m_bl, buffer_offset, extent.second); buffer_offset += extent.second; tid = image_ctx.journal->append_write_event(extent.first, extent.second, sub_bl, synchronous); } return tid; } template <typename I> ObjectDispatchSpec *ImageWriteRequest<I>::create_object_request( const LightweightObjectExtent &object_extent, IOContext io_context, uint64_t journal_tid, bool single_extent, Context *on_finish) { I &image_ctx = this->m_image_ctx; bufferlist bl; if (single_extent && 
object_extent.buffer_extents.size() == 1 && m_bl.length() == object_extent.length) { // optimization for single object/buffer extent writes bl = std::move(m_bl); } else { assemble_extent(object_extent, &bl); } auto req = ObjectDispatchSpec::create_write( &image_ctx, OBJECT_DISPATCH_LAYER_NONE, object_extent.object_no, object_extent.offset, std::move(bl), io_context, m_op_flags, 0, std::nullopt, journal_tid, this->m_trace, on_finish); return req; } template <typename I> void ImageWriteRequest<I>::update_stats(size_t length) { I &image_ctx = this->m_image_ctx; image_ctx.perfcounter->inc(l_librbd_wr); image_ctx.perfcounter->inc(l_librbd_wr_bytes, length); } template <typename I> uint64_t ImageDiscardRequest<I>::append_journal_event(bool synchronous) { I &image_ctx = this->m_image_ctx; uint64_t tid = 0; ceph_assert(!this->m_image_extents.empty()); for (auto &extent : this->m_image_extents) { journal::EventEntry event_entry( journal::AioDiscardEvent(extent.first, extent.second, this->m_discard_granularity_bytes)); tid = image_ctx.journal->append_io_event(std::move(event_entry), extent.first, extent.second, synchronous, 0); } return tid; } template <typename I> ObjectDispatchSpec *ImageDiscardRequest<I>::create_object_request( const LightweightObjectExtent &object_extent, IOContext io_context, uint64_t journal_tid, bool single_extent, Context *on_finish) { I &image_ctx = this->m_image_ctx; auto req = ObjectDispatchSpec::create_discard( &image_ctx, OBJECT_DISPATCH_LAYER_NONE, object_extent.object_no, object_extent.offset, object_extent.length, io_context, OBJECT_DISCARD_FLAG_DISABLE_CLONE_REMOVE, journal_tid, this->m_trace, on_finish); return req; } template <typename I> void ImageDiscardRequest<I>::update_stats(size_t length) { I &image_ctx = this->m_image_ctx; image_ctx.perfcounter->inc(l_librbd_discard); image_ctx.perfcounter->inc(l_librbd_discard_bytes, length); } template <typename I> int ImageDiscardRequest<I>::prune_object_extents( LightweightObjectExtents* 
object_extents) const { if (m_discard_granularity_bytes == 0) { return 0; } // Align the range to discard_granularity_bytes boundary and skip // and discards that are too small to free up any space. // // discard_granularity_bytes >= object_size && tail truncation // is a special case for filestore bool prune_required = false; bool length_modified = false; auto object_size = this->m_image_ctx.layout.object_size; auto discard_granularity_bytes = std::min(m_discard_granularity_bytes, object_size); auto xform_lambda = [discard_granularity_bytes, object_size, &prune_required, &length_modified] (LightweightObjectExtent& object_extent) { auto& offset = object_extent.offset; auto& length = object_extent.length; auto next_offset = offset + length; if ((discard_granularity_bytes < object_size) || (next_offset < object_size)) { offset = p2roundup<uint64_t>(offset, discard_granularity_bytes); next_offset = p2align<uint64_t>(next_offset, discard_granularity_bytes); if (offset >= next_offset) { prune_required = true; length = 0; } else { auto new_length = next_offset - offset; if (length != new_length) { length_modified = true; length = new_length; } } } }; std::for_each(object_extents->begin(), object_extents->end(), xform_lambda); if (prune_required) { // one or more object extents were skipped auto remove_lambda = [](const LightweightObjectExtent& object_extent) { return (object_extent.length == 0); }; object_extents->erase( std::remove_if(object_extents->begin(), object_extents->end(), remove_lambda), object_extents->end()); } // object extents were modified, image extents needs updating if (length_modified || prune_required) { return 1; } return 0; } template <typename I> void ImageFlushRequest<I>::send_request() { I &image_ctx = this->m_image_ctx; bool journaling = false; { std::shared_lock image_locker{image_ctx.image_lock}; journaling = (m_flush_source == FLUSH_SOURCE_USER && image_ctx.journal != nullptr && image_ctx.journal->is_journal_appending()); } AioCompletion 
*aio_comp = this->m_aio_comp; aio_comp->set_request_count(1); Context *ctx = new C_AioRequest(aio_comp); // ensure no locks are held when flush is complete ctx = librbd::util::create_async_context_callback(image_ctx, ctx); uint64_t journal_tid = 0; if (journaling) { // in-flight ops are flushed prior to closing the journal ceph_assert(image_ctx.journal != NULL); journal_tid = image_ctx.journal->append_io_event( journal::EventEntry(journal::AioFlushEvent()), 0, 0, false, 0); image_ctx.journal->user_flushed(); } auto object_dispatch_spec = ObjectDispatchSpec::create_flush( &image_ctx, OBJECT_DISPATCH_LAYER_NONE, m_flush_source, journal_tid, this->m_trace, ctx); ctx = new LambdaContext([object_dispatch_spec](int r) { object_dispatch_spec->send(); }); // ensure all in-flight IOs are settled if non-user flush request if (m_flush_source == FLUSH_SOURCE_WRITEBACK) { ctx->complete(0); } else { aio_comp->async_op.flush(ctx); } // might be flushing during image shutdown if (image_ctx.perfcounter != nullptr) { image_ctx.perfcounter->inc(l_librbd_flush); } } template <typename I> uint64_t ImageWriteSameRequest<I>::append_journal_event(bool synchronous) { I &image_ctx = this->m_image_ctx; uint64_t tid = 0; ceph_assert(!this->m_image_extents.empty()); for (auto &extent : this->m_image_extents) { journal::EventEntry event_entry(journal::AioWriteSameEvent(extent.first, extent.second, m_data_bl)); tid = image_ctx.journal->append_io_event(std::move(event_entry), extent.first, extent.second, synchronous, 0); } return tid; } template <typename I> ObjectDispatchSpec *ImageWriteSameRequest<I>::create_object_request( const LightweightObjectExtent &object_extent, IOContext io_context, uint64_t journal_tid, bool single_extent, Context *on_finish) { I &image_ctx = this->m_image_ctx; bufferlist bl; ObjectDispatchSpec *req; if (util::assemble_write_same_extent(object_extent, m_data_bl, &bl, false)) { auto buffer_extents{object_extent.buffer_extents}; req = 
ObjectDispatchSpec::create_write_same( &image_ctx, OBJECT_DISPATCH_LAYER_NONE, object_extent.object_no, object_extent.offset, object_extent.length, std::move(buffer_extents), std::move(bl), io_context, m_op_flags, journal_tid, this->m_trace, on_finish); return req; } req = ObjectDispatchSpec::create_write( &image_ctx, OBJECT_DISPATCH_LAYER_NONE, object_extent.object_no, object_extent.offset, std::move(bl), io_context, m_op_flags, 0, std::nullopt, journal_tid, this->m_trace, on_finish); return req; } template <typename I> void ImageWriteSameRequest<I>::update_stats(size_t length) { I &image_ctx = this->m_image_ctx; image_ctx.perfcounter->inc(l_librbd_ws); image_ctx.perfcounter->inc(l_librbd_ws_bytes, length); } template <typename I> uint64_t ImageCompareAndWriteRequest<I>::append_journal_event( bool synchronous) { I &image_ctx = this->m_image_ctx; uint64_t tid = 0; ceph_assert(this->m_image_extents.size() == 1); auto &extent = this->m_image_extents.front(); tid = image_ctx.journal->append_compare_and_write_event(extent.first, extent.second, m_cmp_bl, m_bl, synchronous); return tid; } template <typename I> void ImageCompareAndWriteRequest<I>::assemble_extent( const LightweightObjectExtent &object_extent, bufferlist *bl, bufferlist *cmp_bl) { for (auto q = object_extent.buffer_extents.begin(); q != object_extent.buffer_extents.end(); ++q) { bufferlist sub_bl; sub_bl.substr_of(m_bl, q->first, q->second); bl->claim_append(sub_bl); bufferlist sub_cmp_bl; sub_cmp_bl.substr_of(m_cmp_bl, q->first, q->second); cmp_bl->claim_append(sub_cmp_bl); } } template <typename I> ObjectDispatchSpec *ImageCompareAndWriteRequest<I>::create_object_request( const LightweightObjectExtent &object_extent, IOContext io_context, uint64_t journal_tid, bool single_extent, Context *on_finish) { I &image_ctx = this->m_image_ctx; bufferlist bl; bufferlist cmp_bl; assemble_extent(object_extent, &bl, &cmp_bl); auto req = ObjectDispatchSpec::create_compare_and_write( &image_ctx, 
OBJECT_DISPATCH_LAYER_NONE, object_extent.object_no, object_extent.offset, std::move(cmp_bl), std::move(bl), io_context, m_mismatch_offset, m_op_flags, journal_tid, this->m_trace, on_finish); return req; } template <typename I> void ImageCompareAndWriteRequest<I>::update_stats(size_t length) { I &image_ctx = this->m_image_ctx; image_ctx.perfcounter->inc(l_librbd_cmp); image_ctx.perfcounter->inc(l_librbd_cmp_bytes, length); } template <typename I> int ImageCompareAndWriteRequest<I>::prune_object_extents( LightweightObjectExtents* object_extents) const { if (object_extents->size() > 1) return -EINVAL; I &image_ctx = this->m_image_ctx; uint64_t su = image_ctx.layout.stripe_unit; auto& object_extent = object_extents->front(); if (su == 0 || (object_extent.offset % su + object_extent.length > su)) return -EINVAL; return 0; } template <typename I> ImageListSnapsRequest<I>::ImageListSnapsRequest( I& image_ctx, AioCompletion* aio_comp, Extents&& image_extents, ImageArea area, SnapIds&& snap_ids, int list_snaps_flags, SnapshotDelta* snapshot_delta, const ZTracer::Trace& parent_trace) : ImageRequest<I>(image_ctx, aio_comp, std::move(image_extents), area, "list-snaps", parent_trace), m_snap_ids(std::move(snap_ids)), m_list_snaps_flags(list_snaps_flags), m_snapshot_delta(snapshot_delta) { } template <typename I> void ImageListSnapsRequest<I>::send_request() { I &image_ctx = this->m_image_ctx; CephContext *cct = image_ctx.cct; // map image extents to object extents auto &image_extents = this->m_image_extents; std::map<uint64_t, Extents> object_number_extents; for (auto& image_extent : image_extents) { if (image_extent.second == 0) { continue; } striper::LightweightObjectExtents object_extents; io::util::area_to_object_extents(&image_ctx, image_extent.first, image_extent.second, this->m_image_area, 0, &object_extents); for (auto& object_extent : object_extents) { object_number_extents[object_extent.object_no].emplace_back( object_extent.offset, object_extent.length); } } // 
reassemble the deltas back into image-extents when complete auto aio_comp = this->m_aio_comp; aio_comp->set_request_count(1); auto assemble_ctx = new C_AssembleSnapshotDeltas<I>( &image_ctx, aio_comp, m_snapshot_delta); auto sub_aio_comp = AioCompletion::create_and_start< Context, &Context::complete>(assemble_ctx, get_image_ctx(&image_ctx), AIO_TYPE_GENERIC); // issue the requests sub_aio_comp->set_request_count(object_number_extents.size()); for (auto& oe : object_number_extents) { ldout(cct, 20) << data_object_name(&image_ctx, oe.first) << " " << oe.second << dendl; auto ctx = new C_AioRequest(sub_aio_comp); auto req = ObjectDispatchSpec::create_list_snaps( &image_ctx, OBJECT_DISPATCH_LAYER_NONE, oe.first, std::move(oe.second), SnapIds{m_snap_ids}, m_list_snaps_flags, this->m_trace, assemble_ctx->get_snapshot_delta(oe.first), ctx); req->send(); } } } // namespace io } // namespace librbd template class librbd::io::ImageRequest<librbd::ImageCtx>; template class librbd::io::ImageReadRequest<librbd::ImageCtx>; template class librbd::io::AbstractImageWriteRequest<librbd::ImageCtx>; template class librbd::io::ImageWriteRequest<librbd::ImageCtx>; template class librbd::io::ImageDiscardRequest<librbd::ImageCtx>; template class librbd::io::ImageFlushRequest<librbd::ImageCtx>; template class librbd::io::ImageWriteSameRequest<librbd::ImageCtx>; template class librbd::io::ImageCompareAndWriteRequest<librbd::ImageCtx>; template class librbd::io::ImageListSnapsRequest<librbd::ImageCtx>;
32,378
34.581319
81
cc
null
ceph-main/src/librbd/io/ImageRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IO_IMAGE_REQUEST_H #define CEPH_LIBRBD_IO_IMAGE_REQUEST_H #include "include/int_types.h" #include "include/buffer_fwd.h" #include "common/zipkin_trace.h" #include "osd/osd_types.h" #include "librbd/Utils.h" #include "librbd/Types.h" #include "librbd/io/Types.h" #include <list> #include <utility> #include <vector> namespace librbd { class ImageCtx; namespace io { class AioCompletion; class ObjectDispatchSpec; class ReadResult; template <typename ImageCtxT = ImageCtx> class ImageRequest { public: virtual ~ImageRequest() { m_trace.event("finish"); } static void aio_read(ImageCtxT *ictx, AioCompletion *c, Extents &&image_extents, ImageArea area, ReadResult &&read_result, IOContext io_context, int op_flags, int read_flags, const ZTracer::Trace &parent_trace); static void aio_write(ImageCtxT *ictx, AioCompletion *c, Extents &&image_extents, ImageArea area, bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace); static void aio_discard(ImageCtxT *ictx, AioCompletion *c, Extents &&image_extents, ImageArea area, uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace); static void aio_flush(ImageCtxT *ictx, AioCompletion *c, FlushSource flush_source, const ZTracer::Trace &parent_trace); static void aio_writesame(ImageCtxT *ictx, AioCompletion *c, Extents &&image_extents, ImageArea area, bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace); static void aio_compare_and_write(ImageCtxT *ictx, AioCompletion *c, Extents &&image_extents, ImageArea area, bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset, int op_flags, const ZTracer::Trace &parent_trace); void send(); inline const ZTracer::Trace &get_trace() const { return m_trace; } protected: typedef std::list<ObjectDispatchSpec*> ObjectRequests; ImageCtxT &m_image_ctx; AioCompletion *m_aio_comp; Extents m_image_extents; ImageArea m_image_area; ZTracer::Trace 
m_trace; ImageRequest(ImageCtxT &image_ctx, AioCompletion *aio_comp, Extents &&image_extents, ImageArea area, const char *trace_name, const ZTracer::Trace &parent_trace) : m_image_ctx(image_ctx), m_aio_comp(aio_comp), m_image_extents(std::move(image_extents)), m_image_area(area), m_trace(librbd::util::create_trace(image_ctx, trace_name, parent_trace)) { m_trace.event("start"); } virtual void update_timestamp(); virtual void send_request() = 0; virtual aio_type_t get_aio_type() const = 0; virtual const char *get_request_type() const = 0; }; template <typename ImageCtxT = ImageCtx> class ImageReadRequest : public ImageRequest<ImageCtxT> { public: ImageReadRequest(ImageCtxT &image_ctx, AioCompletion *aio_comp, Extents &&image_extents, ImageArea area, ReadResult &&read_result, IOContext io_context, int op_flags, int read_flags, const ZTracer::Trace &parent_trace); protected: void send_request() override; aio_type_t get_aio_type() const override { return AIO_TYPE_READ; } const char *get_request_type() const override { return "aio_read"; } private: IOContext m_io_context; int m_op_flags; int m_read_flags; }; template <typename ImageCtxT = ImageCtx> class AbstractImageWriteRequest : public ImageRequest<ImageCtxT> { public: inline void flag_synchronous() { m_synchronous = true; } protected: using typename ImageRequest<ImageCtxT>::ObjectRequests; AbstractImageWriteRequest(ImageCtxT &image_ctx, AioCompletion *aio_comp, Extents &&image_extents, ImageArea area, const char *trace_name, const ZTracer::Trace &parent_trace) : ImageRequest<ImageCtxT>(image_ctx, aio_comp, std::move(image_extents), area, trace_name, parent_trace), m_synchronous(false) { } void send_request() override; virtual int prune_object_extents( LightweightObjectExtents* object_extents) const { return 0; } void send_object_requests(const LightweightObjectExtents &object_extents, IOContext io_context, uint64_t journal_tid); virtual ObjectDispatchSpec *create_object_request( const LightweightObjectExtent 
&object_extent, IOContext io_context, uint64_t journal_tid, bool single_extent, Context *on_finish) = 0; virtual uint64_t append_journal_event(bool synchronous) = 0; virtual void update_stats(size_t length) = 0; private: bool m_synchronous; }; template <typename ImageCtxT = ImageCtx> class ImageWriteRequest : public AbstractImageWriteRequest<ImageCtxT> { public: ImageWriteRequest(ImageCtxT &image_ctx, AioCompletion *aio_comp, Extents &&image_extents, ImageArea area, bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace) : AbstractImageWriteRequest<ImageCtxT>( image_ctx, aio_comp, std::move(image_extents), area, "write", parent_trace), m_bl(std::move(bl)), m_op_flags(op_flags) { } protected: using typename ImageRequest<ImageCtxT>::ObjectRequests; aio_type_t get_aio_type() const override { return AIO_TYPE_WRITE; } const char *get_request_type() const override { return "aio_write"; } void assemble_extent(const LightweightObjectExtent &object_extent, bufferlist *bl); ObjectDispatchSpec *create_object_request( const LightweightObjectExtent &object_extent, IOContext io_context, uint64_t journal_tid, bool single_extent, Context *on_finish) override; uint64_t append_journal_event(bool synchronous) override; void update_stats(size_t length) override; private: bufferlist m_bl; int m_op_flags; }; template <typename ImageCtxT = ImageCtx> class ImageDiscardRequest : public AbstractImageWriteRequest<ImageCtxT> { public: ImageDiscardRequest(ImageCtxT &image_ctx, AioCompletion *aio_comp, Extents&& image_extents, ImageArea area, uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace) : AbstractImageWriteRequest<ImageCtxT>( image_ctx, aio_comp, std::move(image_extents), area, "discard", parent_trace), m_discard_granularity_bytes(discard_granularity_bytes) { } protected: using typename ImageRequest<ImageCtxT>::ObjectRequests; aio_type_t get_aio_type() const override { return AIO_TYPE_DISCARD; } const char *get_request_type() const override { return 
"aio_discard"; } ObjectDispatchSpec *create_object_request( const LightweightObjectExtent &object_extent, IOContext io_context, uint64_t journal_tid, bool single_extent, Context *on_finish) override; uint64_t append_journal_event(bool synchronous) override; void update_stats(size_t length) override; int prune_object_extents( LightweightObjectExtents* object_extents) const override; private: uint32_t m_discard_granularity_bytes; }; template <typename ImageCtxT = ImageCtx> class ImageFlushRequest : public ImageRequest<ImageCtxT> { public: ImageFlushRequest(ImageCtxT &image_ctx, AioCompletion *aio_comp, FlushSource flush_source, const ZTracer::Trace &parent_trace) : ImageRequest<ImageCtxT>(image_ctx, aio_comp, {}, ImageArea::DATA /* dummy for {} */, "flush", parent_trace), m_flush_source(flush_source) { } protected: using typename ImageRequest<ImageCtxT>::ObjectRequests; void update_timestamp() override { } void send_request() override; aio_type_t get_aio_type() const override { return AIO_TYPE_FLUSH; } const char *get_request_type() const override { return "aio_flush"; } private: FlushSource m_flush_source; }; template <typename ImageCtxT = ImageCtx> class ImageWriteSameRequest : public AbstractImageWriteRequest<ImageCtxT> { public: ImageWriteSameRequest(ImageCtxT &image_ctx, AioCompletion *aio_comp, Extents&& image_extents, ImageArea area, bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace) : AbstractImageWriteRequest<ImageCtxT>( image_ctx, aio_comp, std::move(image_extents), area, "writesame", parent_trace), m_data_bl(std::move(bl)), m_op_flags(op_flags) { } protected: using typename ImageRequest<ImageCtxT>::ObjectRequests; aio_type_t get_aio_type() const override { return AIO_TYPE_WRITESAME; } const char *get_request_type() const override { return "aio_writesame"; } ObjectDispatchSpec *create_object_request( const LightweightObjectExtent &object_extent, IOContext io_context, uint64_t journal_tid, bool single_extent, Context *on_finish) override; 
uint64_t append_journal_event(bool synchronous) override; void update_stats(size_t length) override; private: bufferlist m_data_bl; int m_op_flags; }; template <typename ImageCtxT = ImageCtx> class ImageCompareAndWriteRequest : public AbstractImageWriteRequest<ImageCtxT> { public: using typename ImageRequest<ImageCtxT>::ObjectRequests; ImageCompareAndWriteRequest(ImageCtxT &image_ctx, AioCompletion *aio_comp, Extents &&image_extents, ImageArea area, bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset, int op_flags, const ZTracer::Trace &parent_trace) : AbstractImageWriteRequest<ImageCtxT>( image_ctx, aio_comp, std::move(image_extents), area, "compare_and_write", parent_trace), m_cmp_bl(std::move(cmp_bl)), m_bl(std::move(bl)), m_mismatch_offset(mismatch_offset), m_op_flags(op_flags) { } protected: void assemble_extent(const LightweightObjectExtent &object_extent, bufferlist *bl, bufferlist *cmp_bl); ObjectDispatchSpec *create_object_request( const LightweightObjectExtent &object_extent, IOContext io_context, uint64_t journal_tid, bool single_extent, Context *on_finish) override; uint64_t append_journal_event(bool synchronous) override; void update_stats(size_t length) override; aio_type_t get_aio_type() const override { return AIO_TYPE_COMPARE_AND_WRITE; } const char *get_request_type() const override { return "aio_compare_and_write"; } int prune_object_extents( LightweightObjectExtents* object_extents) const override; private: bufferlist m_cmp_bl; bufferlist m_bl; uint64_t *m_mismatch_offset; int m_op_flags; }; template <typename ImageCtxT = ImageCtx> class ImageListSnapsRequest : public ImageRequest<ImageCtxT> { public: ImageListSnapsRequest( ImageCtxT& image_ctx, AioCompletion* aio_comp, Extents&& image_extents, ImageArea area, SnapIds&& snap_ids, int list_snaps_flags, SnapshotDelta* snapshot_delta, const ZTracer::Trace& parent_trace); protected: void update_timestamp() override {} void send_request() override; aio_type_t get_aio_type() const override 
{ return AIO_TYPE_GENERIC; } const char *get_request_type() const override { return "list-snaps"; } private: SnapIds m_snap_ids; int m_list_snaps_flags; SnapshotDelta* m_snapshot_delta; }; } // namespace io } // namespace librbd extern template class librbd::io::ImageRequest<librbd::ImageCtx>; extern template class librbd::io::ImageReadRequest<librbd::ImageCtx>; extern template class librbd::io::AbstractImageWriteRequest<librbd::ImageCtx>; extern template class librbd::io::ImageWriteRequest<librbd::ImageCtx>; extern template class librbd::io::ImageDiscardRequest<librbd::ImageCtx>; extern template class librbd::io::ImageFlushRequest<librbd::ImageCtx>; extern template class librbd::io::ImageWriteSameRequest<librbd::ImageCtx>; extern template class librbd::io::ImageCompareAndWriteRequest<librbd::ImageCtx>; extern template class librbd::io::ImageListSnapsRequest<librbd::ImageCtx>; #endif // CEPH_LIBRBD_IO_IMAGE_REQUEST_H
12,696
32.589947
81
h
null
ceph-main/src/librbd/io/IoOperations.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include <boost/lexical_cast.hpp> #include <boost/algorithm/string.hpp> #include "librbd/io/Types.h" #include "librbd/io/IoOperations.h" #include <map> #include <vector> namespace librbd { namespace io { #define RBD_IO_OPERATION_NAME_READ "read" #define RBD_IO_OPERATION_NAME_WRITE "write" #define RBD_IO_OPERATION_NAME_DISCARD "discard" #define RBD_IO_OPERATION_NAME_WRITE_SAME "write_same" #define RBD_IO_OPERATION_NAME_COMPARE_AND_WRITE "compare_and_write" static const std::map<std::string, uint64_t> RBD_IO_OPERATION_MAP = { {RBD_IO_OPERATION_NAME_READ, RBD_IO_OPERATION_READ}, {RBD_IO_OPERATION_NAME_WRITE, RBD_IO_OPERATION_WRITE}, {RBD_IO_OPERATION_NAME_DISCARD, RBD_IO_OPERATION_DISCARD}, {RBD_IO_OPERATION_NAME_WRITE_SAME, RBD_IO_OPERATION_WRITE_SAME}, {RBD_IO_OPERATION_NAME_COMPARE_AND_WRITE, RBD_IO_OPERATION_COMPARE_AND_WRITE}, }; static_assert((RBD_IO_OPERATION_COMPARE_AND_WRITE << 1) > RBD_IO_OPERATIONS_ALL, "new RBD io operation added"); std::string rbd_io_operations_to_string(uint64_t operations, std::ostream *err) { std::string r; for (auto& i : RBD_IO_OPERATION_MAP) { if (operations & i.second) { if (!r.empty()) { r += ","; } r += i.first; operations &= ~i.second; } } if (err && operations) { *err << "ignoring unknown io operation mask 0x" << std::hex << operations << std::dec; } return r; } uint64_t rbd_io_operations_from_string(const std::string& orig_value, std::ostream *err) { uint64_t operations = 0; std::string value = orig_value; boost::trim(value); // empty string means default operations if (!value.size()) { return RBD_IO_OPERATIONS_DEFAULT; } try { // numeric? 
operations = boost::lexical_cast<uint64_t>(value); // drop unrecognized bits uint64_t unsupported_operations = (operations & ~RBD_IO_OPERATIONS_ALL); if (unsupported_operations != 0ull) { operations &= RBD_IO_OPERATIONS_ALL; if (err) { *err << "ignoring unknown operation mask 0x" << std::hex << unsupported_operations << std::dec; } } } catch (boost::bad_lexical_cast&) { // operation name list? bool errors = false; std::vector<std::string> operation_names; boost::split(operation_names, value, boost::is_any_of(",")); for (auto operation_name: operation_names) { boost::trim(operation_name); auto operation_it = RBD_IO_OPERATION_MAP.find(operation_name); if (operation_it != RBD_IO_OPERATION_MAP.end()) { operations += operation_it->second; } else if (err) { if (errors) { *err << ", "; } else { errors = true; } *err << "ignoring unknown operation " << operation_name; } } } return operations; } } // namespace io } // namespace librbd
2,999
28.411765
80
cc
null
ceph-main/src/librbd/io/IoOperations.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <string> #include <ostream> namespace librbd { namespace io { std::string rbd_io_operations_to_string(uint64_t ops, std::ostream *err); uint64_t rbd_io_operations_from_string(const std::string& value, std::ostream *err); } // namespace io } // namespace librbd
473
23.947368
70
h
null
ceph-main/src/librbd/io/ObjectDispatch.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/io/ObjectDispatch.h" #include "common/dout.h" #include "librbd/AsioEngine.h" #include "librbd/ImageCtx.h" #include "librbd/Utils.h" #include "librbd/io/ObjectRequest.h" #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::io::ObjectDispatch: " << this \ << " " << __func__ << ": " namespace librbd { namespace io { using librbd::util::data_object_name; template <typename I> ObjectDispatch<I>::ObjectDispatch(I* image_ctx) : m_image_ctx(image_ctx) { } template <typename I> void ObjectDispatch<I>::shut_down(Context* on_finish) { auto cct = m_image_ctx->cct; ldout(cct, 5) << dendl; m_image_ctx->asio_engine->post(on_finish, 0); } template <typename I> bool ObjectDispatch<I>::read( uint64_t object_no, ReadExtents* extents, IOContext io_context, int op_flags, int read_flags, const ZTracer::Trace &parent_trace, uint64_t* version, int* object_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << "object_no=" << object_no << " " << *extents << dendl; *dispatch_result = DISPATCH_RESULT_COMPLETE; auto req = new ObjectReadRequest<I>(m_image_ctx, object_no, extents, io_context, op_flags, read_flags, parent_trace, version, on_dispatched); req->send(); return true; } template <typename I> bool ObjectDispatch<I>::discard( uint64_t object_no, uint64_t object_off, uint64_t object_len, IOContext io_context, int discard_flags, const ZTracer::Trace &parent_trace, int* object_dispatch_flags, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " " << object_off << "~" << object_len << dendl; *dispatch_result = DISPATCH_RESULT_COMPLETE; auto req = new ObjectDiscardRequest<I>(m_image_ctx, object_no, object_off, 
object_len, io_context, discard_flags, parent_trace, on_dispatched); req->send(); return true; } template <typename I> bool ObjectDispatch<I>::write( uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data, IOContext io_context, int op_flags, int write_flags, std::optional<uint64_t> assert_version, const ZTracer::Trace &parent_trace, int* object_dispatch_flags, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " " << object_off << "~" << data.length() << dendl; *dispatch_result = DISPATCH_RESULT_COMPLETE; auto req = new ObjectWriteRequest<I>(m_image_ctx, object_no, object_off, std::move(data), io_context, op_flags, write_flags, assert_version, parent_trace, on_dispatched); req->send(); return true; } template <typename I> bool ObjectDispatch<I>::write_same( uint64_t object_no, uint64_t object_off, uint64_t object_len, LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data, IOContext io_context, int op_flags, const ZTracer::Trace &parent_trace, int* object_dispatch_flags, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " " << object_off << "~" << object_len << dendl; *dispatch_result = DISPATCH_RESULT_COMPLETE; auto req = new ObjectWriteSameRequest<I>(m_image_ctx, object_no, object_off, object_len, std::move(data), io_context, op_flags, parent_trace, on_dispatched); req->send(); return true; } template <typename I> bool ObjectDispatch<I>::compare_and_write( uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data, ceph::bufferlist&& write_data, IOContext io_context, int op_flags, const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset, int* object_dispatch_flags, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** on_finish, 
Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " " << object_off << "~" << write_data.length() << dendl; *dispatch_result = DISPATCH_RESULT_COMPLETE; auto req = new ObjectCompareAndWriteRequest<I>(m_image_ctx, object_no, object_off, std::move(cmp_data), std::move(write_data), io_context, mismatch_offset, op_flags, parent_trace, on_dispatched); req->send(); return true; } template <typename I> bool ObjectDispatch<I>::list_snaps( uint64_t object_no, io::Extents&& extents, SnapIds&& snap_ids, int list_snap_flags, const ZTracer::Trace &parent_trace, SnapshotDelta* snapshot_delta, int* object_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " " << "extents=" << extents << ", " << "snap_ids=" << snap_ids << dendl; *dispatch_result = DISPATCH_RESULT_COMPLETE; auto req = ObjectListSnapsRequest<I>::create( m_image_ctx, object_no, std::move(extents), std::move(snap_ids), list_snap_flags, parent_trace, snapshot_delta, on_dispatched); req->send(); return true; } } // namespace io } // namespace librbd template class librbd::io::ObjectDispatch<librbd::ImageCtx>;
6,378
38.376543
79
cc
null
ceph-main/src/librbd/io/ObjectDispatch.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IO_OBJECT_DISPATCH_H #define CEPH_LIBRBD_IO_OBJECT_DISPATCH_H #include "include/int_types.h" #include "include/buffer.h" #include "include/rados/librados.hpp" #include "common/zipkin_trace.h" #include "librbd/io/Types.h" #include "librbd/io/ObjectDispatchInterface.h" struct Context; namespace librbd { struct ImageCtx; namespace io { struct AioCompletion; template <typename ImageCtxT = librbd::ImageCtx> class ObjectDispatch : public ObjectDispatchInterface { public: ObjectDispatch(ImageCtxT* image_ctx); ObjectDispatchLayer get_dispatch_layer() const override { return OBJECT_DISPATCH_LAYER_CORE; } void shut_down(Context* on_finish) override; bool read( uint64_t object_no, ReadExtents* extents, IOContext io_context, int op_flags, int read_flags, const ZTracer::Trace &parent_trace, uint64_t* version, int* object_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool discard( uint64_t object_no, uint64_t object_off, uint64_t object_len, IOContext io_context, int discard_flags, const ZTracer::Trace &parent_trace, int* object_dispatch_flags, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool write( uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data, IOContext io_context, int op_flags, int write_flags, std::optional<uint64_t> assert_version, const ZTracer::Trace &parent_trace, int* object_dispatch_flags, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool write_same( uint64_t object_no, uint64_t object_off, uint64_t object_len, LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data, IOContext io_context, int op_flags, const ZTracer::Trace &parent_trace, int* object_dispatch_flags, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** 
on_finish, Context* on_dispatched) override; bool compare_and_write( uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data, ceph::bufferlist&& write_data, IOContext io_context, int op_flags, const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset, int* object_dispatch_flags, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool flush( FlushSource flush_source, const ZTracer::Trace &parent_trace, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override { return false; } bool list_snaps( uint64_t object_no, io::Extents&& extents, SnapIds&& snap_ids, int list_snap_flags, const ZTracer::Trace &parent_trace, SnapshotDelta* snapshot_delta, int* object_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool invalidate_cache(Context* on_finish) override { return false; } bool reset_existence_cache(Context* on_finish) override { return false; } void extent_overwritten( uint64_t object_no, uint64_t object_off, uint64_t object_len, uint64_t journal_tid, uint64_t new_journal_tid) override { } int prepare_copyup( uint64_t object_no, SnapshotSparseBufferlist* snapshot_sparse_bufferlist) override { return 0; } private: ImageCtxT* m_image_ctx; }; } // namespace io } // namespace librbd extern template class librbd::io::ObjectDispatch<librbd::ImageCtx>; #endif // CEPH_LIBRBD_IO_OBJECT_DISPATCH_H
3,809
31.844828
75
h
null
ceph-main/src/librbd/io/ObjectDispatchInterface.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IO_OBJECT_DISPATCH_INTERFACE_H #define CEPH_LIBRBD_IO_OBJECT_DISPATCH_INTERFACE_H #include "include/int_types.h" #include "include/buffer.h" #include "include/rados/librados.hpp" #include "common/zipkin_trace.h" #include "librbd/Types.h" #include "librbd/io/Types.h" struct Context; struct RWLock; namespace librbd { namespace io { struct AioCompletion; struct ObjectDispatchInterface; struct ObjectDispatchSpec; struct ObjectDispatchInterface { typedef ObjectDispatchInterface Dispatch; typedef ObjectDispatchLayer DispatchLayer; typedef ObjectDispatchSpec DispatchSpec; virtual ~ObjectDispatchInterface() { } virtual ObjectDispatchLayer get_dispatch_layer() const = 0; virtual void shut_down(Context* on_finish) = 0; virtual bool read( uint64_t object_no, ReadExtents* extents, IOContext io_context, int op_flags, int read_flags, const ZTracer::Trace &parent_trace, uint64_t* version, int* object_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) = 0; virtual bool discard( uint64_t object_no, uint64_t object_off, uint64_t object_len, IOContext io_context, int discard_flags, const ZTracer::Trace &parent_trace, int* object_dispatch_flags, uint64_t* journal_tid, DispatchResult* dispatch_result, Context**on_finish, Context* on_dispatched) = 0; virtual bool write( uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data, IOContext io_context, int op_flags, int write_flags, std::optional<uint64_t> assert_version, const ZTracer::Trace &parent_trace, int* object_dispatch_flags, uint64_t* journal_tid, DispatchResult* dispatch_result, Context**on_finish, Context* on_dispatched) = 0; virtual bool write_same( uint64_t object_no, uint64_t object_off, uint64_t object_len, LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data, IOContext io_context, int op_flags, const ZTracer::Trace &parent_trace, int* 
object_dispatch_flags, uint64_t* journal_tid, DispatchResult* dispatch_result, Context**on_finish, Context* on_dispatched) = 0; virtual bool compare_and_write( uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data, ceph::bufferlist&& write_data, IOContext io_context, int op_flags, const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset, int* object_dispatch_flags, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) = 0; virtual bool flush( FlushSource flush_source, const ZTracer::Trace &parent_trace, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) = 0; virtual bool list_snaps( uint64_t object_no, Extents&& extents, SnapIds&& snap_ids, int list_snap_flags, const ZTracer::Trace &parent_trace, SnapshotDelta* snapshot_delta, int* object_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) = 0; virtual bool invalidate_cache(Context* on_finish) = 0; virtual bool reset_existence_cache(Context* on_finish) = 0; virtual void extent_overwritten( uint64_t object_no, uint64_t object_off, uint64_t object_len, uint64_t journal_tid, uint64_t new_journal_tid) = 0; virtual int prepare_copyup( uint64_t object_no, SnapshotSparseBufferlist* snapshot_sparse_bufferlist) = 0; }; } // namespace io } // namespace librbd #endif // CEPH_LIBRBD_IO_OBJECT_DISPATCH_INTERFACE_H
3,719
35.116505
77
h
null
ceph-main/src/librbd/io/ObjectDispatchSpec.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/io/ObjectDispatchSpec.h" #include "include/Context.h" #include "librbd/io/ObjectDispatcherInterface.h" #include <boost/variant.hpp> namespace librbd { namespace io { void ObjectDispatchSpec::C_Dispatcher::complete(int r) { if (r < 0) { finish(r); return; } switch (object_dispatch_spec->dispatch_result) { case DISPATCH_RESULT_CONTINUE: object_dispatch_spec->send(); break; case DISPATCH_RESULT_COMPLETE: finish(r); break; case DISPATCH_RESULT_INVALID: case DISPATCH_RESULT_RESTART: ceph_abort(); break; } } void ObjectDispatchSpec::C_Dispatcher::finish(int r) { on_finish->complete(r); delete object_dispatch_spec; } void ObjectDispatchSpec::send() { object_dispatcher->send(this); } void ObjectDispatchSpec::fail(int r) { ceph_assert(r < 0); dispatcher_ctx.complete(r); } } // namespace io } // namespace librbd
995
19.75
70
cc
null
ceph-main/src/librbd/io/ObjectDispatchSpec.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_IO_OBJECT_DISPATCH_SPEC_H
#define CEPH_LIBRBD_IO_OBJECT_DISPATCH_SPEC_H

#include "include/int_types.h"
#include "include/buffer.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "common/zipkin_trace.h"
#include "librbd/Types.h"
#include "librbd/io/Types.h"
#include <boost/variant/variant.hpp>

namespace librbd {
namespace io {

struct ObjectDispatcherInterface;

// Self-contained description of one object-level I/O request as it moves
// through the object dispatch layers.  The request payload is held in a
// boost::variant (one alternative per operation type); the static create_*
// factories build the spec and bind it to the image's object dispatcher.
// The spec owns its own completion plumbing (C_Dispatcher) and is deleted
// when that completion finishes.
struct ObjectDispatchSpec {
private:
  // helper to avoid extra heap allocation per object IO
  struct C_Dispatcher : public Context {
    ObjectDispatchSpec* object_dispatch_spec;
    Context* on_finish;

    C_Dispatcher(ObjectDispatchSpec* object_dispatch_spec,
                 Context* on_finish)
      : object_dispatch_spec(object_dispatch_spec),
        on_finish(on_finish) {
    }

    void complete(int r) override;
    void finish(int r) override;
  };

public:
  // common base: every object request targets a single object by number
  struct RequestBase {
    uint64_t object_no;

    RequestBase(uint64_t object_no) : object_no(object_no) {
    }
  };

  // read one or more extents; optionally reports the object version
  struct ReadRequest : public RequestBase {
    ReadExtents* extents;
    int read_flags;
    uint64_t* version;

    ReadRequest(uint64_t object_no, ReadExtents* extents, int read_flags,
                uint64_t* version)
      : RequestBase(object_no), extents(extents), read_flags(read_flags),
        version(version) {
    }
  };

  // common base for all modifying requests (carries journal ordering tid)
  struct WriteRequestBase : public RequestBase {
    uint64_t object_off;
    uint64_t journal_tid;

    WriteRequestBase(uint64_t object_no, uint64_t object_off,
                     uint64_t journal_tid)
      : RequestBase(object_no), object_off(object_off),
        journal_tid(journal_tid) {
    }
  };

  struct DiscardRequest : public WriteRequestBase {
    uint64_t object_len;
    int discard_flags;

    DiscardRequest(uint64_t object_no, uint64_t object_off,
                   uint64_t object_len, int discard_flags,
                   uint64_t journal_tid)
      : WriteRequestBase(object_no, object_off, journal_tid),
        object_len(object_len), discard_flags(discard_flags) {
    }
  };

  // plain write; assert_version (if set) makes the write conditional on
  // the object's current version
  struct WriteRequest : public WriteRequestBase {
    ceph::bufferlist data;
    int write_flags;
    std::optional<uint64_t> assert_version;

    WriteRequest(uint64_t object_no, uint64_t object_off,
                 ceph::bufferlist&& data, int write_flags,
                 std::optional<uint64_t> assert_version,
                 uint64_t journal_tid)
      : WriteRequestBase(object_no, object_off, journal_tid),
        data(std::move(data)), write_flags(write_flags),
        assert_version(assert_version) {
    }
  };

  // repeat the supplied data pattern across object_len bytes
  struct WriteSameRequest : public WriteRequestBase {
    uint64_t object_len;
    LightweightBufferExtents buffer_extents;
    ceph::bufferlist data;

    WriteSameRequest(uint64_t object_no, uint64_t object_off,
                     uint64_t object_len,
                     LightweightBufferExtents&& buffer_extents,
                     ceph::bufferlist&& data, uint64_t journal_tid)
      : WriteRequestBase(object_no, object_off, journal_tid),
        object_len(object_len),
        buffer_extents(std::move(buffer_extents)), data(std::move(data)) {
    }
  };

  // write `data` only if the on-disk bytes match `cmp_data`; on mismatch
  // the offset of the first difference is stored in *mismatch_offset
  struct CompareAndWriteRequest : public WriteRequestBase {
    ceph::bufferlist cmp_data;
    ceph::bufferlist data;
    uint64_t* mismatch_offset;

    CompareAndWriteRequest(uint64_t object_no, uint64_t object_off,
                           ceph::bufferlist&& cmp_data,
                           ceph::bufferlist&& data,
                           uint64_t* mismatch_offset, uint64_t journal_tid)
      : WriteRequestBase(object_no, object_off, journal_tid),
        cmp_data(std::move(cmp_data)), data(std::move(data)),
        mismatch_offset(mismatch_offset) {
    }
  };

  // flush is image-wide, so it does not derive from RequestBase
  struct FlushRequest {
    FlushSource flush_source;
    uint64_t journal_tid;

    FlushRequest(FlushSource flush_source, uint64_t journal_tid)
      : flush_source(flush_source), journal_tid(journal_tid) {
    }
  };

  // enumerate snapshot deltas for the given extents/snap ids
  struct ListSnapsRequest : public RequestBase {
    Extents extents;
    SnapIds snap_ids;
    int list_snaps_flags;
    SnapshotDelta* snapshot_delta;

    ListSnapsRequest(uint64_t object_no, Extents&& extents,
                     SnapIds&& snap_ids, int list_snaps_flags,
                     SnapshotDelta* snapshot_delta)
      : RequestBase(object_no), extents(std::move(extents)),
        snap_ids(std::move(snap_ids)),
        list_snaps_flags(list_snaps_flags),
        snapshot_delta(snapshot_delta) {
    }
  };

  typedef boost::variant<ReadRequest,
                         DiscardRequest,
                         WriteRequest,
                         WriteSameRequest,
                         CompareAndWriteRequest,
                         FlushRequest,
                         ListSnapsRequest> Request;

  // completion that routes back through the dispatcher (see .cc)
  C_Dispatcher dispatcher_ctx;

  ObjectDispatcherInterface* object_dispatcher;
  // layer at which processing should (re-)start
  ObjectDispatchLayer dispatch_layer;
  int object_dispatch_flags = 0;
  // updated by each layer to steer C_Dispatcher::complete()
  DispatchResult dispatch_result = DISPATCH_RESULT_INVALID;

  Request request;
  IOContext io_context;
  int op_flags;
  ZTracer::Trace parent_trace;

  template <typename ImageCtxT>
  static ObjectDispatchSpec* create_read(
      ImageCtxT* image_ctx, ObjectDispatchLayer object_dispatch_layer,
      uint64_t object_no, ReadExtents* extents, IOContext io_context,
      int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
      uint64_t* version, Context* on_finish) {
    return new ObjectDispatchSpec(image_ctx->io_object_dispatcher,
                                  object_dispatch_layer,
                                  ReadRequest{object_no, extents,
                                              read_flags, version},
                                  io_context, op_flags, parent_trace,
                                  on_finish);
  }

  template <typename ImageCtxT>
  static ObjectDispatchSpec* create_discard(
      ImageCtxT* image_ctx, ObjectDispatchLayer object_dispatch_layer,
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      IOContext io_context, int discard_flags, uint64_t journal_tid,
      const ZTracer::Trace &parent_trace, Context *on_finish) {
    return new ObjectDispatchSpec(image_ctx->io_object_dispatcher,
                                  object_dispatch_layer,
                                  DiscardRequest{object_no, object_off,
                                                 object_len, discard_flags,
                                                 journal_tid},
                                  io_context, 0, parent_trace, on_finish);
  }

  template <typename ImageCtxT>
  static ObjectDispatchSpec* create_write(
      ImageCtxT* image_ctx, ObjectDispatchLayer object_dispatch_layer,
      uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
      IOContext io_context, int op_flags, int write_flags,
      std::optional<uint64_t> assert_version, uint64_t journal_tid,
      const ZTracer::Trace &parent_trace, Context *on_finish) {
    return new ObjectDispatchSpec(image_ctx->io_object_dispatcher,
                                  object_dispatch_layer,
                                  WriteRequest{object_no, object_off,
                                               std::move(data), write_flags,
                                               assert_version, journal_tid},
                                  io_context, op_flags, parent_trace,
                                  on_finish);
  }

  template <typename ImageCtxT>
  static ObjectDispatchSpec* create_write_same(
      ImageCtxT* image_ctx, ObjectDispatchLayer object_dispatch_layer,
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
      IOContext io_context, int op_flags, uint64_t journal_tid,
      const ZTracer::Trace &parent_trace, Context *on_finish) {
    return new ObjectDispatchSpec(image_ctx->io_object_dispatcher,
                                  object_dispatch_layer,
                                  WriteSameRequest{object_no, object_off,
                                                   object_len,
                                                   std::move(buffer_extents),
                                                   std::move(data),
                                                   journal_tid},
                                  io_context, op_flags, parent_trace,
                                  on_finish);
  }

  template <typename ImageCtxT>
  static ObjectDispatchSpec* create_compare_and_write(
      ImageCtxT* image_ctx, ObjectDispatchLayer object_dispatch_layer,
      uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
      ceph::bufferlist&& write_data, IOContext io_context,
      uint64_t *mismatch_offset, int op_flags, uint64_t journal_tid,
      const ZTracer::Trace &parent_trace, Context *on_finish) {
    return new ObjectDispatchSpec(image_ctx->io_object_dispatcher,
                                  object_dispatch_layer,
                                  CompareAndWriteRequest{object_no,
                                                         object_off,
                                                         std::move(cmp_data),
                                                         std::move(write_data),
                                                         mismatch_offset,
                                                         journal_tid},
                                  io_context, op_flags, parent_trace,
                                  on_finish);
  }

  template <typename ImageCtxT>
  static ObjectDispatchSpec* create_flush(
      ImageCtxT* image_ctx, ObjectDispatchLayer object_dispatch_layer,
      FlushSource flush_source, uint64_t journal_tid,
      const ZTracer::Trace &parent_trace, Context *on_finish) {
    // flush carries no IOContext / op flags
    return new ObjectDispatchSpec(image_ctx->io_object_dispatcher,
                                  object_dispatch_layer,
                                  FlushRequest{flush_source, journal_tid},
                                  {}, 0, parent_trace, on_finish);
  }

  template <typename ImageCtxT>
  static ObjectDispatchSpec* create_list_snaps(
      ImageCtxT* image_ctx, ObjectDispatchLayer object_dispatch_layer,
      uint64_t object_no, Extents&& extents, SnapIds&& snap_ids,
      int list_snaps_flags, const ZTracer::Trace &parent_trace,
      SnapshotDelta* snapshot_delta, Context* on_finish) {
    return new ObjectDispatchSpec(image_ctx->io_object_dispatcher,
                                  object_dispatch_layer,
                                  ListSnapsRequest{object_no,
                                                   std::move(extents),
                                                   std::move(snap_ids),
                                                   list_snaps_flags,
                                                   snapshot_delta},
                                  {}, 0, parent_trace, on_finish);
  }

  void send();
  void fail(int r);

private:
  template <typename> friend class ObjectDispatcher;

  ObjectDispatchSpec(ObjectDispatcherInterface* object_dispatcher,
                     ObjectDispatchLayer object_dispatch_layer,
                     Request&& request, IOContext io_context, int op_flags,
                     const ZTracer::Trace& parent_trace, Context* on_finish)
    : dispatcher_ctx(this, on_finish), object_dispatcher(object_dispatcher),
      dispatch_layer(object_dispatch_layer), request(std::move(request)),
      io_context(io_context), op_flags(op_flags),
      parent_trace(parent_trace) {
  }
};

} // namespace io
} // namespace librbd

#endif // CEPH_LIBRBD_IO_OBJECT_DISPATCH_SPEC_H
11,715
38.581081
80
h
null
ceph-main/src/librbd/io/ObjectDispatcher.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/io/ObjectDispatcher.h" #include "include/Context.h" #include "common/AsyncOpTracker.h" #include "common/dout.h" #include "librbd/ImageCtx.h" #include "librbd/Utils.h" #include "librbd/asio/ContextWQ.h" #include "librbd/io/ObjectDispatch.h" #include "librbd/io/ObjectDispatchSpec.h" #include <boost/variant.hpp> #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::io::ObjectDispatcher: " << this \ << " " << __func__ << ": " namespace librbd { namespace io { template <typename I> struct ObjectDispatcher<I>::C_ResetExistenceCache : public C_LayerIterator { C_ResetExistenceCache(ObjectDispatcher* object_dispatcher, Context* on_finish) : C_LayerIterator(object_dispatcher, OBJECT_DISPATCH_LAYER_NONE, on_finish) { } bool execute(ObjectDispatchInterface* object_dispatch, Context* on_finish) override { return object_dispatch->reset_existence_cache(on_finish); } }; template <typename I> struct ObjectDispatcher<I>::SendVisitor : public boost::static_visitor<bool> { ObjectDispatchInterface* object_dispatch; ObjectDispatchSpec* object_dispatch_spec; SendVisitor(ObjectDispatchInterface* object_dispatch, ObjectDispatchSpec* object_dispatch_spec) : object_dispatch(object_dispatch), object_dispatch_spec(object_dispatch_spec) { } bool operator()(ObjectDispatchSpec::ReadRequest& read) const { return object_dispatch->read( read.object_no, read.extents, object_dispatch_spec->io_context, object_dispatch_spec->op_flags, read.read_flags, object_dispatch_spec->parent_trace, read.version, &object_dispatch_spec->object_dispatch_flags, &object_dispatch_spec->dispatch_result, &object_dispatch_spec->dispatcher_ctx.on_finish, &object_dispatch_spec->dispatcher_ctx); } bool operator()(ObjectDispatchSpec::DiscardRequest& discard) const { return object_dispatch->discard( discard.object_no, discard.object_off, discard.object_len, 
object_dispatch_spec->io_context, discard.discard_flags, object_dispatch_spec->parent_trace, &object_dispatch_spec->object_dispatch_flags, &discard.journal_tid, &object_dispatch_spec->dispatch_result, &object_dispatch_spec->dispatcher_ctx.on_finish, &object_dispatch_spec->dispatcher_ctx); } bool operator()(ObjectDispatchSpec::WriteRequest& write) const { return object_dispatch->write( write.object_no, write.object_off, std::move(write.data), object_dispatch_spec->io_context, object_dispatch_spec->op_flags, write.write_flags, write.assert_version, object_dispatch_spec->parent_trace, &object_dispatch_spec->object_dispatch_flags, &write.journal_tid, &object_dispatch_spec->dispatch_result, &object_dispatch_spec->dispatcher_ctx.on_finish, &object_dispatch_spec->dispatcher_ctx); } bool operator()(ObjectDispatchSpec::WriteSameRequest& write_same) const { return object_dispatch->write_same( write_same.object_no, write_same.object_off, write_same.object_len, std::move(write_same.buffer_extents), std::move(write_same.data), object_dispatch_spec->io_context, object_dispatch_spec->op_flags, object_dispatch_spec->parent_trace, &object_dispatch_spec->object_dispatch_flags, &write_same.journal_tid, &object_dispatch_spec->dispatch_result, &object_dispatch_spec->dispatcher_ctx.on_finish, &object_dispatch_spec->dispatcher_ctx); } bool operator()( ObjectDispatchSpec::CompareAndWriteRequest& compare_and_write) const { return object_dispatch->compare_and_write( compare_and_write.object_no, compare_and_write.object_off, std::move(compare_and_write.cmp_data), std::move(compare_and_write.data), object_dispatch_spec->io_context, object_dispatch_spec->op_flags, object_dispatch_spec->parent_trace, compare_and_write.mismatch_offset, &object_dispatch_spec->object_dispatch_flags, &compare_and_write.journal_tid, &object_dispatch_spec->dispatch_result, &object_dispatch_spec->dispatcher_ctx.on_finish, &object_dispatch_spec->dispatcher_ctx); } bool operator()(ObjectDispatchSpec::FlushRequest& 
flush) const { return object_dispatch->flush( flush.flush_source, object_dispatch_spec->parent_trace, &flush.journal_tid, &object_dispatch_spec->dispatch_result, &object_dispatch_spec->dispatcher_ctx.on_finish, &object_dispatch_spec->dispatcher_ctx); } bool operator()(ObjectDispatchSpec::ListSnapsRequest& list_snaps) const { return object_dispatch->list_snaps( list_snaps.object_no, std::move(list_snaps.extents), std::move(list_snaps.snap_ids), list_snaps.list_snaps_flags, object_dispatch_spec->parent_trace, list_snaps.snapshot_delta, &object_dispatch_spec->object_dispatch_flags, &object_dispatch_spec->dispatch_result, &object_dispatch_spec->dispatcher_ctx.on_finish, &object_dispatch_spec->dispatcher_ctx); } }; template <typename I> ObjectDispatcher<I>::ObjectDispatcher(I* image_ctx) : Dispatcher<I, ObjectDispatcherInterface>(image_ctx) { // configure the core object dispatch handler on startup auto object_dispatch = new ObjectDispatch(image_ctx); this->register_dispatch(object_dispatch); } template <typename I> void ObjectDispatcher<I>::invalidate_cache(Context* on_finish) { auto image_ctx = this->m_image_ctx; auto cct = image_ctx->cct; ldout(cct, 5) << dendl; on_finish = util::create_async_context_callback(*image_ctx, on_finish); auto ctx = new C_InvalidateCache( this, OBJECT_DISPATCH_LAYER_NONE, on_finish); ctx->complete(0); } template <typename I> void ObjectDispatcher<I>::reset_existence_cache(Context* on_finish) { auto image_ctx = this->m_image_ctx; auto cct = image_ctx->cct; ldout(cct, 5) << dendl; on_finish = util::create_async_context_callback(*image_ctx, on_finish); auto ctx = new C_ResetExistenceCache(this, on_finish); ctx->complete(0); } template <typename I> void ObjectDispatcher<I>::extent_overwritten( uint64_t object_no, uint64_t object_off, uint64_t object_len, uint64_t journal_tid, uint64_t new_journal_tid) { auto cct = this->m_image_ctx->cct; ldout(cct, 20) << object_no << " " << object_off << "~" << object_len << dendl; std::shared_lock 
locker{this->m_lock}; for (auto it : this->m_dispatches) { auto& object_dispatch_meta = it.second; auto object_dispatch = object_dispatch_meta.dispatch; object_dispatch->extent_overwritten(object_no, object_off, object_len, journal_tid, new_journal_tid); } } template <typename I> int ObjectDispatcher<I>::prepare_copyup( uint64_t object_no, SnapshotSparseBufferlist* snapshot_sparse_bufferlist) { auto cct = this->m_image_ctx->cct; ldout(cct, 20) << "object_no=" << object_no << dendl; std::shared_lock locker{this->m_lock}; for (auto it : this->m_dispatches) { auto& object_dispatch_meta = it.second; auto object_dispatch = object_dispatch_meta.dispatch; auto r = object_dispatch->prepare_copyup( object_no, snapshot_sparse_bufferlist); if (r < 0) { return r; } } return 0; } template <typename I> bool ObjectDispatcher<I>::send_dispatch( ObjectDispatchInterface* object_dispatch, ObjectDispatchSpec* object_dispatch_spec) { return boost::apply_visitor( SendVisitor{object_dispatch, object_dispatch_spec}, object_dispatch_spec->request); } } // namespace io } // namespace librbd template class librbd::io::ObjectDispatcher<librbd::ImageCtx>;
7,792
36.287081
81
cc
null
ceph-main/src/librbd/io/ObjectDispatcher.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IO_OBJECT_DISPATCHER_H #define CEPH_LIBRBD_IO_OBJECT_DISPATCHER_H #include "include/int_types.h" #include "common/ceph_mutex.h" #include "librbd/io/Dispatcher.h" #include "librbd/io/ObjectDispatchInterface.h" #include "librbd/io/ObjectDispatchSpec.h" #include "librbd/io/ObjectDispatcherInterface.h" #include "librbd/io/Types.h" #include <map> struct Context; namespace librbd { struct ImageCtx; namespace io { template <typename ImageCtxT = ImageCtx> class ObjectDispatcher : public Dispatcher<ImageCtxT, ObjectDispatcherInterface> { public: ObjectDispatcher(ImageCtxT* image_ctx); void invalidate_cache(Context* on_finish) override; void reset_existence_cache(Context* on_finish) override; void extent_overwritten( uint64_t object_no, uint64_t object_off, uint64_t object_len, uint64_t journal_tid, uint64_t new_journal_tid) override; int prepare_copyup( uint64_t object_no, SnapshotSparseBufferlist* snapshot_sparse_bufferlist) override; using typename Dispatcher<ImageCtxT, ObjectDispatcherInterface>::C_LayerIterator; using typename Dispatcher<ImageCtxT, ObjectDispatcherInterface>::C_InvalidateCache; protected: bool send_dispatch(ObjectDispatchInterface* object_dispatch, ObjectDispatchSpec* object_dispatch_spec) override; private: struct C_ResetExistenceCache; struct SendVisitor; }; } // namespace io } // namespace librbd extern template class librbd::io::ObjectDispatcher<librbd::ImageCtx>; #endif // CEPH_LIBRBD_IO_OBJECT_DISPATCHER_H
1,651
26.081967
85
h
null
ceph-main/src/librbd/io/ObjectDispatcherInterface.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IO_OBJECT_DISPATCHER_INTERFACE_H #define CEPH_LIBRBD_IO_OBJECT_DISPATCHER_INTERFACE_H #include "include/int_types.h" #include "librbd/io/DispatcherInterface.h" #include "librbd/io/ObjectDispatchInterface.h" struct Context; namespace librbd { namespace io { struct ObjectDispatcherInterface : public DispatcherInterface<ObjectDispatchInterface> { public: virtual void invalidate_cache(Context* on_finish) = 0; virtual void reset_existence_cache(Context* on_finish) = 0; virtual void extent_overwritten( uint64_t object_no, uint64_t object_off, uint64_t object_len, uint64_t journal_tid, uint64_t new_journal_tid) = 0; virtual int prepare_copyup( uint64_t object_no, SnapshotSparseBufferlist* snapshot_sparse_bufferlist) = 0; }; } // namespace io } // namespace librbd #endif // CEPH_LIBRBD_IO_OBJECT_DISPATCHER_INTERFACE_H
984
26.361111
70
h
null
ceph-main/src/librbd/io/ObjectRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/io/ObjectRequest.h" #include "common/ceph_context.h" #include "common/dout.h" #include "common/errno.h" #include "common/ceph_mutex.h" #include "include/Context.h" #include "include/err.h" #include "include/neorados/RADOS.hpp" #include "osd/osd_types.h" #include "librados/snap_set_diff.h" #include "librbd/AsioEngine.h" #include "librbd/ExclusiveLock.h" #include "librbd/ImageCtx.h" #include "librbd/ObjectMap.h" #include "librbd/Utils.h" #include "librbd/asio/Utils.h" #include "librbd/io/AioCompletion.h" #include "librbd/io/CopyupRequest.h" #include "librbd/io/ImageRequest.h" #include "librbd/io/Utils.h" #include <boost/optional.hpp> #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::io::ObjectRequest: " << this \ << " " << __func__ << ": " \ << data_object_name(this->m_ictx, \ this->m_object_no) << " " namespace librbd { namespace io { using librbd::util::data_object_name; using librbd::util::create_context_callback; using librbd::util::create_trace; namespace { template <typename I> inline bool is_copy_on_read(I *ictx, const IOContext& io_context) { std::shared_lock image_locker{ictx->image_lock}; return (ictx->clone_copy_on_read && !ictx->read_only && io_context->read_snap().value_or(CEPH_NOSNAP) == CEPH_NOSNAP && (ictx->exclusive_lock == nullptr || ictx->exclusive_lock->is_lock_owner())); } template <typename S, typename D> void convert_snap_set(const S& src_snap_set, D* dst_snap_set) { dst_snap_set->seq = src_snap_set.seq; dst_snap_set->clones.reserve(src_snap_set.clones.size()); for (auto& src_clone : src_snap_set.clones) { dst_snap_set->clones.emplace_back(); auto& dst_clone = dst_snap_set->clones.back(); dst_clone.cloneid = src_clone.cloneid; dst_clone.snaps = src_clone.snaps; dst_clone.overlap = src_clone.overlap; dst_clone.size = src_clone.size; } } } // anonymous namespace template <typename I> 
ObjectRequest<I>* ObjectRequest<I>::create_write( I *ictx, uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data, IOContext io_context, int op_flags, int write_flags, std::optional<uint64_t> assert_version, const ZTracer::Trace &parent_trace, Context *completion) { return new ObjectWriteRequest<I>(ictx, object_no, object_off, std::move(data), io_context, op_flags, write_flags, assert_version, parent_trace, completion); } template <typename I> ObjectRequest<I>* ObjectRequest<I>::create_discard( I *ictx, uint64_t object_no, uint64_t object_off, uint64_t object_len, IOContext io_context, int discard_flags, const ZTracer::Trace &parent_trace, Context *completion) { return new ObjectDiscardRequest<I>(ictx, object_no, object_off, object_len, io_context, discard_flags, parent_trace, completion); } template <typename I> ObjectRequest<I>* ObjectRequest<I>::create_write_same( I *ictx, uint64_t object_no, uint64_t object_off, uint64_t object_len, ceph::bufferlist&& data, IOContext io_context, int op_flags, const ZTracer::Trace &parent_trace, Context *completion) { return new ObjectWriteSameRequest<I>(ictx, object_no, object_off, object_len, std::move(data), io_context, op_flags, parent_trace, completion); } template <typename I> ObjectRequest<I>* ObjectRequest<I>::create_compare_and_write( I *ictx, uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data, ceph::bufferlist&& write_data, IOContext io_context, uint64_t *mismatch_offset, int op_flags, const ZTracer::Trace &parent_trace, Context *completion) { return new ObjectCompareAndWriteRequest<I>(ictx, object_no, object_off, std::move(cmp_data), std::move(write_data), io_context, mismatch_offset, op_flags, parent_trace, completion); } template <typename I> ObjectRequest<I>::ObjectRequest( I *ictx, uint64_t objectno, IOContext io_context, const char *trace_name, const ZTracer::Trace &trace, Context *completion) : m_ictx(ictx), m_object_no(objectno), m_io_context(io_context), m_completion(completion), 
m_trace(create_trace(*ictx, "", trace)) { ceph_assert(m_ictx->data_ctx.is_valid()); if (m_trace.valid()) { m_trace.copy_name(trace_name + std::string(" ") + data_object_name(ictx, objectno)); m_trace.event("start"); } } template <typename I> void ObjectRequest<I>::add_write_hint(I& image_ctx, neorados::WriteOp* wr) { auto alloc_hint_flags = static_cast<neorados::alloc_hint::alloc_hint_t>( image_ctx.alloc_hint_flags); if (image_ctx.enable_alloc_hint) { wr->set_alloc_hint(image_ctx.get_object_size(), image_ctx.get_object_size(), alloc_hint_flags); } else if (image_ctx.alloc_hint_flags != 0U) { wr->set_alloc_hint(0, 0, alloc_hint_flags); } } template <typename I> bool ObjectRequest<I>::compute_parent_extents(Extents *parent_extents, ImageArea *area, bool read_request) { ceph_assert(ceph_mutex_is_locked(m_ictx->image_lock)); m_has_parent = false; parent_extents->clear(); *area = ImageArea::DATA; uint64_t raw_overlap; int r = m_ictx->get_parent_overlap( m_io_context->read_snap().value_or(CEPH_NOSNAP), &raw_overlap); if (r < 0) { // NOTE: it's possible for a snapshot to be deleted while we are // still reading from it lderr(m_ictx->cct) << "failed to retrieve parent overlap: " << cpp_strerror(r) << dendl; return false; } bool migration_write = !read_request && !m_ictx->migration_info.empty(); if (migration_write) { raw_overlap = m_ictx->migration_info.overlap; } if (raw_overlap == 0) { return false; } std::tie(*parent_extents, *area) = io::util::object_to_area_extents( m_ictx, m_object_no, {{0, m_ictx->layout.object_size}}); uint64_t object_overlap = m_ictx->prune_parent_extents( *parent_extents, *area, raw_overlap, migration_write); if (object_overlap > 0) { m_has_parent = true; return true; } return false; } template <typename I> void ObjectRequest<I>::async_finish(int r) { ldout(m_ictx->cct, 20) << "r=" << r << dendl; m_ictx->asio_engine->post([this, r]() { finish(r); }); } template <typename I> void ObjectRequest<I>::finish(int r) { ldout(m_ictx->cct, 20) << "r=" << 
r << dendl; m_completion->complete(r); delete this; } /** read **/ template <typename I> ObjectReadRequest<I>::ObjectReadRequest( I *ictx, uint64_t objectno, ReadExtents* extents, IOContext io_context, int op_flags, int read_flags, const ZTracer::Trace &parent_trace, uint64_t* version, Context *completion) : ObjectRequest<I>(ictx, objectno, io_context, "read", parent_trace, completion), m_extents(extents), m_op_flags(op_flags),m_read_flags(read_flags), m_version(version) { } template <typename I> void ObjectReadRequest<I>::send() { I *image_ctx = this->m_ictx; ldout(image_ctx->cct, 20) << dendl; read_object(); } template <typename I> void ObjectReadRequest<I>::read_object() { I *image_ctx = this->m_ictx; std::shared_lock image_locker{image_ctx->image_lock}; auto read_snap_id = this->m_io_context->read_snap().value_or(CEPH_NOSNAP); if (read_snap_id == image_ctx->snap_id && image_ctx->object_map != nullptr && !image_ctx->object_map->object_may_exist(this->m_object_no)) { image_ctx->asio_engine->post([this]() { read_parent(); }); return; } image_locker.unlock(); ldout(image_ctx->cct, 20) << "snap_id=" << read_snap_id << dendl; neorados::ReadOp read_op; for (auto& extent: *this->m_extents) { if (extent.length >= image_ctx->sparse_read_threshold_bytes) { read_op.sparse_read(extent.offset, extent.length, &extent.bl, &extent.extent_map); } else { read_op.read(extent.offset, extent.length, &extent.bl); } } util::apply_op_flags( m_op_flags, image_ctx->get_read_flags(read_snap_id), &read_op); image_ctx->rados_api.execute( {data_object_name(this->m_ictx, this->m_object_no)}, *this->m_io_context, std::move(read_op), nullptr, librbd::asio::util::get_callback_adapter( [this](int r) { handle_read_object(r); }), m_version, (this->m_trace.valid() ? 
this->m_trace.get_info() : nullptr)); } template <typename I> void ObjectReadRequest<I>::handle_read_object(int r) { I *image_ctx = this->m_ictx; ldout(image_ctx->cct, 20) << "r=" << r << dendl; if (m_version != nullptr) { ldout(image_ctx->cct, 20) << "version=" << *m_version << dendl; } if (r == -ENOENT) { read_parent(); return; } else if (r < 0) { lderr(image_ctx->cct) << "failed to read from object: " << cpp_strerror(r) << dendl; this->finish(r); return; } this->finish(0); } template <typename I> void ObjectReadRequest<I>::read_parent() { if ((m_read_flags & READ_FLAG_DISABLE_READ_FROM_PARENT) != 0) { this->finish(-ENOENT); return; } I *image_ctx = this->m_ictx; ldout(image_ctx->cct, 20) << dendl; auto ctx = create_context_callback< ObjectReadRequest<I>, &ObjectReadRequest<I>::handle_read_parent>(this); io::util::read_parent<I>( image_ctx, this->m_object_no, this->m_extents, this->m_io_context->read_snap().value_or(CEPH_NOSNAP), this->m_trace, ctx); } template <typename I> void ObjectReadRequest<I>::handle_read_parent(int r) { I *image_ctx = this->m_ictx; ldout(image_ctx->cct, 20) << "r=" << r << dendl; if (r == -ENOENT) { this->finish(r); return; } else if (r < 0) { lderr(image_ctx->cct) << "failed to read parent extents: " << cpp_strerror(r) << dendl; this->finish(r); return; } copyup(); } template <typename I> void ObjectReadRequest<I>::copyup() { I *image_ctx = this->m_ictx; if (!is_copy_on_read(image_ctx, this->m_io_context)) { this->finish(0); return; } image_ctx->owner_lock.lock_shared(); image_ctx->image_lock.lock_shared(); Extents parent_extents; ImageArea area; if (!this->compute_parent_extents(&parent_extents, &area, true) || (image_ctx->exclusive_lock != nullptr && !image_ctx->exclusive_lock->is_lock_owner())) { image_ctx->image_lock.unlock_shared(); image_ctx->owner_lock.unlock_shared(); this->finish(0); return; } ldout(image_ctx->cct, 20) << dendl; image_ctx->copyup_list_lock.lock(); auto it = image_ctx->copyup_list.find(this->m_object_no); if (it 
== image_ctx->copyup_list.end()) { // create and kick off a CopyupRequest auto new_req = CopyupRequest<I>::create( image_ctx, this->m_object_no, std::move(parent_extents), area, this->m_trace); image_ctx->copyup_list[this->m_object_no] = new_req; image_ctx->copyup_list_lock.unlock(); image_ctx->image_lock.unlock_shared(); new_req->send(); } else { image_ctx->copyup_list_lock.unlock(); image_ctx->image_lock.unlock_shared(); } image_ctx->owner_lock.unlock_shared(); this->finish(0); } /** write **/ template <typename I> AbstractObjectWriteRequest<I>::AbstractObjectWriteRequest( I *ictx, uint64_t object_no, uint64_t object_off, uint64_t len, IOContext io_context, const char *trace_name, const ZTracer::Trace &parent_trace, Context *completion) : ObjectRequest<I>(ictx, object_no, io_context, trace_name, parent_trace, completion), m_object_off(object_off), m_object_len(len) { if (this->m_object_off == 0 && this->m_object_len == ictx->get_object_size()) { m_full_object = true; } compute_parent_info(); ictx->image_lock.lock_shared(); if (!ictx->migration_info.empty()) { m_guarding_migration_write = true; } ictx->image_lock.unlock_shared(); } template <typename I> void AbstractObjectWriteRequest<I>::compute_parent_info() { I *image_ctx = this->m_ictx; std::shared_lock image_locker{image_ctx->image_lock}; this->compute_parent_extents(&m_parent_extents, &m_image_area, false); if (!this->has_parent() || (m_full_object && !this->m_io_context->write_snap_context() && !is_post_copyup_write_required())) { m_copyup_enabled = false; } } template <typename I> void AbstractObjectWriteRequest<I>::add_write_hint( neorados::WriteOp *wr) { I *image_ctx = this->m_ictx; std::shared_lock image_locker{image_ctx->image_lock}; if (image_ctx->object_map == nullptr || !this->m_object_may_exist || image_ctx->alloc_hint_flags != 0U) { ObjectRequest<I>::add_write_hint(*image_ctx, wr); } } template <typename I> void AbstractObjectWriteRequest<I>::send() { I *image_ctx = this->m_ictx; 
ldout(image_ctx->cct, 20) << this->get_op_type() << " " << this->m_object_off << "~" << this->m_object_len << dendl; { std::shared_lock image_lock{image_ctx->image_lock}; if (image_ctx->object_map == nullptr) { m_object_may_exist = true; } else { // should have been flushed prior to releasing lock ceph_assert(image_ctx->exclusive_lock->is_lock_owner()); m_object_may_exist = image_ctx->object_map->object_may_exist( this->m_object_no); } } if (!m_object_may_exist && is_no_op_for_nonexistent_object()) { ldout(image_ctx->cct, 20) << "skipping no-op on nonexistent object" << dendl; this->async_finish(0); return; } pre_write_object_map_update(); } template <typename I> void AbstractObjectWriteRequest<I>::pre_write_object_map_update() { I *image_ctx = this->m_ictx; image_ctx->image_lock.lock_shared(); if (image_ctx->object_map == nullptr || !is_object_map_update_enabled()) { image_ctx->image_lock.unlock_shared(); write_object(); return; } if (!m_object_may_exist && m_copyup_enabled) { // optimization: copyup required image_ctx->image_lock.unlock_shared(); copyup(); return; } uint8_t new_state = this->get_pre_write_object_map_state(); ldout(image_ctx->cct, 20) << this->m_object_off << "~" << this->m_object_len << dendl; if (image_ctx->object_map->template aio_update< AbstractObjectWriteRequest<I>, &AbstractObjectWriteRequest<I>::handle_pre_write_object_map_update>( CEPH_NOSNAP, this->m_object_no, new_state, {}, this->m_trace, false, this)) { image_ctx->image_lock.unlock_shared(); return; } image_ctx->image_lock.unlock_shared(); write_object(); } template <typename I> void AbstractObjectWriteRequest<I>::handle_pre_write_object_map_update(int r) { I *image_ctx = this->m_ictx; ldout(image_ctx->cct, 20) << "r=" << r << dendl; if (r < 0) { lderr(image_ctx->cct) << "failed to update object map: " << cpp_strerror(r) << dendl; this->finish(r); return; } write_object(); } template <typename I> void AbstractObjectWriteRequest<I>::write_object() { I *image_ctx = this->m_ictx; 
ldout(image_ctx->cct, 20) << dendl; neorados::WriteOp write_op; if (m_copyup_enabled) { if (m_guarding_migration_write) { auto snap_seq = (this->m_io_context->write_snap_context() ? this->m_io_context->write_snap_context()->first : 0); ldout(image_ctx->cct, 20) << "guarding write: snap_seq=" << snap_seq << dendl; cls_client::assert_snapc_seq( &write_op, snap_seq, cls::rbd::ASSERT_SNAPC_SEQ_LE_SNAPSET_SEQ); } else { ldout(image_ctx->cct, 20) << "guarding write" << dendl; write_op.assert_exists(); } } add_write_hint(&write_op); add_write_ops(&write_op); ceph_assert(write_op.size() != 0); image_ctx->rados_api.execute( {data_object_name(this->m_ictx, this->m_object_no)}, *this->m_io_context, std::move(write_op), librbd::asio::util::get_callback_adapter( [this](int r) { handle_write_object(r); }), nullptr, (this->m_trace.valid() ? this->m_trace.get_info() : nullptr)); } template <typename I> void AbstractObjectWriteRequest<I>::handle_write_object(int r) { I *image_ctx = this->m_ictx; ldout(image_ctx->cct, 20) << "r=" << r << dendl; r = filter_write_result(r); if (r == -ENOENT) { if (m_copyup_enabled) { copyup(); return; } } else if (r == -ERANGE && m_guarding_migration_write) { image_ctx->image_lock.lock_shared(); m_guarding_migration_write = !image_ctx->migration_info.empty(); image_ctx->image_lock.unlock_shared(); if (m_guarding_migration_write) { copyup(); } else { ldout(image_ctx->cct, 10) << "migration parent gone, restart io" << dendl; compute_parent_info(); write_object(); } return; } else if (r == -EILSEQ) { ldout(image_ctx->cct, 10) << "failed to write object" << dendl; this->finish(r); return; } else if (r < 0) { lderr(image_ctx->cct) << "failed to write object: " << cpp_strerror(r) << dendl; this->finish(r); return; } post_write_object_map_update(); } template <typename I> void AbstractObjectWriteRequest<I>::copyup() { I *image_ctx = this->m_ictx; ldout(image_ctx->cct, 20) << dendl; ceph_assert(!m_copyup_in_progress); m_copyup_in_progress = true; 
image_ctx->copyup_list_lock.lock(); auto it = image_ctx->copyup_list.find(this->m_object_no); if (it == image_ctx->copyup_list.end()) { auto new_req = CopyupRequest<I>::create( image_ctx, this->m_object_no, std::move(this->m_parent_extents), m_image_area, this->m_trace); this->m_parent_extents.clear(); // make sure to wait on this CopyupRequest new_req->append_request(this, std::move(get_copyup_overwrite_extents())); image_ctx->copyup_list[this->m_object_no] = new_req; image_ctx->copyup_list_lock.unlock(); new_req->send(); } else { it->second->append_request(this, std::move(get_copyup_overwrite_extents())); image_ctx->copyup_list_lock.unlock(); } } template <typename I> void AbstractObjectWriteRequest<I>::handle_copyup(int r) { I *image_ctx = this->m_ictx; ldout(image_ctx->cct, 20) << "r=" << r << dendl; ceph_assert(m_copyup_in_progress); m_copyup_in_progress = false; if (r < 0 && r != -ERESTART) { lderr(image_ctx->cct) << "failed to copyup object: " << cpp_strerror(r) << dendl; this->finish(r); return; } if (r == -ERESTART || is_post_copyup_write_required()) { write_object(); return; } post_write_object_map_update(); } template <typename I> void AbstractObjectWriteRequest<I>::post_write_object_map_update() { I *image_ctx = this->m_ictx; image_ctx->image_lock.lock_shared(); if (image_ctx->object_map == nullptr || !is_object_map_update_enabled() || !is_non_existent_post_write_object_map_state()) { image_ctx->image_lock.unlock_shared(); this->finish(0); return; } ldout(image_ctx->cct, 20) << dendl; // should have been flushed prior to releasing lock ceph_assert(image_ctx->exclusive_lock->is_lock_owner()); if (image_ctx->object_map->template aio_update< AbstractObjectWriteRequest<I>, &AbstractObjectWriteRequest<I>::handle_post_write_object_map_update>( CEPH_NOSNAP, this->m_object_no, OBJECT_NONEXISTENT, OBJECT_PENDING, this->m_trace, false, this)) { image_ctx->image_lock.unlock_shared(); return; } image_ctx->image_lock.unlock_shared(); this->finish(0); } template 
<typename I> void AbstractObjectWriteRequest<I>::handle_post_write_object_map_update(int r) { I *image_ctx = this->m_ictx; ldout(image_ctx->cct, 20) << "r=" << r << dendl; if (r < 0) { lderr(image_ctx->cct) << "failed to update object map: " << cpp_strerror(r) << dendl; this->finish(r); return; } this->finish(0); } template <typename I> void ObjectWriteRequest<I>::add_write_hint(neorados::WriteOp* wr) { if ((m_write_flags & OBJECT_WRITE_FLAG_CREATE_EXCLUSIVE) != 0) { wr->create(true); } else if (m_assert_version.has_value()) { wr->assert_version(m_assert_version.value()); } AbstractObjectWriteRequest<I>::add_write_hint(wr); } template <typename I> void ObjectWriteRequest<I>::add_write_ops(neorados::WriteOp* wr) { if (this->m_full_object) { wr->write_full(bufferlist{m_write_data}); } else { wr->write(this->m_object_off, bufferlist{m_write_data}); } util::apply_op_flags(m_op_flags, 0U, wr); } template <typename I> void ObjectDiscardRequest<I>::add_write_ops(neorados::WriteOp* wr) { switch (m_discard_action) { case DISCARD_ACTION_REMOVE: wr->remove(); break; case DISCARD_ACTION_REMOVE_TRUNCATE: wr->create(false); // fall through case DISCARD_ACTION_TRUNCATE: wr->truncate(this->m_object_off); break; case DISCARD_ACTION_ZERO: wr->zero(this->m_object_off, this->m_object_len); break; default: ceph_abort(); break; } } template <typename I> void ObjectWriteSameRequest<I>::add_write_ops(neorados::WriteOp* wr) { wr->writesame(this->m_object_off, this->m_object_len, bufferlist{m_write_data}); util::apply_op_flags(m_op_flags, 0U, wr); } template <typename I> void ObjectCompareAndWriteRequest<I>::add_write_ops(neorados::WriteOp* wr) { wr->cmpext(this->m_object_off, bufferlist{m_cmp_bl}, nullptr); if (this->m_full_object) { wr->write_full(bufferlist{m_write_bl}); } else { wr->write(this->m_object_off, bufferlist{m_write_bl}); } util::apply_op_flags(m_op_flags, 0U, wr); } template <typename I> int ObjectCompareAndWriteRequest<I>::filter_write_result(int r) const { if (r <= 
-MAX_ERRNO) { I *image_ctx = this->m_ictx; // object extent compare mismatch uint64_t offset = -MAX_ERRNO - r; auto [image_extents, _] = io::util::object_to_area_extents( image_ctx, this->m_object_no, {{offset, this->m_object_len}}); ceph_assert(image_extents.size() == 1); if (m_mismatch_offset) { *m_mismatch_offset = image_extents[0].first; } r = -EILSEQ; } return r; } template <typename I> ObjectListSnapsRequest<I>::ObjectListSnapsRequest( I *ictx, uint64_t objectno, Extents&& object_extents, SnapIds&& snap_ids, int list_snaps_flags, const ZTracer::Trace &parent_trace, SnapshotDelta* snapshot_delta, Context *completion) : ObjectRequest<I>( ictx, objectno, ictx->duplicate_data_io_context(), "snap_list", parent_trace, completion), m_object_extents(std::move(object_extents)), m_snap_ids(std::move(snap_ids)), m_list_snaps_flags(list_snaps_flags), m_snapshot_delta(snapshot_delta) { this->m_io_context->read_snap(CEPH_SNAPDIR); } template <typename I> void ObjectListSnapsRequest<I>::send() { I *image_ctx = this->m_ictx; ldout(image_ctx->cct, 20) << dendl; if (m_snap_ids.size() < 2) { lderr(image_ctx->cct) << "invalid snap ids: " << m_snap_ids << dendl; this->async_finish(-EINVAL); return; } list_snaps(); } template <typename I> void ObjectListSnapsRequest<I>::list_snaps() { I *image_ctx = this->m_ictx; ldout(image_ctx->cct, 20) << dendl; neorados::ReadOp read_op; read_op.list_snaps(&m_snap_set, &m_ec); image_ctx->rados_api.execute( {data_object_name(this->m_ictx, this->m_object_no)}, *this->m_io_context, std::move(read_op), nullptr, librbd::asio::util::get_callback_adapter( [this](int r) { handle_list_snaps(r); }), nullptr, (this->m_trace.valid() ? 
this->m_trace.get_info() : nullptr)); } template <typename I> void ObjectListSnapsRequest<I>::handle_list_snaps(int r) { I *image_ctx = this->m_ictx; auto cct = image_ctx->cct; if (r >= 0) { r = -m_ec.value(); } ldout(cct, 20) << "r=" << r << dendl; m_snapshot_delta->clear(); auto& snapshot_delta = *m_snapshot_delta; ceph_assert(!m_snap_ids.empty()); librados::snap_t start_snap_id = 0; librados::snap_t first_snap_id = *m_snap_ids.begin(); librados::snap_t last_snap_id = *m_snap_ids.rbegin(); if (r == -ENOENT) { // the object does not exist -- mark the missing extents zero_extent(first_snap_id, true); list_from_parent(); return; } else if (r < 0) { lderr(cct) << "failed to retrieve object snapshot list: " << cpp_strerror(r) << dendl; this->finish(r); return; } // helper function requires the librados legacy data structure librados::snap_set_t snap_set; convert_snap_set(m_snap_set, &snap_set); bool initial_extents_written = false; interval_set<uint64_t> object_interval; for (auto& object_extent : m_object_extents) { object_interval.insert(object_extent.first, object_extent.second); } ldout(cct, 20) << "object_interval=" << object_interval << dendl; // loop through all expected snapshots and build interval sets for // data and zeroed ranges for each snapshot uint64_t prev_end_size = 0; interval_set<uint64_t> initial_written_extents; for (auto end_snap_id : m_snap_ids) { if (start_snap_id == end_snap_id) { continue; } else if (end_snap_id > last_snap_id) { break; } interval_set<uint64_t> diff; uint64_t end_size; bool exists; librados::snap_t clone_end_snap_id; bool read_whole_object; calc_snap_set_diff(cct, snap_set, start_snap_id, end_snap_id, &diff, &end_size, &exists, &clone_end_snap_id, &read_whole_object); if (read_whole_object || (!diff.empty() && ((m_list_snaps_flags & LIST_SNAPS_FLAG_WHOLE_OBJECT) != 0))) { ldout(cct, 1) << "need to read full object" << dendl; diff.clear(); diff.insert(0, image_ctx->layout.object_size); end_size = image_ctx->layout.object_size; 
clone_end_snap_id = end_snap_id; } else if (!exists) { end_size = 0; } if (exists) { // reads should be issued against the newest (existing) snapshot within // the associated snapshot object clone. writes should be issued // against the oldest snapshot in the snap_map. ceph_assert(clone_end_snap_id >= end_snap_id); if (clone_end_snap_id > last_snap_id) { // do not read past the copy point snapshot clone_end_snap_id = last_snap_id; } } // clip diff to current object extent interval_set<uint64_t> diff_interval; diff_interval.intersection_of(object_interval, diff); // clip diff to size of object (in case it was truncated) interval_set<uint64_t> zero_interval; if (end_size < prev_end_size) { zero_interval.insert(end_size, prev_end_size - end_size); zero_interval.intersection_of(object_interval); interval_set<uint64_t> trunc_interval; trunc_interval.intersection_of(zero_interval, diff_interval); if (!trunc_interval.empty()) { diff_interval.subtract(trunc_interval); ldout(cct, 20) << "clearing truncate diff: " << trunc_interval << dendl; } } ldout(cct, 20) << "start_snap_id=" << start_snap_id << ", " << "end_snap_id=" << end_snap_id << ", " << "clone_end_snap_id=" << clone_end_snap_id << ", " << "diff=" << diff << ", " << "diff_interval=" << diff_interval<< ", " << "zero_interval=" << zero_interval<< ", " << "end_size=" << end_size << ", " << "prev_end_size=" << prev_end_size << ", " << "exists=" << exists << ", " << "whole_object=" << read_whole_object << dendl; // check if object exists prior to start of incremental snap delta so that // we don't DNE the object if no additional deltas exist if (exists && start_snap_id == 0 && (!diff_interval.empty() || !zero_interval.empty())) { ldout(cct, 20) << "object exists at snap id " << end_snap_id << dendl; initial_extents_written = true; } prev_end_size = end_size; start_snap_id = end_snap_id; if (end_snap_id <= first_snap_id) { // don't include deltas from the starting snapshots, but we iterate over // it to track its 
existence and size ldout(cct, 20) << "skipping prior snapshot " << dendl; continue; } if (exists) { for (auto& interval : diff_interval) { snapshot_delta[{end_snap_id, clone_end_snap_id}].insert( interval.first, interval.second, SparseExtent(SPARSE_EXTENT_STATE_DATA, interval.second)); } } else { zero_interval.union_of(diff_interval); } if ((m_list_snaps_flags & LIST_SNAPS_FLAG_IGNORE_ZEROED_EXTENTS) == 0) { for (auto& interval : zero_interval) { snapshot_delta[{end_snap_id, end_snap_id}].insert( interval.first, interval.second, SparseExtent(SPARSE_EXTENT_STATE_ZEROED, interval.second)); } } } bool snapshot_delta_empty = snapshot_delta.empty(); if (!initial_extents_written) { zero_extent(first_snap_id, first_snap_id > 0); } ldout(cct, 20) << "snapshot_delta=" << snapshot_delta << dendl; if (snapshot_delta_empty) { list_from_parent(); return; } this->finish(0); } template <typename I> void ObjectListSnapsRequest<I>::list_from_parent() { I *image_ctx = this->m_ictx; auto cct = image_ctx->cct; ceph_assert(!m_snap_ids.empty()); librados::snap_t snap_id_start = *m_snap_ids.begin(); librados::snap_t snap_id_end = *m_snap_ids.rbegin(); std::unique_lock image_locker{image_ctx->image_lock}; if ((snap_id_start > 0) || (image_ctx->parent == nullptr) || ((m_list_snaps_flags & LIST_SNAPS_FLAG_DISABLE_LIST_FROM_PARENT) != 0)) { image_locker.unlock(); this->finish(0); return; } Extents parent_extents; uint64_t raw_overlap = 0; uint64_t object_overlap = 0; image_ctx->get_parent_overlap(snap_id_end, &raw_overlap); if (raw_overlap > 0) { // calculate reverse mapping onto the parent image std::tie(parent_extents, m_image_area) = io::util::object_to_area_extents( image_ctx, this->m_object_no, m_object_extents); object_overlap = image_ctx->prune_parent_extents( parent_extents, m_image_area, raw_overlap, false); } if (object_overlap == 0) { image_locker.unlock(); this->finish(0); return; } auto ctx = create_context_callback< ObjectListSnapsRequest<I>, 
&ObjectListSnapsRequest<I>::handle_list_from_parent>(this); auto aio_comp = AioCompletion::create_and_start( ctx, librbd::util::get_image_ctx(image_ctx->parent), AIO_TYPE_GENERIC); ldout(cct, 20) << "completion=" << aio_comp << " parent_extents=" << parent_extents << " area=" << m_image_area << dendl; auto list_snaps_flags = ( m_list_snaps_flags | LIST_SNAPS_FLAG_IGNORE_ZEROED_EXTENTS); ImageListSnapsRequest<I> req( *image_ctx->parent, aio_comp, std::move(parent_extents), m_image_area, {0, image_ctx->parent->snap_id}, list_snaps_flags, &m_parent_snapshot_delta, this->m_trace); req.send(); } template <typename I> void ObjectListSnapsRequest<I>::handle_list_from_parent(int r) { I *image_ctx = this->m_ictx; auto cct = image_ctx->cct; ldout(cct, 20) << "r=" << r << ", " << "parent_snapshot_delta=" << m_parent_snapshot_delta << dendl; // ignore special-case of fully empty dataset (we ignore zeroes) if (m_parent_snapshot_delta.empty()) { this->finish(0); return; } // the write/read snapshot id key is not useful for parent images so // map the special-case INITIAL_WRITE_READ_SNAP_IDS key *m_snapshot_delta = {}; auto& intervals = (*m_snapshot_delta)[INITIAL_WRITE_READ_SNAP_IDS]; for (auto& [key, image_extents] : m_parent_snapshot_delta) { for (auto image_extent : image_extents) { auto state = image_extent.get_val().state; // map image-extents back to this object striper::LightweightObjectExtents object_extents; io::util::area_to_object_extents(image_ctx, image_extent.get_off(), image_extent.get_len(), m_image_area, 0, &object_extents); for (auto& object_extent : object_extents) { ceph_assert(object_extent.object_no == this->m_object_no); intervals.insert( object_extent.offset, object_extent.length, {state, object_extent.length}); } } } ldout(cct, 20) << "snapshot_delta=" << *m_snapshot_delta << dendl; this->finish(0); } template <typename I> void ObjectListSnapsRequest<I>::zero_extent(uint64_t snap_id, bool dne) { I *image_ctx = this->m_ictx; auto cct = image_ctx->cct; // 
the object does not exist or is (partially) under whiteout -- mark the // missing extents which would be any portion of the object that does not // have data in the initial snapshot set if ((m_list_snaps_flags & LIST_SNAPS_FLAG_IGNORE_ZEROED_EXTENTS) == 0) { interval_set<uint64_t> interval; for (auto [object_offset, object_length] : m_object_extents) { interval.insert(object_offset, object_length); } for (auto [offset, length] : interval) { ldout(cct, 20) << "snapshot " << snap_id << ": " << (dne ? "DNE" : "zeroed") << " extent " << offset << "~" << length << dendl; (*m_snapshot_delta)[{snap_id, snap_id}].insert( offset, length, SparseExtent( (dne ? SPARSE_EXTENT_STATE_DNE : SPARSE_EXTENT_STATE_ZEROED), length)); } } } } // namespace io } // namespace librbd template class librbd::io::ObjectRequest<librbd::ImageCtx>; template class librbd::io::ObjectReadRequest<librbd::ImageCtx>; template class librbd::io::AbstractObjectWriteRequest<librbd::ImageCtx>; template class librbd::io::ObjectWriteRequest<librbd::ImageCtx>; template class librbd::io::ObjectDiscardRequest<librbd::ImageCtx>; template class librbd::io::ObjectWriteSameRequest<librbd::ImageCtx>; template class librbd::io::ObjectCompareAndWriteRequest<librbd::ImageCtx>; template class librbd::io::ObjectListSnapsRequest<librbd::ImageCtx>;
34,895
31.49162
80
cc
null
ceph-main/src/librbd/io/ObjectRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IO_OBJECT_REQUEST_H #define CEPH_LIBRBD_IO_OBJECT_REQUEST_H #include "include/int_types.h" #include "include/buffer.h" #include "include/neorados/RADOS.hpp" #include "include/rados/librados.hpp" #include "common/zipkin_trace.h" #include "librbd/ObjectMap.h" #include "librbd/Types.h" #include "librbd/io/Types.h" #include <map> class Context; class ObjectExtent; namespace neorados { struct WriteOp; } namespace librbd { struct ImageCtx; namespace io { struct AioCompletion; template <typename> class CopyupRequest; /** * This class represents an I/O operation to a single RBD data object. * Its subclasses encapsulate logic for dealing with special cases * for I/O due to layering. */ template <typename ImageCtxT = ImageCtx> class ObjectRequest { public: static ObjectRequest* create_write( ImageCtxT *ictx, uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data, IOContext io_context, int op_flags, int write_flags, std::optional<uint64_t> assert_version, const ZTracer::Trace &parent_trace, Context *completion); static ObjectRequest* create_discard( ImageCtxT *ictx, uint64_t object_no, uint64_t object_off, uint64_t object_len, IOContext io_context, int discard_flags, const ZTracer::Trace &parent_trace, Context *completion); static ObjectRequest* create_write_same( ImageCtxT *ictx, uint64_t object_no, uint64_t object_off, uint64_t object_len, ceph::bufferlist&& data, IOContext io_context, int op_flags, const ZTracer::Trace &parent_trace, Context *completion); static ObjectRequest* create_compare_and_write( ImageCtxT *ictx, uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data, ceph::bufferlist&& write_data, IOContext io_context, uint64_t *mismatch_offset, int op_flags, const ZTracer::Trace &parent_trace, Context *completion); ObjectRequest(ImageCtxT *ictx, uint64_t objectno, IOContext io_context, const char *trace_name, const 
ZTracer::Trace &parent_trace, Context *completion); virtual ~ObjectRequest() { m_trace.event("finish"); } static void add_write_hint(ImageCtxT& image_ctx, neorados::WriteOp *wr); virtual void send() = 0; bool has_parent() const { return m_has_parent; } virtual const char *get_op_type() const = 0; protected: bool compute_parent_extents(Extents *parent_extents, ImageArea *area, bool read_request); ImageCtxT *m_ictx; uint64_t m_object_no; IOContext m_io_context; Context *m_completion; ZTracer::Trace m_trace; void async_finish(int r); void finish(int r); private: bool m_has_parent = false; }; template <typename ImageCtxT = ImageCtx> class ObjectReadRequest : public ObjectRequest<ImageCtxT> { public: static ObjectReadRequest* create( ImageCtxT *ictx, uint64_t objectno, ReadExtents* extents, IOContext io_context, int op_flags, int read_flags, const ZTracer::Trace &parent_trace, uint64_t* version, Context *completion) { return new ObjectReadRequest(ictx, objectno, extents, io_context, op_flags, read_flags, parent_trace, version, completion); } ObjectReadRequest( ImageCtxT *ictx, uint64_t objectno, ReadExtents* extents, IOContext io_context, int op_flags, int read_flags, const ZTracer::Trace &parent_trace, uint64_t* version, Context *completion); void send() override; const char *get_op_type() const override { return "read"; } private: /** * @verbatim * * <start> * | * | * v * READ_OBJECT * | * v (skip if not needed) * READ_PARENT * | * v (skip if not needed) * COPYUP * | * v * <finish> * * @endverbatim */ ReadExtents* m_extents; int m_op_flags; int m_read_flags; uint64_t* m_version; void read_object(); void handle_read_object(int r); void read_parent(); void handle_read_parent(int r); void copyup(); }; template <typename ImageCtxT = ImageCtx> class AbstractObjectWriteRequest : public ObjectRequest<ImageCtxT> { public: AbstractObjectWriteRequest( ImageCtxT *ictx, uint64_t object_no, uint64_t object_off, uint64_t len, IOContext io_context, const char *trace_name, const 
ZTracer::Trace &parent_trace, Context *completion); virtual bool is_empty_write_op() const { return false; } virtual uint8_t get_pre_write_object_map_state() const { return OBJECT_EXISTS; } virtual void add_copyup_ops(neorados::WriteOp *wr) { add_write_ops(wr); } void handle_copyup(int r); void send() override; protected: uint64_t m_object_off; uint64_t m_object_len; bool m_full_object = false; bool m_copyup_enabled = true; virtual bool is_no_op_for_nonexistent_object() const { return false; } virtual bool is_object_map_update_enabled() const { return true; } virtual bool is_post_copyup_write_required() const { return false; } virtual bool is_non_existent_post_write_object_map_state() const { return false; } virtual void add_write_hint(neorados::WriteOp *wr); virtual void add_write_ops(neorados::WriteOp *wr) = 0; virtual int filter_write_result(int r) const { return r; } virtual Extents get_copyup_overwrite_extents() const { return {{m_object_off, m_object_len}}; } private: /** * @verbatim * * <start> * | * v (no-op write request) * DETECT_NO_OP . . . . . . . . . . . . . . . . . . . * | . * v (skip if not required/disabled) . * PRE_UPDATE_OBJECT_MAP . * | . . * | . (child dne) . * | . . . . . . . . . . * | . . * | (post-copyup write) . . * | . . . . . . . . . . . . . . * | . . . . * v v . v . * WRITE . . . . . . . . > COPYUP (if required) . * | | . * |/----------------------/ . * | . * v (skip if not required/disabled) . * POST_UPDATE_OBJECT_MAP . * | . * v . * <finish> < . . . . . . . . . . . . . . . . . . . . 
* * @endverbatim */ Extents m_parent_extents; ImageArea m_image_area = ImageArea::DATA; bool m_object_may_exist = false; bool m_copyup_in_progress = false; bool m_guarding_migration_write = false; void compute_parent_info(); void pre_write_object_map_update(); void handle_pre_write_object_map_update(int r); void write_object(); void handle_write_object(int r); void copyup(); void post_write_object_map_update(); void handle_post_write_object_map_update(int r); }; template <typename ImageCtxT = ImageCtx> class ObjectWriteRequest : public AbstractObjectWriteRequest<ImageCtxT> { public: ObjectWriteRequest( ImageCtxT *ictx, uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data, IOContext io_context, int op_flags, int write_flags, std::optional<uint64_t> assert_version, const ZTracer::Trace &parent_trace, Context *completion) : AbstractObjectWriteRequest<ImageCtxT>(ictx, object_no, object_off, data.length(), io_context, "write", parent_trace, completion), m_write_data(std::move(data)), m_op_flags(op_flags), m_write_flags(write_flags), m_assert_version(assert_version) { } bool is_empty_write_op() const override { return (m_write_data.length() == 0); } const char *get_op_type() const override { return "write"; } protected: void add_write_ops(neorados::WriteOp *wr) override; void add_write_hint(neorados::WriteOp *wr) override; private: ceph::bufferlist m_write_data; int m_op_flags; int m_write_flags; std::optional<uint64_t> m_assert_version; }; template <typename ImageCtxT = ImageCtx> class ObjectDiscardRequest : public AbstractObjectWriteRequest<ImageCtxT> { public: ObjectDiscardRequest( ImageCtxT *ictx, uint64_t object_no, uint64_t object_off, uint64_t object_len, IOContext io_context, int discard_flags, const ZTracer::Trace &parent_trace, Context *completion) : AbstractObjectWriteRequest<ImageCtxT>(ictx, object_no, object_off, object_len, io_context, "discard", parent_trace, completion), m_discard_flags(discard_flags) { if (this->m_full_object) { if 
((m_discard_flags & OBJECT_DISCARD_FLAG_DISABLE_CLONE_REMOVE) != 0 && this->has_parent()) { if (!this->m_copyup_enabled) { // need to hide the parent object instead of child object m_discard_action = DISCARD_ACTION_REMOVE_TRUNCATE; } else { m_discard_action = DISCARD_ACTION_TRUNCATE; } } else { m_discard_action = DISCARD_ACTION_REMOVE; } } else if (object_off + object_len == ictx->layout.object_size) { m_discard_action = DISCARD_ACTION_TRUNCATE; } else { m_discard_action = DISCARD_ACTION_ZERO; } } const char* get_op_type() const override { switch (m_discard_action) { case DISCARD_ACTION_REMOVE: return "remove"; case DISCARD_ACTION_REMOVE_TRUNCATE: return "remove (create+truncate)"; case DISCARD_ACTION_TRUNCATE: return "truncate"; case DISCARD_ACTION_ZERO: return "zero"; } ceph_abort(); return nullptr; } uint8_t get_pre_write_object_map_state() const override { if (m_discard_action == DISCARD_ACTION_REMOVE) { return OBJECT_PENDING; } return OBJECT_EXISTS; } protected: bool is_no_op_for_nonexistent_object() const override { return (!this->has_parent()); } bool is_object_map_update_enabled() const override { return ( (m_discard_flags & OBJECT_DISCARD_FLAG_DISABLE_OBJECT_MAP_UPDATE) == 0); } bool is_non_existent_post_write_object_map_state() const override { return (m_discard_action == DISCARD_ACTION_REMOVE); } void add_write_hint(neorados::WriteOp *wr) override { // no hint for discard } void add_write_ops(neorados::WriteOp *wr) override; private: enum DiscardAction { DISCARD_ACTION_REMOVE, DISCARD_ACTION_REMOVE_TRUNCATE, DISCARD_ACTION_TRUNCATE, DISCARD_ACTION_ZERO }; DiscardAction m_discard_action; int m_discard_flags; }; template <typename ImageCtxT = ImageCtx> class ObjectWriteSameRequest : public AbstractObjectWriteRequest<ImageCtxT> { public: ObjectWriteSameRequest( ImageCtxT *ictx, uint64_t object_no, uint64_t object_off, uint64_t object_len, ceph::bufferlist&& data, IOContext io_context, int op_flags, const ZTracer::Trace &parent_trace, Context *completion) : 
AbstractObjectWriteRequest<ImageCtxT>(ictx, object_no, object_off, object_len, io_context, "writesame", parent_trace, completion), m_write_data(std::move(data)), m_op_flags(op_flags) { } const char *get_op_type() const override { return "writesame"; } protected: void add_write_ops(neorados::WriteOp *wr) override; private: ceph::bufferlist m_write_data; int m_op_flags; }; template <typename ImageCtxT = ImageCtx> class ObjectCompareAndWriteRequest : public AbstractObjectWriteRequest<ImageCtxT> { public: ObjectCompareAndWriteRequest( ImageCtxT *ictx, uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_bl, ceph::bufferlist&& write_bl, IOContext io_context, uint64_t *mismatch_offset, int op_flags, const ZTracer::Trace &parent_trace, Context *completion) : AbstractObjectWriteRequest<ImageCtxT>(ictx, object_no, object_off, cmp_bl.length(), io_context, "compare_and_write", parent_trace, completion), m_cmp_bl(std::move(cmp_bl)), m_write_bl(std::move(write_bl)), m_mismatch_offset(mismatch_offset), m_op_flags(op_flags) { } const char *get_op_type() const override { return "compare_and_write"; } void add_copyup_ops(neorados::WriteOp *wr) override { // no-op on copyup } protected: virtual bool is_post_copyup_write_required() const { return true; } void add_write_ops(neorados::WriteOp *wr) override; int filter_write_result(int r) const override; Extents get_copyup_overwrite_extents() const override { return {}; } private: ceph::bufferlist m_cmp_bl; ceph::bufferlist m_write_bl; uint64_t *m_mismatch_offset; int m_op_flags; }; template <typename ImageCtxT = ImageCtx> class ObjectListSnapsRequest : public ObjectRequest<ImageCtxT> { public: static ObjectListSnapsRequest* create( ImageCtxT *ictx, uint64_t objectno, Extents&& object_extents, SnapIds&& snap_ids, int list_snaps_flags, const ZTracer::Trace &parent_trace, SnapshotDelta* snapshot_delta, Context *completion) { return new ObjectListSnapsRequest(ictx, objectno, std::move(object_extents), std::move(snap_ids), 
list_snaps_flags, parent_trace, snapshot_delta, completion); } ObjectListSnapsRequest( ImageCtxT *ictx, uint64_t objectno, Extents&& object_extents, SnapIds&& snap_ids, int list_snaps_flags, const ZTracer::Trace &parent_trace, SnapshotDelta* snapshot_delta, Context *completion); void send() override; const char *get_op_type() const override { return "snap_list"; } private: Extents m_object_extents; SnapIds m_snap_ids; int m_list_snaps_flags; SnapshotDelta* m_snapshot_delta; neorados::SnapSet m_snap_set; boost::system::error_code m_ec; ImageArea m_image_area = ImageArea::DATA; SnapshotDelta m_parent_snapshot_delta; void list_snaps(); void handle_list_snaps(int r); void list_from_parent(); void handle_list_from_parent(int r); void zero_extent(uint64_t snap_id, bool dne); }; } // namespace io } // namespace librbd extern template class librbd::io::ObjectRequest<librbd::ImageCtx>; extern template class librbd::io::ObjectReadRequest<librbd::ImageCtx>; extern template class librbd::io::AbstractObjectWriteRequest<librbd::ImageCtx>; extern template class librbd::io::ObjectWriteRequest<librbd::ImageCtx>; extern template class librbd::io::ObjectDiscardRequest<librbd::ImageCtx>; extern template class librbd::io::ObjectWriteSameRequest<librbd::ImageCtx>; extern template class librbd::io::ObjectCompareAndWriteRequest<librbd::ImageCtx>; extern template class librbd::io::ObjectListSnapsRequest<librbd::ImageCtx>; #endif // CEPH_LIBRBD_IO_OBJECT_REQUEST_H
15,444
29.523715
83
h
null
ceph-main/src/librbd/io/QosImageDispatch.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/io/QosImageDispatch.h" #include "common/dout.h" #include "librbd/AsioEngine.h" #include "librbd/ImageCtx.h" #include "librbd/io/FlushTracker.h" #include <utility> #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::io::QosImageDispatch: " << this << " " \ << __func__ << ": " namespace librbd { namespace io { namespace { uint64_t get_extent_length(const Extents& extents) { uint64_t length = 0; for (auto& extent : extents) { length += extent.second; } return length; } uint64_t calculate_tokens(bool read_op, uint64_t extent_length, uint64_t flag) { if (read_op && ((flag & IMAGE_DISPATCH_FLAG_QOS_WRITE_MASK) != 0)) { return 0; } else if (!read_op && ((flag & IMAGE_DISPATCH_FLAG_QOS_READ_MASK) != 0)) { return 0; } return (((flag & IMAGE_DISPATCH_FLAG_QOS_BPS_MASK) != 0) ? extent_length : 1); } static const std::pair<uint64_t, const char*> throttle_flags[] = { {IMAGE_DISPATCH_FLAG_QOS_IOPS_THROTTLE, "rbd_qos_iops_throttle" }, {IMAGE_DISPATCH_FLAG_QOS_BPS_THROTTLE, "rbd_qos_bps_throttle" }, {IMAGE_DISPATCH_FLAG_QOS_READ_IOPS_THROTTLE, "rbd_qos_read_iops_throttle" }, {IMAGE_DISPATCH_FLAG_QOS_WRITE_IOPS_THROTTLE, "rbd_qos_write_iops_throttle" }, {IMAGE_DISPATCH_FLAG_QOS_READ_BPS_THROTTLE, "rbd_qos_read_bps_throttle" }, {IMAGE_DISPATCH_FLAG_QOS_WRITE_BPS_THROTTLE, "rbd_qos_write_bps_throttle" } }; } // anonymous namespace template <typename I> QosImageDispatch<I>::QosImageDispatch(I* image_ctx) : m_image_ctx(image_ctx), m_flush_tracker(new FlushTracker<I>(image_ctx)) { auto cct = m_image_ctx->cct; ldout(cct, 5) << "ictx=" << image_ctx << dendl; SafeTimer *timer; ceph::mutex *timer_lock; ImageCtx::get_timer_instance(cct, &timer, &timer_lock); for (auto [flag, name] : throttle_flags) { m_throttles.emplace_back( flag, new TokenBucketThrottle(cct, name, 0, 0, timer, timer_lock)); } } template <typename I> 
QosImageDispatch<I>::~QosImageDispatch() { for (auto t : m_throttles) { delete t.second; } } template <typename I> void QosImageDispatch<I>::shut_down(Context* on_finish) { m_flush_tracker->shut_down(); on_finish->complete(0); } template <typename I> void QosImageDispatch<I>::apply_qos_schedule_tick_min(uint64_t tick) { for (auto pair : m_throttles) { pair.second->set_schedule_tick_min(tick); } } template <typename I> void QosImageDispatch<I>::apply_qos_limit(uint64_t flag, uint64_t limit, uint64_t burst, uint64_t burst_seconds) { auto cct = m_image_ctx->cct; TokenBucketThrottle *throttle = nullptr; for (auto pair : m_throttles) { if (flag == pair.first) { throttle = pair.second; break; } } ceph_assert(throttle != nullptr); int r = throttle->set_limit(limit, burst, burst_seconds); if (r < 0) { lderr(cct) << throttle->get_name() << ": invalid qos parameter: " << "burst(" << burst << ") is less than " << "limit(" << limit << ")" << dendl; // if apply failed, we should at least make sure the limit works. 
throttle->set_limit(limit, 0, 1); } if (limit) { m_qos_enabled_flag |= flag; } else { m_qos_enabled_flag &= ~flag; } } template <typename I> void QosImageDispatch<I>::apply_qos_exclude_ops(uint64_t exclude_ops) { m_qos_exclude_ops = exclude_ops; } template <typename I> bool QosImageDispatch<I>::read( AioCompletion* aio_comp, Extents &&image_extents, ReadResult &&read_result, IOContext io_context, int op_flags, int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents << dendl; if (m_qos_exclude_ops & RBD_IO_OPERATION_READ) { return false; } if (needs_throttle(true, image_extents, tid, image_dispatch_flags, dispatch_result, on_finish, on_dispatched)) { return true; } return false; } template <typename I> bool QosImageDispatch<I>::write( AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents << dendl; if (m_qos_exclude_ops & RBD_IO_OPERATION_WRITE) { return false; } if (needs_throttle(false, image_extents, tid, image_dispatch_flags, dispatch_result, on_finish, on_dispatched)) { return true; } return false; } template <typename I> bool QosImageDispatch<I>::discard( AioCompletion* aio_comp, Extents &&image_extents, uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents << dendl; if 
(m_qos_exclude_ops & RBD_IO_OPERATION_DISCARD) { return false; } if (needs_throttle(false, image_extents, tid, image_dispatch_flags, dispatch_result, on_finish, on_dispatched)) { return true; } return false; } template <typename I> bool QosImageDispatch<I>::write_same( AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents << dendl; if (m_qos_exclude_ops & RBD_IO_OPERATION_WRITE_SAME) { return false; } if (needs_throttle(false, image_extents, tid, image_dispatch_flags, dispatch_result, on_finish, on_dispatched)) { return true; } return false; } template <typename I> bool QosImageDispatch<I>::compare_and_write( AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset, int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents << dendl; if (m_qos_exclude_ops & RBD_IO_OPERATION_COMPARE_AND_WRITE) { return false; } if (needs_throttle(false, image_extents, tid, image_dispatch_flags, dispatch_result, on_finish, on_dispatched)) { return true; } return false; } template <typename I> bool QosImageDispatch<I>::flush( AioCompletion* aio_comp, FlushSource flush_source, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << "tid=" << tid << dendl; *dispatch_result = DISPATCH_RESULT_CONTINUE; m_flush_tracker->flush(on_dispatched); return 
true; } template <typename I> void QosImageDispatch<I>::handle_finished(int r, uint64_t tid) { auto cct = m_image_ctx->cct; ldout(cct, 20) << "tid=" << tid << dendl; m_flush_tracker->finish_io(tid); } template <typename I> bool QosImageDispatch<I>::set_throttle_flag( std::atomic<uint32_t>* image_dispatch_flags, uint32_t flag) { uint32_t expected = image_dispatch_flags->load(); uint32_t desired; do { desired = expected | flag; } while (!image_dispatch_flags->compare_exchange_weak(expected, desired)); return ((desired & IMAGE_DISPATCH_FLAG_QOS_MASK) == IMAGE_DISPATCH_FLAG_QOS_MASK); } template <typename I> bool QosImageDispatch<I>::needs_throttle( bool read_op, const Extents& image_extents, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; auto extent_length = get_extent_length(image_extents); bool all_qos_flags_set = false; if (!read_op) { m_flush_tracker->start_io(tid); *on_finish = new LambdaContext([this, tid, on_finish=*on_finish](int r) { handle_finished(r, tid); on_finish->complete(r); }); } *dispatch_result = DISPATCH_RESULT_CONTINUE; auto qos_enabled_flag = m_qos_enabled_flag; for (auto [flag, throttle] : m_throttles) { if ((qos_enabled_flag & flag) == 0) { all_qos_flags_set = set_throttle_flag(image_dispatch_flags, flag); continue; } auto tokens = calculate_tokens(read_op, extent_length, flag); if (tokens > 0 && throttle->get(tokens, this, &QosImageDispatch<I>::handle_throttle_ready, Tag{image_dispatch_flags, on_dispatched}, flag)) { ldout(cct, 15) << "on_dispatched=" << on_dispatched << ", " << "flag=" << flag << dendl; all_qos_flags_set = false; } else { all_qos_flags_set = set_throttle_flag(image_dispatch_flags, flag); } } return !all_qos_flags_set; } template <typename I> void QosImageDispatch<I>::handle_throttle_ready(Tag&& tag, uint64_t flag) { auto cct = m_image_ctx->cct; ldout(cct, 15) << "on_dispatched=" << tag.on_dispatched << ", " 
<< "flag=" << flag << dendl; if (set_throttle_flag(tag.image_dispatch_flags, flag)) { // timer_lock is held -- so dispatch from outside the timer thread m_image_ctx->asio_engine->post(tag.on_dispatched, 0); } } } // namespace io } // namespace librbd template class librbd::io::QosImageDispatch<librbd::ImageCtx>;
10,317
30.361702
83
cc
null
ceph-main/src/librbd/io/QosImageDispatch.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IO_QOS_IMAGE_DISPATCH_H #define CEPH_LIBRBD_IO_QOS_IMAGE_DISPATCH_H #include <list> #include <memory> #include "librbd/io/ImageDispatchInterface.h" #include "include/int_types.h" #include "include/buffer.h" #include "common/zipkin_trace.h" #include "common/Throttle.h" #include "librbd/io/ReadResult.h" #include "librbd/io/Types.h" struct Context; namespace librbd { struct ImageCtx; namespace io { struct AioCompletion; template <typename> class FlushTracker; template <typename ImageCtxT> class QosImageDispatch : public ImageDispatchInterface { public: struct Tag { std::atomic<uint32_t>* image_dispatch_flags; Context* on_dispatched; Tag(std::atomic<uint32_t>* image_dispatch_flags, Context* on_dispatched) : image_dispatch_flags(image_dispatch_flags), on_dispatched(on_dispatched) { } }; QosImageDispatch(ImageCtxT* image_ctx); ~QosImageDispatch() override; ImageDispatchLayer get_dispatch_layer() const override { return IMAGE_DISPATCH_LAYER_QOS; } void shut_down(Context* on_finish) override; void apply_qos_schedule_tick_min(uint64_t tick); void apply_qos_limit(uint64_t flag, uint64_t limit, uint64_t burst, uint64_t burst_seconds); void apply_qos_exclude_ops(uint64_t exclude_ops); bool read( AioCompletion* aio_comp, Extents &&image_extents, ReadResult &&read_result, IOContext io_context, int op_flags, int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool write( AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool discard( AioCompletion* aio_comp, Extents &&image_extents, uint32_t 
discard_granularity_bytes, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool write_same( AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool compare_and_write( AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset, int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool flush( AioCompletion* aio_comp, FlushSource flush_source, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool list_snaps( AioCompletion* aio_comp, Extents&& image_extents, SnapIds&& snap_ids, int list_snaps_flags, SnapshotDelta* snapshot_delta, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override { return false; } bool invalidate_cache(Context* on_finish) override { return false; } private: ImageCtxT* m_image_ctx; std::list<std::pair<uint64_t, TokenBucketThrottle*> > m_throttles; uint64_t m_qos_enabled_flag = 0; uint64_t m_qos_exclude_ops = 0; std::unique_ptr<FlushTracker<ImageCtxT>> m_flush_tracker; void handle_finished(int r, uint64_t tid); bool set_throttle_flag(std::atomic<uint32_t>* image_dispatch_flags, uint32_t flag); bool needs_throttle(bool read_op, const Extents& image_extents, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** 
on_finish, Context* on_dispatched); void handle_throttle_ready(Tag&& tag, uint64_t flag); }; } // namespace io } // namespace librbd extern template class librbd::io::QosImageDispatch<librbd::ImageCtx>; #endif // CEPH_LIBRBD_IO_QOS_IMAGE_DISPATCH_H
4,696
33.536765
79
h
null
ceph-main/src/librbd/io/QueueImageDispatch.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/io/QueueImageDispatch.h" #include "common/dout.h" #include "common/Cond.h" #include "librbd/AsioEngine.h" #include "librbd/ImageCtx.h" #include "librbd/Utils.h" #include "librbd/io/AioCompletion.h" #include "librbd/io/FlushTracker.h" #include "librbd/io/ImageDispatchSpec.h" #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::io::QueueImageDispatch: " << this \ << " " << __func__ << ": " namespace librbd { namespace io { template <typename I> QueueImageDispatch<I>::QueueImageDispatch(I* image_ctx) : m_image_ctx(image_ctx), m_flush_tracker(new FlushTracker<I>(image_ctx)) { auto cct = m_image_ctx->cct; ldout(cct, 5) << "ictx=" << image_ctx << dendl; } template <typename I> QueueImageDispatch<I>::~QueueImageDispatch() { delete m_flush_tracker; } template <typename I> void QueueImageDispatch<I>::shut_down(Context* on_finish) { m_flush_tracker->shut_down(); on_finish->complete(0); } template <typename I> bool QueueImageDispatch<I>::read( AioCompletion* aio_comp, Extents &&image_extents, ReadResult &&read_result, IOContext io_context, int op_flags, int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << "tid=" << tid << dendl; return enqueue(true, tid, dispatch_result, on_finish, on_dispatched); } template <typename I> bool QueueImageDispatch<I>::write( AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << "tid=" << tid << dendl; return enqueue(false, tid, dispatch_result, on_finish, 
on_dispatched); } template <typename I> bool QueueImageDispatch<I>::discard( AioCompletion* aio_comp, Extents &&image_extents, uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << "tid=" << tid << dendl; return enqueue(false, tid, dispatch_result, on_finish, on_dispatched); } template <typename I> bool QueueImageDispatch<I>::write_same( AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << "tid=" << tid << dendl; return enqueue(false, tid, dispatch_result, on_finish, on_dispatched); } template <typename I> bool QueueImageDispatch<I>::compare_and_write( AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset, int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << "tid=" << tid << dendl; return enqueue(false, tid, dispatch_result, on_finish, on_dispatched); } template <typename I> bool QueueImageDispatch<I>::flush( AioCompletion* aio_comp, FlushSource flush_source, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << "tid=" << tid << dendl; *dispatch_result = DISPATCH_RESULT_CONTINUE; m_flush_tracker->flush(on_dispatched); return true; } template <typename I> void QueueImageDispatch<I>::handle_finished(int r, uint64_t 
tid) { auto cct = m_image_ctx->cct; ldout(cct, 20) << "tid=" << tid << dendl; m_flush_tracker->finish_io(tid); } template <typename I> bool QueueImageDispatch<I>::enqueue( bool read_op, uint64_t tid, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { if (!m_image_ctx->non_blocking_aio) { return false; } if (!read_op) { m_flush_tracker->start_io(tid); *on_finish = new LambdaContext([this, tid, on_finish=*on_finish](int r) { handle_finished(r, tid); on_finish->complete(r); }); } *dispatch_result = DISPATCH_RESULT_CONTINUE; m_image_ctx->asio_engine->post(on_dispatched, 0); return true; } } // namespace io } // namespace librbd template class librbd::io::QueueImageDispatch<librbd::ImageCtx>;
5,004
31.290323
79
cc
null
ceph-main/src/librbd/io/QueueImageDispatch.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IO_QUEUE_IMAGE_DISPATCH_H #define CEPH_LIBRBD_IO_QUEUE_IMAGE_DISPATCH_H #include "librbd/io/ImageDispatchInterface.h" #include "include/int_types.h" #include "include/buffer.h" #include "common/zipkin_trace.h" #include "common/Throttle.h" #include "librbd/io/ReadResult.h" #include "librbd/io/Types.h" #include <list> #include <set> struct Context; namespace librbd { struct ImageCtx; namespace io { struct AioCompletion; template <typename> class FlushTracker; template <typename ImageCtxT> class QueueImageDispatch : public ImageDispatchInterface { public: QueueImageDispatch(ImageCtxT* image_ctx); ~QueueImageDispatch(); ImageDispatchLayer get_dispatch_layer() const override { return IMAGE_DISPATCH_LAYER_QUEUE; } void shut_down(Context* on_finish) override; bool read( AioCompletion* aio_comp, Extents &&image_extents, ReadResult &&read_result, IOContext io_context, int op_flags, int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool write( AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool discard( AioCompletion* aio_comp, Extents &&image_extents, uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool write_same( AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** 
on_finish, Context* on_dispatched) override; bool compare_and_write( AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset, int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool flush( AioCompletion* aio_comp, FlushSource flush_source, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool list_snaps( AioCompletion* aio_comp, Extents&& image_extents, SnapIds&& snap_ids, int list_snaps_flags, SnapshotDelta* snapshot_delta, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override { return false; } bool invalidate_cache(Context* on_finish) override { return false; } private: ImageCtxT* m_image_ctx; FlushTracker<ImageCtxT>* m_flush_tracker; void handle_finished(int r, uint64_t tid); bool enqueue(bool read_op, uint64_t tid, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched); }; } // namespace io } // namespace librbd extern template class librbd::io::QueueImageDispatch<librbd::ImageCtx>; #endif // CEPH_LIBRBD_IO_QUEUE_IMAGE_DISPATCH_H
3,749
32.783784
77
h
null
ceph-main/src/librbd/io/ReadResult.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/io/ReadResult.h" #include "include/buffer.h" #include "common/dout.h" #include "librbd/io/AioCompletion.h" #include "librbd/io/Utils.h" #include <boost/variant/apply_visitor.hpp> #include <boost/variant/static_visitor.hpp> #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::io::ReadResult: " << this \ << " " << __func__ << ": " namespace librbd { namespace io { struct ReadResult::SetImageExtentsVisitor : public boost::static_visitor<void> { Extents image_extents; explicit SetImageExtentsVisitor(const Extents& image_extents) : image_extents(image_extents) { } void operator()(Linear &linear) const { uint64_t length = util::get_extents_length(image_extents); ceph_assert(length <= linear.buf_len); linear.buf_len = length; } void operator()(SparseBufferlist &sbl) const { sbl.image_extents = image_extents; } template <typename T> void operator()(T &t) const { } }; struct ReadResult::AssembleResultVisitor : public boost::static_visitor<void> { CephContext *cct; Striper::StripedReadResult &destriper; AssembleResultVisitor(CephContext *cct, Striper::StripedReadResult &destriper) : cct(cct), destriper(destriper) { } void operator()(Empty &empty) const { ldout(cct, 20) << "dropping read result" << dendl; } void operator()(Linear &linear) const { ldout(cct, 20) << "copying resulting bytes to " << reinterpret_cast<void*>(linear.buf) << dendl; destriper.assemble_result(cct, linear.buf, linear.buf_len); } void operator()(Vector &vector) const { bufferlist bl; destriper.assemble_result(cct, bl, true); ldout(cct, 20) << "copying resulting " << bl.length() << " bytes to iovec " << reinterpret_cast<const void*>(vector.iov) << dendl; bufferlist::iterator it = bl.begin(); size_t length = bl.length(); size_t offset = 0; int idx = 0; for (; offset < length && idx < vector.iov_count; idx++) { size_t len = 
std::min(vector.iov[idx].iov_len, length - offset); it.copy(len, static_cast<char *>(vector.iov[idx].iov_base)); offset += len; } ceph_assert(offset == bl.length()); } void operator()(Bufferlist &bufferlist) const { bufferlist.bl->clear(); destriper.assemble_result(cct, *bufferlist.bl, true); ldout(cct, 20) << "moved resulting " << bufferlist.bl->length() << " " << "bytes to bl " << reinterpret_cast<void*>(bufferlist.bl) << dendl; } void operator()(SparseBufferlist &sparse_bufferlist) const { sparse_bufferlist.bl->clear(); ExtentMap buffer_extent_map; auto buffer_extents_length = destriper.assemble_result( cct, &buffer_extent_map, sparse_bufferlist.bl); ldout(cct, 20) << "image_extents=" << sparse_bufferlist.image_extents << ", " << "buffer_extent_map=" << buffer_extent_map << dendl; sparse_bufferlist.extent_map->clear(); sparse_bufferlist.extent_map->reserve(buffer_extent_map.size()); // The extent-map is logically addressed by buffer-extents not image- or // object-extents. Translate this address mapping to image-extent // logical addressing since it's tied to an image-extent read uint64_t buffer_offset = 0; auto bem_it = buffer_extent_map.begin(); for (auto [image_offset, image_length] : sparse_bufferlist.image_extents) { while (bem_it != buffer_extent_map.end()) { auto [buffer_extent_offset, buffer_extent_length] = *bem_it; if (buffer_offset + image_length <= buffer_extent_offset) { // skip any image extent that is not included in the results break; } // current buffer-extent should be within the current image-extent ceph_assert(buffer_offset <= buffer_extent_offset && buffer_offset + image_length >= buffer_extent_offset + buffer_extent_length); auto image_extent_offset = image_offset + (buffer_extent_offset - buffer_offset); ldout(cct, 20) << "mapping buffer extent " << buffer_extent_offset << "~" << buffer_extent_length << " to image extent " << image_extent_offset << "~" << buffer_extent_length << dendl; sparse_bufferlist.extent_map->emplace_back( 
image_extent_offset, buffer_extent_length); ++bem_it; } buffer_offset += image_length; } ceph_assert(buffer_offset == buffer_extents_length); ceph_assert(bem_it == buffer_extent_map.end()); ldout(cct, 20) << "moved resulting " << *sparse_bufferlist.extent_map << " extents of total " << sparse_bufferlist.bl->length() << " bytes to bl " << reinterpret_cast<void*>(sparse_bufferlist.bl) << dendl; } }; ReadResult::C_ImageReadRequest::C_ImageReadRequest( AioCompletion *aio_completion, uint64_t buffer_offset, const Extents image_extents) : aio_completion(aio_completion), buffer_offset(buffer_offset), image_extents(image_extents) { aio_completion->add_request(); } void ReadResult::C_ImageReadRequest::finish(int r) { CephContext *cct = aio_completion->ictx->cct; ldout(cct, 10) << "C_ImageReadRequest: r=" << r << dendl; if (r >= 0 || (ignore_enoent && r == -ENOENT)) { striper::LightweightBufferExtents buffer_extents; size_t length = 0; for (auto &image_extent : image_extents) { buffer_extents.emplace_back(buffer_offset + length, image_extent.second); length += image_extent.second; } ceph_assert(r == -ENOENT || length == bl.length()); aio_completion->lock.lock(); aio_completion->read_result.m_destriper.add_partial_result( cct, std::move(bl), buffer_extents); aio_completion->lock.unlock(); r = length; } aio_completion->complete_request(r); } ReadResult::C_ObjectReadRequest::C_ObjectReadRequest( AioCompletion *aio_completion, ReadExtents&& extents) : aio_completion(aio_completion), extents(std::move(extents)) { aio_completion->add_request(); } void ReadResult::C_ObjectReadRequest::finish(int r) { CephContext *cct = aio_completion->ictx->cct; ldout(cct, 10) << "C_ObjectReadRequest: r=" << r << dendl; if (r == -ENOENT) { r = 0; } if (r >= 0) { uint64_t object_len = 0; aio_completion->lock.lock(); for (auto& extent: extents) { ldout(cct, 10) << " got " << extent.extent_map << " for " << extent.buffer_extents << " bl " << extent.bl.length() << dendl; 
aio_completion->read_result.m_destriper.add_partial_sparse_result( cct, std::move(extent.bl), extent.extent_map, extent.offset, extent.buffer_extents); object_len += extent.length; } aio_completion->lock.unlock(); r = object_len; } aio_completion->complete_request(r); } ReadResult::C_ObjectReadMergedExtents::C_ObjectReadMergedExtents( CephContext* cct, ReadExtents* extents, Context* on_finish) : cct(cct), extents(extents), on_finish(on_finish) { } void ReadResult::C_ObjectReadMergedExtents::finish(int r) { if (r >= 0) { for (auto& extent: *extents) { if (bl.length() < extent.length) { lderr(cct) << "Merged extents length is less than expected" << dendl; r = -EIO; break; } bl.splice(0, extent.length, &extent.bl); } if (bl.length() != 0) { lderr(cct) << "Merged extents length is greater than expected" << dendl; r = -EIO; } } on_finish->complete(r); } ReadResult::ReadResult() : m_buffer(Empty()) { } ReadResult::ReadResult(char *buf, size_t buf_len) : m_buffer(Linear(buf, buf_len)) { } ReadResult::ReadResult(const struct iovec *iov, int iov_count) : m_buffer(Vector(iov, iov_count)) { } ReadResult::ReadResult(ceph::bufferlist *bl) : m_buffer(Bufferlist(bl)) { } ReadResult::ReadResult(Extents* extent_map, ceph::bufferlist* bl) : m_buffer(SparseBufferlist(extent_map, bl)) { } void ReadResult::set_image_extents(const Extents& image_extents) { boost::apply_visitor(SetImageExtentsVisitor(image_extents), m_buffer); } void ReadResult::assemble_result(CephContext *cct) { boost::apply_visitor(AssembleResultVisitor(cct, m_destriper), m_buffer); } } // namespace io } // namespace librbd
8,592
31.673004
80
cc
null
ceph-main/src/librbd/io/ReadResult.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IO_READ_RESULT_H #define CEPH_LIBRBD_IO_READ_RESULT_H #include "include/common_fwd.h" #include "include/int_types.h" #include "include/buffer_fwd.h" #include "include/Context.h" #include "librbd/io/Types.h" #include "osdc/Striper.h" #include <sys/uio.h> #include <boost/variant/variant.hpp> namespace librbd { struct ImageCtx; namespace io { struct AioCompletion; template <typename> struct ObjectReadRequest; class ReadResult { public: struct C_ImageReadRequest : public Context { AioCompletion *aio_completion; uint64_t buffer_offset = 0; Extents image_extents; bufferlist bl; bool ignore_enoent = false; C_ImageReadRequest(AioCompletion *aio_completion, uint64_t buffer_offset, const Extents image_extents); void finish(int r) override; }; struct C_ObjectReadRequest : public Context { AioCompletion *aio_completion; ReadExtents extents; C_ObjectReadRequest(AioCompletion *aio_completion, ReadExtents&& extents); void finish(int r) override; }; struct C_ObjectReadMergedExtents : public Context { CephContext* cct; ReadExtents* extents; Context *on_finish; bufferlist bl; C_ObjectReadMergedExtents(CephContext* cct, ReadExtents* extents, Context* on_finish); void finish(int r) override; }; ReadResult(); ReadResult(char *buf, size_t buf_len); ReadResult(const struct iovec *iov, int iov_count); ReadResult(ceph::bufferlist *bl); ReadResult(Extents* extent_map, ceph::bufferlist* bl); void set_image_extents(const Extents& image_extents); void assemble_result(CephContext *cct); private: struct Empty { }; struct Linear { char *buf; size_t buf_len; Linear(char *buf, size_t buf_len) : buf(buf), buf_len(buf_len) { } }; struct Vector { const struct iovec *iov; int iov_count; Vector(const struct iovec *iov, int iov_count) : iov(iov), iov_count(iov_count) { } }; struct Bufferlist { ceph::bufferlist *bl; Bufferlist(ceph::bufferlist *bl) : bl(bl) { } }; struct SparseBufferlist { 
Extents *extent_map; ceph::bufferlist *bl; Extents image_extents; SparseBufferlist(Extents* extent_map, ceph::bufferlist* bl) : extent_map(extent_map), bl(bl) { } }; typedef boost::variant<Empty, Linear, Vector, Bufferlist, SparseBufferlist> Buffer; struct SetImageExtentsVisitor; struct AssembleResultVisitor; Buffer m_buffer; Striper::StripedReadResult m_destriper; }; } // namespace io } // namespace librbd #endif // CEPH_LIBRBD_IO_READ_RESULT_H
2,875
21.123077
78
h
null
ceph-main/src/librbd/io/RefreshImageDispatch.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/io/RefreshImageDispatch.h" #include "common/dout.h" #include "librbd/ImageCtx.h" #include "librbd/ImageState.h" #include <map> #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::io::RefreshImageDispatch: " << this \ << " " << __func__ << ": " namespace librbd { namespace io { template <typename I> RefreshImageDispatch<I>::RefreshImageDispatch(I* image_ctx) : m_image_ctx(image_ctx) { auto cct = m_image_ctx->cct; ldout(cct, 5) << "ictx=" << image_ctx << dendl; } template <typename I> void RefreshImageDispatch<I>::shut_down(Context* on_finish) { on_finish->complete(0); } template <typename I> bool RefreshImageDispatch<I>::read( AioCompletion* aio_comp, Extents &&image_extents, ReadResult &&read_result, IOContext io_context, int op_flags, int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents << dendl; if (needs_refresh(dispatch_result, on_dispatched)) { return true; } return false; } template <typename I> bool RefreshImageDispatch<I>::write( AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents << dendl; if (needs_refresh(dispatch_result, on_dispatched)) { return true; } return false; } template <typename I> bool RefreshImageDispatch<I>::discard( AioCompletion* aio_comp, Extents &&image_extents, uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace, uint64_t tid, 
std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents << dendl; if (needs_refresh(dispatch_result, on_dispatched)) { return true; } return false; } template <typename I> bool RefreshImageDispatch<I>::write_same( AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents << dendl; if (needs_refresh(dispatch_result, on_dispatched)) { return true; } return false; } template <typename I> bool RefreshImageDispatch<I>::compare_and_write( AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset, int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents << dendl; if (needs_refresh(dispatch_result, on_dispatched)) { return true; } return false; } template <typename I> bool RefreshImageDispatch<I>::flush( AioCompletion* aio_comp, FlushSource flush_source, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << "tid=" << tid << dendl; // The refresh state machine can initiate a flush and it can // enable the exclusive-lock which will also attempt to flush. 
if (flush_source == FLUSH_SOURCE_REFRESH || flush_source == FLUSH_SOURCE_EXCLUSIVE_LOCK_SKIP_REFRESH || flush_source == FLUSH_SOURCE_SHUTDOWN) { return false; } if (needs_refresh(dispatch_result, on_dispatched)) { return true; } return false; } template <typename I> bool RefreshImageDispatch<I>::needs_refresh( DispatchResult* dispatch_result, Context* on_dispatched) { auto cct = m_image_ctx->cct; if (m_image_ctx->state->is_refresh_required()) { ldout(cct, 15) << "on_dispatched=" << on_dispatched << dendl; *dispatch_result = DISPATCH_RESULT_CONTINUE; m_image_ctx->state->refresh(on_dispatched); return true; } return false; } } // namespace io } // namespace librbd template class librbd::io::RefreshImageDispatch<librbd::ImageCtx>;
5,028
29.113772
79
cc
null
ceph-main/src/librbd/io/RefreshImageDispatch.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IO_REFRESH_IMAGE_DISPATCH_H #define CEPH_LIBRBD_IO_REFRESH_IMAGE_DISPATCH_H #include "librbd/io/ImageDispatchInterface.h" #include "include/int_types.h" #include "include/buffer.h" #include "common/zipkin_trace.h" #include "common/Throttle.h" #include "librbd/io/ReadResult.h" #include "librbd/io/Types.h" struct Context; namespace librbd { struct ImageCtx; namespace io { struct AioCompletion; template <typename ImageCtxT> class RefreshImageDispatch : public ImageDispatchInterface { public: RefreshImageDispatch(ImageCtxT* image_ctx); ImageDispatchLayer get_dispatch_layer() const override { return IMAGE_DISPATCH_LAYER_REFRESH; } void shut_down(Context* on_finish) override; bool read( AioCompletion* aio_comp, Extents &&image_extents, ReadResult &&read_result, IOContext io_context, int op_flags, int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool write( AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool discard( AioCompletion* aio_comp, Extents &&image_extents, uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool write_same( AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool compare_and_write( AioCompletion* 
aio_comp, Extents &&image_extents, bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset, int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool flush( AioCompletion* aio_comp, FlushSource flush_source, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool list_snaps( AioCompletion* aio_comp, Extents&& image_extents, SnapIds&& snap_ids, int list_snaps_flags, SnapshotDelta* snapshot_delta, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override { return false; } bool invalidate_cache(Context* on_finish) override { return false; } private: ImageCtxT* m_image_ctx; bool needs_refresh(DispatchResult* dispatch_result, Context* on_dispatched); }; } // namespace io } // namespace librbd extern template class librbd::io::RefreshImageDispatch<librbd::ImageCtx>; #endif // CEPH_LIBRBD_IO_REFRESH_IMAGE_DISPATCH_H
3,518
33.5
78
h
null
ceph-main/src/librbd/io/SimpleSchedulerObjectDispatch.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/io/SimpleSchedulerObjectDispatch.h" #include "include/neorados/RADOS.hpp" #include "common/ceph_time.h" #include "common/Timer.h" #include "common/errno.h" #include "librbd/AsioEngine.h" #include "librbd/ImageCtx.h" #include "librbd/Utils.h" #include "librbd/io/FlushTracker.h" #include "librbd/io/ObjectDispatchSpec.h" #include "librbd/io/ObjectDispatcher.h" #include "librbd/io/Utils.h" #include <boost/accumulators/accumulators.hpp> #include <boost/accumulators/statistics/rolling_count.hpp> #include <boost/accumulators/statistics/rolling_sum.hpp> #include <boost/accumulators/statistics/stats.hpp> #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::io::SimpleSchedulerObjectDispatch: " \ << this << " " << __func__ << ": " namespace librbd { namespace io { using namespace boost::accumulators; using ceph::operator<<; using librbd::util::data_object_name; static const int LATENCY_STATS_WINDOW_SIZE = 10; class LatencyStats { private: accumulator_set<uint64_t, stats<tag::rolling_count, tag::rolling_sum>> m_acc; public: LatencyStats() : m_acc(tag::rolling_window::window_size = LATENCY_STATS_WINDOW_SIZE) { } bool is_ready() const { return rolling_count(m_acc) == LATENCY_STATS_WINDOW_SIZE; } void add(uint64_t latency) { m_acc(latency); } uint64_t avg() const { auto count = rolling_count(m_acc); if (count > 0) { return rolling_sum(m_acc); } return 0; } }; template <typename I> bool SimpleSchedulerObjectDispatch<I>::ObjectRequests::try_delay_request( uint64_t object_off, ceph::bufferlist&& data, IOContext io_context, int op_flags, int object_dispatch_flags, Context* on_dispatched) { if (!m_delayed_requests.empty()) { if (!m_io_context || *m_io_context != *io_context || op_flags != m_op_flags || data.length() == 0 || intersects(object_off, data.length())) { return false; } } else { m_io_context = io_context; m_op_flags = 
op_flags; } if (data.length() == 0) { // a zero length write is usually a special case, // and we don't want it to be merged with others ceph_assert(m_delayed_requests.empty()); m_delayed_request_extents.insert(0, UINT64_MAX); } else { m_delayed_request_extents.insert(object_off, data.length()); } m_object_dispatch_flags |= object_dispatch_flags; if (!m_delayed_requests.empty()) { // try to merge front to an existing request auto iter = m_delayed_requests.find(object_off + data.length()); if (iter != m_delayed_requests.end()) { auto new_iter = m_delayed_requests.insert({object_off, {}}).first; new_iter->second.data = std::move(data); new_iter->second.data.append(std::move(iter->second.data)); new_iter->second.requests = std::move(iter->second.requests); new_iter->second.requests.push_back(on_dispatched); m_delayed_requests.erase(iter); if (new_iter != m_delayed_requests.begin()) { auto prev = new_iter; try_merge_delayed_requests(--prev, new_iter); } return true; } // try to merge back to an existing request iter = m_delayed_requests.lower_bound(object_off); if (iter != m_delayed_requests.begin() && (iter == m_delayed_requests.end() || iter->first > object_off)) { iter--; } if (iter != m_delayed_requests.end() && iter->first + iter->second.data.length() == object_off) { iter->second.data.append(std::move(data)); iter->second.requests.push_back(on_dispatched); auto next = iter; if (++next != m_delayed_requests.end()) { try_merge_delayed_requests(iter, next); } return true; } } // create a new request auto iter = m_delayed_requests.insert({object_off, {}}).first; iter->second.data = std::move(data); iter->second.requests.push_back(on_dispatched); return true; } template <typename I> void SimpleSchedulerObjectDispatch<I>::ObjectRequests::try_merge_delayed_requests( typename std::map<uint64_t, MergedRequests>::iterator &iter1, typename std::map<uint64_t, MergedRequests>::iterator &iter2) { if (iter1->first + iter1->second.data.length() != iter2->first) { return; } 
iter1->second.data.append(std::move(iter2->second.data)); iter1->second.requests.insert(iter1->second.requests.end(), iter2->second.requests.begin(), iter2->second.requests.end()); m_delayed_requests.erase(iter2); } template <typename I> void SimpleSchedulerObjectDispatch<I>::ObjectRequests::dispatch_delayed_requests( I *image_ctx, LatencyStats *latency_stats, ceph::mutex *latency_stats_lock) { for (auto &it : m_delayed_requests) { auto offset = it.first; auto &merged_requests = it.second; auto ctx = new LambdaContext( [requests=std::move(merged_requests.requests), latency_stats, latency_stats_lock, start_time=ceph_clock_now()](int r) { if (latency_stats) { std::lock_guard locker{*latency_stats_lock}; auto latency = ceph_clock_now() - start_time; latency_stats->add(latency.to_nsec()); } for (auto on_dispatched : requests) { on_dispatched->complete(r); } }); auto req = ObjectDispatchSpec::create_write( image_ctx, OBJECT_DISPATCH_LAYER_SCHEDULER, m_object_no, offset, std::move(merged_requests.data), m_io_context, m_op_flags, 0, std::nullopt, 0, {}, ctx); req->object_dispatch_flags = m_object_dispatch_flags; req->send(); } m_dispatch_time = {}; } template <typename I> SimpleSchedulerObjectDispatch<I>::SimpleSchedulerObjectDispatch( I* image_ctx) : m_image_ctx(image_ctx), m_flush_tracker(new FlushTracker<I>(image_ctx)), m_lock(ceph::make_mutex(librbd::util::unique_lock_name( "librbd::io::SimpleSchedulerObjectDispatch::lock", this))), m_max_delay(image_ctx->config.template get_val<uint64_t>( "rbd_io_scheduler_simple_max_delay")) { CephContext *cct = m_image_ctx->cct; ldout(cct, 5) << "ictx=" << image_ctx << dendl; I::get_timer_instance(cct, &m_timer, &m_timer_lock); if (m_max_delay == 0) { m_latency_stats = std::make_unique<LatencyStats>(); } } template <typename I> SimpleSchedulerObjectDispatch<I>::~SimpleSchedulerObjectDispatch() { delete m_flush_tracker; } template <typename I> void SimpleSchedulerObjectDispatch<I>::init() { auto cct = m_image_ctx->cct; ldout(cct, 5) 
<< dendl; // add ourself to the IO object dispatcher chain m_image_ctx->io_object_dispatcher->register_dispatch(this); } template <typename I> void SimpleSchedulerObjectDispatch<I>::shut_down(Context* on_finish) { auto cct = m_image_ctx->cct; ldout(cct, 5) << dendl; m_flush_tracker->shut_down(); on_finish->complete(0); } template <typename I> bool SimpleSchedulerObjectDispatch<I>::read( uint64_t object_no, ReadExtents* extents, IOContext io_context, int op_flags, int read_flags, const ZTracer::Trace &parent_trace, uint64_t* version, int* object_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " " << extents << dendl; std::lock_guard locker{m_lock}; for (auto& extent : *extents) { if (intersects(object_no, extent.offset, extent.length)) { dispatch_delayed_requests(object_no); break; } } return false; } template <typename I> bool SimpleSchedulerObjectDispatch<I>::discard( uint64_t object_no, uint64_t object_off, uint64_t object_len, IOContext io_context, int discard_flags, const ZTracer::Trace &parent_trace, int* object_dispatch_flags, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " " << object_off << "~" << object_len << dendl; std::lock_guard locker{m_lock}; dispatch_delayed_requests(object_no); register_in_flight_request(object_no, {}, on_finish); return false; } template <typename I> bool SimpleSchedulerObjectDispatch<I>::write( uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data, IOContext io_context, int op_flags, int write_flags, std::optional<uint64_t> assert_version, const ZTracer::Trace &parent_trace, int* object_dispatch_flags, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; 
ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " " << object_off << "~" << data.length() << dendl; std::lock_guard locker{m_lock}; // don't try to batch assert version writes if (assert_version.has_value() || (write_flags & OBJECT_WRITE_FLAG_CREATE_EXCLUSIVE) != 0) { dispatch_delayed_requests(object_no); return false; } if (try_delay_write(object_no, object_off, std::move(data), io_context, op_flags, *object_dispatch_flags, on_dispatched)) { auto dispatch_seq = ++m_dispatch_seq; m_flush_tracker->start_io(dispatch_seq); *on_finish = new LambdaContext( [this, dispatch_seq, ctx=*on_finish](int r) { ctx->complete(r); m_flush_tracker->finish_io(dispatch_seq); }); *dispatch_result = DISPATCH_RESULT_COMPLETE; return true; } dispatch_delayed_requests(object_no); register_in_flight_request(object_no, ceph_clock_now(), on_finish); return false; } template <typename I> bool SimpleSchedulerObjectDispatch<I>::write_same( uint64_t object_no, uint64_t object_off, uint64_t object_len, LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data, IOContext io_context, int op_flags, const ZTracer::Trace &parent_trace, int* object_dispatch_flags, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " " << object_off << "~" << object_len << dendl; std::lock_guard locker{m_lock}; dispatch_delayed_requests(object_no); register_in_flight_request(object_no, {}, on_finish); return false; } template <typename I> bool SimpleSchedulerObjectDispatch<I>::compare_and_write( uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data, ceph::bufferlist&& write_data, IOContext io_context, int op_flags, const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset, int* object_dispatch_flags, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; 
ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " " << object_off << "~" << cmp_data.length() << dendl; std::lock_guard locker{m_lock}; dispatch_delayed_requests(object_no); register_in_flight_request(object_no, {}, on_finish); return false; } template <typename I> bool SimpleSchedulerObjectDispatch<I>::flush( FlushSource flush_source, const ZTracer::Trace &parent_trace, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { auto cct = m_image_ctx->cct; ldout(cct, 20) << dendl; { std::lock_guard locker{m_lock}; dispatch_all_delayed_requests(); } *dispatch_result = DISPATCH_RESULT_CONTINUE; m_flush_tracker->flush(on_dispatched); return true; } template <typename I> bool SimpleSchedulerObjectDispatch<I>::intersects( uint64_t object_no, uint64_t object_off, uint64_t len) const { ceph_assert(ceph_mutex_is_locked(m_lock)); auto cct = m_image_ctx->cct; auto it = m_requests.find(object_no); bool intersects = (it != m_requests.end()) && it->second->intersects(object_off, len); ldout(cct, 20) << intersects << dendl; return intersects; } template <typename I> bool SimpleSchedulerObjectDispatch<I>::try_delay_write( uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data, IOContext io_context, int op_flags, int object_dispatch_flags, Context* on_dispatched) { ceph_assert(ceph_mutex_is_locked(m_lock)); auto cct = m_image_ctx->cct; if (m_latency_stats && !m_latency_stats->is_ready()) { ldout(cct, 20) << "latency stats not collected yet" << dendl; return false; } auto it = m_requests.find(object_no); if (it == m_requests.end()) { ldout(cct, 20) << "no pending requests" << dendl; return false; } auto &object_requests = it->second; bool delayed = object_requests->try_delay_request( object_off, std::move(data), io_context, op_flags, object_dispatch_flags, on_dispatched); ldout(cct, 20) << "delayed: " << delayed << dendl; // schedule dispatch on the first request added if (delayed && 
!object_requests->is_scheduled_dispatch()) { auto dispatch_time = ceph::real_clock::now(); if (m_latency_stats) { dispatch_time += std::chrono::nanoseconds(m_latency_stats->avg() / 2); } else { dispatch_time += std::chrono::milliseconds(m_max_delay); } object_requests->set_scheduled_dispatch(dispatch_time); m_dispatch_queue.push_back(object_requests); if (m_dispatch_queue.front() == object_requests) { schedule_dispatch_delayed_requests(); } } return delayed; } template <typename I> void SimpleSchedulerObjectDispatch<I>::dispatch_all_delayed_requests() { ceph_assert(ceph_mutex_is_locked(m_lock)); auto cct = m_image_ctx->cct; ldout(cct, 20) << dendl; while (!m_requests.empty()) { auto it = m_requests.begin(); dispatch_delayed_requests(it->second); m_requests.erase(it); } } template <typename I> void SimpleSchedulerObjectDispatch<I>::register_in_flight_request( uint64_t object_no, const utime_t &start_time, Context **on_finish) { auto res = m_requests.insert( {object_no, std::make_shared<ObjectRequests>(object_no)}); ceph_assert(res.second); auto it = res.first; auto dispatch_seq = ++m_dispatch_seq; m_flush_tracker->start_io(dispatch_seq); it->second->set_dispatch_seq(dispatch_seq); *on_finish = new LambdaContext( [this, object_no, dispatch_seq, start_time, ctx=*on_finish](int r) { ctx->complete(r); std::unique_lock locker{m_lock}; if (m_latency_stats && start_time != utime_t()) { auto latency = ceph_clock_now() - start_time; m_latency_stats->add(latency.to_nsec()); } auto it = m_requests.find(object_no); if (it == m_requests.end() || it->second->get_dispatch_seq() != dispatch_seq) { ldout(m_image_ctx->cct, 20) << "already dispatched" << dendl; } else { dispatch_delayed_requests(it->second); m_requests.erase(it); } locker.unlock(); m_flush_tracker->finish_io(dispatch_seq); }); } template <typename I> void SimpleSchedulerObjectDispatch<I>::dispatch_delayed_requests( uint64_t object_no) { ceph_assert(ceph_mutex_is_locked(m_lock)); auto cct = m_image_ctx->cct; auto it = 
m_requests.find(object_no); if (it == m_requests.end()) { ldout(cct, 20) << "object_no=" << object_no << ": not found" << dendl; return; } dispatch_delayed_requests(it->second); m_requests.erase(it); } template <typename I> void SimpleSchedulerObjectDispatch<I>::dispatch_delayed_requests( ObjectRequestsRef object_requests) { ceph_assert(ceph_mutex_is_locked(m_lock)); auto cct = m_image_ctx->cct; ldout(cct, 20) << "object_no=" << object_requests->get_object_no() << ", " << object_requests->delayed_requests_size() << " requests, " << "dispatch_time=" << object_requests->get_dispatch_time() << dendl; if (!object_requests->is_scheduled_dispatch()) { return; } object_requests->dispatch_delayed_requests(m_image_ctx, m_latency_stats.get(), &m_lock); ceph_assert(!m_dispatch_queue.empty()); if (m_dispatch_queue.front() == object_requests) { m_dispatch_queue.pop_front(); schedule_dispatch_delayed_requests(); } } template <typename I> void SimpleSchedulerObjectDispatch<I>::schedule_dispatch_delayed_requests() { ceph_assert(ceph_mutex_is_locked(m_lock)); auto cct = m_image_ctx->cct; std::lock_guard timer_locker{*m_timer_lock}; if (m_timer_task != nullptr) { ldout(cct, 20) << "canceling task " << m_timer_task << dendl; bool canceled = m_timer->cancel_event(m_timer_task); ceph_assert(canceled); m_timer_task = nullptr; } if (m_dispatch_queue.empty()) { ldout(cct, 20) << "nothing to schedule" << dendl; return; } auto object_requests = m_dispatch_queue.front().get(); while (!object_requests->is_scheduled_dispatch()) { ldout(cct, 20) << "garbage collecting " << object_requests << dendl; m_dispatch_queue.pop_front(); if (m_dispatch_queue.empty()) { ldout(cct, 20) << "nothing to schedule" << dendl; return; } object_requests = m_dispatch_queue.front().get(); } m_timer_task = new LambdaContext( [this, object_no=object_requests->get_object_no()](int r) { ceph_assert(ceph_mutex_is_locked(*m_timer_lock)); auto cct = m_image_ctx->cct; ldout(cct, 20) << "running timer task " << m_timer_task 
<< dendl; m_timer_task = nullptr; m_image_ctx->asio_engine->post( [this, object_no]() { std::lock_guard locker{m_lock}; dispatch_delayed_requests(object_no); }); }); ldout(cct, 20) << "scheduling task " << m_timer_task << " at " << object_requests->get_dispatch_time() << dendl; m_timer->add_event_at(object_requests->get_dispatch_time(), m_timer_task); } } // namespace io } // namespace librbd template class librbd::io::SimpleSchedulerObjectDispatch<librbd::ImageCtx>;
18,211
31.176678
82
cc
null
ceph-main/src/librbd/io/SimpleSchedulerObjectDispatch.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IO_SIMPLE_SCHEDULER_OBJECT_DISPATCH_H #define CEPH_LIBRBD_IO_SIMPLE_SCHEDULER_OBJECT_DISPATCH_H #include "common/ceph_mutex.h" #include "include/interval_set.h" #include "include/utime.h" #include "librbd/io/ObjectDispatchInterface.h" #include "librbd/io/TypeTraits.h" #include <list> #include <map> #include <memory> namespace librbd { class ImageCtx; namespace io { template <typename> class FlushTracker; class LatencyStats; /** * Simple scheduler plugin for object dispatcher layer. */ template <typename ImageCtxT = ImageCtx> class SimpleSchedulerObjectDispatch : public ObjectDispatchInterface { private: // mock unit testing support typedef ::librbd::io::TypeTraits<ImageCtxT> TypeTraits; typedef typename TypeTraits::SafeTimer SafeTimer; public: static SimpleSchedulerObjectDispatch* create(ImageCtxT* image_ctx) { return new SimpleSchedulerObjectDispatch(image_ctx); } SimpleSchedulerObjectDispatch(ImageCtxT* image_ctx); ~SimpleSchedulerObjectDispatch() override; ObjectDispatchLayer get_dispatch_layer() const override { return OBJECT_DISPATCH_LAYER_SCHEDULER; } void init(); void shut_down(Context* on_finish) override; bool read( uint64_t object_no, ReadExtents* extents, IOContext io_context, int op_flags, int read_flags, const ZTracer::Trace &parent_trace, uint64_t* version, int* object_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool discard( uint64_t object_no, uint64_t object_off, uint64_t object_len, IOContext io_context, int discard_flags, const ZTracer::Trace &parent_trace, int* object_dispatch_flags, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool write( uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data, IOContext io_context, int op_flags, int write_flags, std::optional<uint64_t> assert_version, const 
ZTracer::Trace &parent_trace, int* object_dispatch_flags, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool write_same( uint64_t object_no, uint64_t object_off, uint64_t object_len, LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data, IOContext io_context, int op_flags, const ZTracer::Trace &parent_trace, int* object_dispatch_flags, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool compare_and_write( uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data, ceph::bufferlist&& write_data, IOContext io_context, int op_flags, const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset, int* object_dispatch_flags, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool flush( FlushSource flush_source, const ZTracer::Trace &parent_trace, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override; bool list_snaps( uint64_t object_no, io::Extents&& extents, SnapIds&& snap_ids, int list_snap_flags, const ZTracer::Trace &parent_trace, SnapshotDelta* snapshot_delta, int* object_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override { return false; } bool invalidate_cache(Context* on_finish) override { return false; } bool reset_existence_cache(Context* on_finish) override { return false; } void extent_overwritten( uint64_t object_no, uint64_t object_off, uint64_t object_len, uint64_t journal_tid, uint64_t new_journal_tid) override { } int prepare_copyup( uint64_t object_no, SnapshotSparseBufferlist* snapshot_sparse_bufferlist) override { return 0; } private: struct MergedRequests { ceph::bufferlist data; std::list<Context *> requests; }; class ObjectRequests { public: using clock_t = ceph::real_clock; ObjectRequests(uint64_t object_no) : m_object_no(object_no) 
{ } uint64_t get_object_no() const { return m_object_no; } void set_dispatch_seq(uint64_t dispatch_seq) { m_dispatch_seq = dispatch_seq; } uint64_t get_dispatch_seq() const { return m_dispatch_seq; } clock_t::time_point get_dispatch_time() const { return m_dispatch_time; } void set_scheduled_dispatch(const clock_t::time_point &dispatch_time) { m_dispatch_time = dispatch_time; } bool is_scheduled_dispatch() const { return !clock_t::is_zero(m_dispatch_time); } size_t delayed_requests_size() const { return m_delayed_requests.size(); } bool intersects(uint64_t object_off, uint64_t len) const { return m_delayed_request_extents.intersects(object_off, len); } bool try_delay_request(uint64_t object_off, ceph::bufferlist&& data, IOContext io_context, int op_flags, int object_dispatch_flags, Context* on_dispatched); void dispatch_delayed_requests(ImageCtxT *image_ctx, LatencyStats *latency_stats, ceph::mutex *latency_stats_lock); private: uint64_t m_object_no; uint64_t m_dispatch_seq = 0; clock_t::time_point m_dispatch_time; IOContext m_io_context; int m_op_flags = 0; int m_object_dispatch_flags = 0; std::map<uint64_t, MergedRequests> m_delayed_requests; interval_set<uint64_t> m_delayed_request_extents; void try_merge_delayed_requests( typename std::map<uint64_t, MergedRequests>::iterator &iter, typename std::map<uint64_t, MergedRequests>::iterator &iter2); }; typedef std::shared_ptr<ObjectRequests> ObjectRequestsRef; typedef std::map<uint64_t, ObjectRequestsRef> Requests; ImageCtxT *m_image_ctx; FlushTracker<ImageCtxT>* m_flush_tracker; ceph::mutex m_lock; SafeTimer *m_timer; ceph::mutex *m_timer_lock; uint64_t m_max_delay; uint64_t m_dispatch_seq = 0; Requests m_requests; std::list<ObjectRequestsRef> m_dispatch_queue; Context *m_timer_task = nullptr; std::unique_ptr<LatencyStats> m_latency_stats; bool try_delay_write(uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data, IOContext io_context, int op_flags, int object_dispatch_flags, Context* on_dispatched); bool 
intersects(uint64_t object_no, uint64_t object_off, uint64_t len) const; void dispatch_all_delayed_requests(); void dispatch_delayed_requests(uint64_t object_no); void dispatch_delayed_requests(ObjectRequestsRef object_requests); void register_in_flight_request(uint64_t object_no, const utime_t &start_time, Context** on_finish); void schedule_dispatch_delayed_requests(); }; } // namespace io } // namespace librbd extern template class librbd::io::SimpleSchedulerObjectDispatch<librbd::ImageCtx>; #endif // CEPH_LIBRBD_CACHE_SIMPLE_SCHEDULER_OBJECT_DISPATCH_H
7,396
31.442982
82
h
null
ceph-main/src/librbd/io/TypeTraits.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IO_TYPE_TRAITS_H #define CEPH_LIBRBD_IO_TYPE_TRAITS_H #include "common/Timer.h" namespace librbd { namespace io { template <typename IoCtxT> struct TypeTraits { typedef ::SafeTimer SafeTimer; }; } // namespace io } // namespace librbd #endif // CEPH_LIBRBD_IO_TYPE_TRAITS_H
400
18.095238
70
h
null
ceph-main/src/librbd/io/Types.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/io/Types.h" #include <iostream> namespace librbd { namespace io { const WriteReadSnapIds INITIAL_WRITE_READ_SNAP_IDS{0, 0}; std::ostream& operator<<(std::ostream& os, SparseExtentState state) { switch (state) { case SPARSE_EXTENT_STATE_DNE: os << "dne"; break; case SPARSE_EXTENT_STATE_ZEROED: os << "zeroed"; break; case SPARSE_EXTENT_STATE_DATA: os << "data"; break; default: ceph_abort(); break; } return os; } std::ostream& operator<<(std::ostream& os, const SparseExtent& se) { os << "[" << "state=" << se.state << ", " << "length=" << se.length << "]"; return os; } std::ostream& operator<<(std::ostream& os, ImageArea area) { switch (area) { case ImageArea::DATA: return os << "data"; case ImageArea::CRYPTO_HEADER: return os << "crypto_header"; default: ceph_abort(); } } } // namespace io } // namespace librbd
1,024
19.5
70
cc
null
ceph-main/src/librbd/io/Types.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IO_TYPES_H #define CEPH_LIBRBD_IO_TYPES_H #include "include/int_types.h" #include "include/rados/rados_types.hpp" #include "common/interval_map.h" #include "osdc/StriperTypes.h" #include <iosfwd> #include <map> #include <vector> struct Context; namespace librbd { namespace io { typedef enum { AIO_TYPE_NONE = 0, AIO_TYPE_GENERIC, AIO_TYPE_OPEN, AIO_TYPE_CLOSE, AIO_TYPE_READ, AIO_TYPE_WRITE, AIO_TYPE_DISCARD, AIO_TYPE_FLUSH, AIO_TYPE_WRITESAME, AIO_TYPE_COMPARE_AND_WRITE, } aio_type_t; enum FlushSource { FLUSH_SOURCE_USER, FLUSH_SOURCE_INTERNAL, FLUSH_SOURCE_SHUTDOWN, FLUSH_SOURCE_EXCLUSIVE_LOCK, FLUSH_SOURCE_EXCLUSIVE_LOCK_SKIP_REFRESH, FLUSH_SOURCE_REFRESH, FLUSH_SOURCE_WRITEBACK, FLUSH_SOURCE_WRITE_BLOCK, }; enum Direction { DIRECTION_READ, DIRECTION_WRITE, DIRECTION_BOTH }; enum DispatchResult { DISPATCH_RESULT_INVALID, DISPATCH_RESULT_RESTART, DISPATCH_RESULT_CONTINUE, DISPATCH_RESULT_COMPLETE }; enum ImageDispatchLayer { IMAGE_DISPATCH_LAYER_NONE = 0, IMAGE_DISPATCH_LAYER_API_START = IMAGE_DISPATCH_LAYER_NONE, IMAGE_DISPATCH_LAYER_QUEUE, IMAGE_DISPATCH_LAYER_QOS, IMAGE_DISPATCH_LAYER_EXCLUSIVE_LOCK, IMAGE_DISPATCH_LAYER_REFRESH, IMAGE_DISPATCH_LAYER_INTERNAL_START = IMAGE_DISPATCH_LAYER_REFRESH, IMAGE_DISPATCH_LAYER_MIGRATION, IMAGE_DISPATCH_LAYER_JOURNAL, IMAGE_DISPATCH_LAYER_WRITE_BLOCK, IMAGE_DISPATCH_LAYER_WRITEBACK_CACHE, IMAGE_DISPATCH_LAYER_CRYPTO, IMAGE_DISPATCH_LAYER_CORE, IMAGE_DISPATCH_LAYER_LAST }; enum { IMAGE_DISPATCH_FLAG_QOS_IOPS_THROTTLE = 1 << 0, IMAGE_DISPATCH_FLAG_QOS_BPS_THROTTLE = 1 << 1, IMAGE_DISPATCH_FLAG_QOS_READ_IOPS_THROTTLE = 1 << 2, IMAGE_DISPATCH_FLAG_QOS_WRITE_IOPS_THROTTLE = 1 << 3, IMAGE_DISPATCH_FLAG_QOS_READ_BPS_THROTTLE = 1 << 4, IMAGE_DISPATCH_FLAG_QOS_WRITE_BPS_THROTTLE = 1 << 5, IMAGE_DISPATCH_FLAG_QOS_BPS_MASK = ( IMAGE_DISPATCH_FLAG_QOS_BPS_THROTTLE | IMAGE_DISPATCH_FLAG_QOS_READ_BPS_THROTTLE | 
IMAGE_DISPATCH_FLAG_QOS_WRITE_BPS_THROTTLE), IMAGE_DISPATCH_FLAG_QOS_IOPS_MASK = ( IMAGE_DISPATCH_FLAG_QOS_IOPS_THROTTLE | IMAGE_DISPATCH_FLAG_QOS_READ_IOPS_THROTTLE | IMAGE_DISPATCH_FLAG_QOS_WRITE_IOPS_THROTTLE), IMAGE_DISPATCH_FLAG_QOS_READ_MASK = ( IMAGE_DISPATCH_FLAG_QOS_READ_IOPS_THROTTLE | IMAGE_DISPATCH_FLAG_QOS_READ_BPS_THROTTLE), IMAGE_DISPATCH_FLAG_QOS_WRITE_MASK = ( IMAGE_DISPATCH_FLAG_QOS_WRITE_IOPS_THROTTLE | IMAGE_DISPATCH_FLAG_QOS_WRITE_BPS_THROTTLE), IMAGE_DISPATCH_FLAG_QOS_MASK = ( IMAGE_DISPATCH_FLAG_QOS_BPS_MASK | IMAGE_DISPATCH_FLAG_QOS_IOPS_MASK), // TODO: pass area through ImageDispatchInterface and remove // this flag IMAGE_DISPATCH_FLAG_CRYPTO_HEADER = 1 << 6 }; enum { RBD_IO_OPERATIONS_DEFAULT = 0, RBD_IO_OPERATION_READ = 1 << 0, RBD_IO_OPERATION_WRITE = 1 << 1, RBD_IO_OPERATION_DISCARD = 1 << 2, RBD_IO_OPERATION_WRITE_SAME = 1 << 3, RBD_IO_OPERATION_COMPARE_AND_WRITE = 1 << 4, RBD_IO_OPERATIONS_ALL = ( RBD_IO_OPERATION_READ | RBD_IO_OPERATION_WRITE | RBD_IO_OPERATION_DISCARD | RBD_IO_OPERATION_WRITE_SAME | RBD_IO_OPERATION_COMPARE_AND_WRITE) }; enum ObjectDispatchLayer { OBJECT_DISPATCH_LAYER_NONE = 0, OBJECT_DISPATCH_LAYER_CACHE, OBJECT_DISPATCH_LAYER_CRYPTO, OBJECT_DISPATCH_LAYER_JOURNAL, OBJECT_DISPATCH_LAYER_PARENT_CACHE, OBJECT_DISPATCH_LAYER_SCHEDULER, OBJECT_DISPATCH_LAYER_CORE, OBJECT_DISPATCH_LAYER_LAST }; enum { READ_FLAG_DISABLE_READ_FROM_PARENT = 1UL << 0, READ_FLAG_DISABLE_CLIPPING = 1UL << 1, }; enum { OBJECT_WRITE_FLAG_CREATE_EXCLUSIVE = 1UL << 0 }; enum { OBJECT_DISCARD_FLAG_DISABLE_CLONE_REMOVE = 1UL << 0, OBJECT_DISCARD_FLAG_DISABLE_OBJECT_MAP_UPDATE = 1UL << 1 }; enum { OBJECT_DISPATCH_FLAG_FLUSH = 1UL << 0, OBJECT_DISPATCH_FLAG_WILL_RETRY_ON_ERROR = 1UL << 1 }; enum { LIST_SNAPS_FLAG_DISABLE_LIST_FROM_PARENT = 1UL << 0, LIST_SNAPS_FLAG_WHOLE_OBJECT = 1UL << 1, LIST_SNAPS_FLAG_IGNORE_ZEROED_EXTENTS = 1UL << 2, }; enum SparseExtentState { SPARSE_EXTENT_STATE_DNE, /* does not exist */ SPARSE_EXTENT_STATE_ZEROED, 
SPARSE_EXTENT_STATE_DATA }; std::ostream& operator<<(std::ostream& os, SparseExtentState state); struct SparseExtent { SparseExtentState state; uint64_t length; SparseExtent(SparseExtentState state, uint64_t length) : state(state), length(length) { } operator SparseExtentState() const { return state; } bool operator==(const SparseExtent& rhs) const { return state == rhs.state && length == rhs.length; } }; std::ostream& operator<<(std::ostream& os, const SparseExtent& state); struct SparseExtentSplitMerge { SparseExtent split(uint64_t offset, uint64_t length, SparseExtent &se) const { return SparseExtent(se.state, se.length); } bool can_merge(const SparseExtent& left, const SparseExtent& right) const { return left.state == right.state; } SparseExtent merge(SparseExtent&& left, SparseExtent&& right) const { SparseExtent se(left); se.length += right.length; return se; } uint64_t length(const SparseExtent& se) const { return se.length; } }; typedef interval_map<uint64_t, SparseExtent, SparseExtentSplitMerge> SparseExtents; typedef std::vector<uint64_t> SnapIds; typedef std::pair<librados::snap_t, librados::snap_t> WriteReadSnapIds; extern const WriteReadSnapIds INITIAL_WRITE_READ_SNAP_IDS; typedef std::map<WriteReadSnapIds, SparseExtents> SnapshotDelta; struct SparseBufferlistExtent : public SparseExtent { ceph::bufferlist bl; SparseBufferlistExtent(SparseExtentState state, uint64_t length) : SparseExtent(state, length) { ceph_assert(state != SPARSE_EXTENT_STATE_DATA); } SparseBufferlistExtent(SparseExtentState state, uint64_t length, ceph::bufferlist&& bl_) : SparseExtent(state, length), bl(std::move(bl_)) { ceph_assert(state != SPARSE_EXTENT_STATE_DATA || length == bl.length()); } bool operator==(const SparseBufferlistExtent& rhs) const { return (state == rhs.state && length == rhs.length && bl.contents_equal(rhs.bl)); } }; struct SparseBufferlistExtentSplitMerge { SparseBufferlistExtent split(uint64_t offset, uint64_t length, SparseBufferlistExtent& sbe) const { 
ceph::bufferlist bl; if (sbe.state == SPARSE_EXTENT_STATE_DATA) { bl.substr_of(bl, offset, length); } return SparseBufferlistExtent(sbe.state, length, std::move(bl)); } bool can_merge(const SparseBufferlistExtent& left, const SparseBufferlistExtent& right) const { return left.state == right.state; } SparseBufferlistExtent merge(SparseBufferlistExtent&& left, SparseBufferlistExtent&& right) const { if (left.state == SPARSE_EXTENT_STATE_DATA) { ceph::bufferlist bl{std::move(left.bl)}; bl.claim_append(std::move(right.bl)); return SparseBufferlistExtent(SPARSE_EXTENT_STATE_DATA, bl.length(), std::move(bl)); } else { return SparseBufferlistExtent(left.state, left.length + right.length, {}); } } uint64_t length(const SparseBufferlistExtent& sbe) const { return sbe.length; } }; typedef interval_map<uint64_t, SparseBufferlistExtent, SparseBufferlistExtentSplitMerge> SparseBufferlist; typedef std::map<uint64_t, SparseBufferlist> SnapshotSparseBufferlist; using striper::LightweightBufferExtents; using striper::LightweightObjectExtent; using striper::LightweightObjectExtents; typedef std::pair<uint64_t,uint64_t> Extent; typedef std::vector<Extent> Extents; enum class ImageArea { DATA, CRYPTO_HEADER }; std::ostream& operator<<(std::ostream& os, ImageArea area); struct ReadExtent { const uint64_t offset; const uint64_t length; const LightweightBufferExtents buffer_extents; ceph::bufferlist bl; Extents extent_map; ReadExtent(uint64_t offset, uint64_t length) : offset(offset), length(length) {}; ReadExtent(uint64_t offset, uint64_t length, const LightweightBufferExtents&& buffer_extents) : offset(offset), length(length), buffer_extents(buffer_extents) {} ReadExtent(uint64_t offset, uint64_t length, const LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& bl, Extents&& extent_map) : offset(offset), length(length), buffer_extents(buffer_extents), bl(bl), extent_map(extent_map) {}; friend inline std::ostream& operator<<( std::ostream& os, const ReadExtent &extent) { os << 
"offset=" << extent.offset << ", " << "length=" << extent.length << ", " << "buffer_extents=" << extent.buffer_extents << ", " << "bl.length=" << extent.bl.length() << ", " << "extent_map=" << extent.extent_map; return os; } }; typedef std::vector<ReadExtent> ReadExtents; typedef std::map<uint64_t, uint64_t> ExtentMap; } // namespace io } // namespace librbd #endif // CEPH_LIBRBD_IO_TYPES_H
9,779
28.726444
80
h
null
ceph-main/src/librbd/io/Utils.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/io/Utils.h" #include "common/dout.h" #include "include/buffer.h" #include "include/rados/librados.hpp" #include "include/neorados/RADOS.hpp" #include "librbd/internal.h" #include "librbd/Utils.h" #include "librbd/io/AioCompletion.h" #include "librbd/io/ImageDispatchSpec.h" #include "librbd/io/ObjectRequest.h" #include "librbd/io/ImageDispatcherInterface.h" #include "osd/osd_types.h" #include "osdc/Striper.h" #define dout_subsys ceph_subsys_rbd #undef dout_prefix #define dout_prefix *_dout << "librbd::io::util: " << __func__ << ": " namespace librbd { namespace io { namespace util { void apply_op_flags(uint32_t op_flags, uint32_t flags, neorados::Op* op) { if (op_flags & LIBRADOS_OP_FLAG_FADVISE_RANDOM) op->set_fadvise_random(); if (op_flags & LIBRADOS_OP_FLAG_FADVISE_SEQUENTIAL) op->set_fadvise_sequential(); if (op_flags & LIBRADOS_OP_FLAG_FADVISE_WILLNEED) op->set_fadvise_willneed(); if (op_flags & LIBRADOS_OP_FLAG_FADVISE_DONTNEED) op->set_fadvise_dontneed(); if (op_flags & LIBRADOS_OP_FLAG_FADVISE_NOCACHE) op->set_fadvise_nocache(); if (flags & librados::OPERATION_BALANCE_READS) op->balance_reads(); if (flags & librados::OPERATION_LOCALIZE_READS) op->localize_reads(); } bool assemble_write_same_extent( const LightweightObjectExtent &object_extent, const ceph::bufferlist& data, ceph::bufferlist *ws_data, bool force_write) { size_t data_len = data.length(); if (!force_write) { bool may_writesame = true; for (auto& q : object_extent.buffer_extents) { if (!(q.first % data_len == 0 && q.second % data_len == 0)) { may_writesame = false; break; } } if (may_writesame) { ws_data->append(data); return true; } } for (auto& q : object_extent.buffer_extents) { bufferlist sub_bl; uint64_t sub_off = q.first % data_len; uint64_t sub_len = data_len - sub_off; uint64_t extent_left = q.second; while (extent_left >= sub_len) { sub_bl.substr_of(data, sub_off, sub_len); 
ws_data->claim_append(sub_bl); extent_left -= sub_len; if (sub_off) { sub_off = 0; sub_len = data_len; } } if (extent_left) { sub_bl.substr_of(data, sub_off, extent_left); ws_data->claim_append(sub_bl); } } return false; } template <typename I> void read_parent(I *image_ctx, uint64_t object_no, ReadExtents* read_extents, librados::snap_t snap_id, const ZTracer::Trace &trace, Context* on_finish) { auto cct = image_ctx->cct; std::shared_lock image_locker{image_ctx->image_lock}; Extents parent_extents; ImageArea area; uint64_t raw_overlap = 0; uint64_t object_overlap = 0; image_ctx->get_parent_overlap(snap_id, &raw_overlap); if (raw_overlap > 0) { // calculate reverse mapping onto the parent image Extents extents; for (const auto& extent : *read_extents) { extents.emplace_back(extent.offset, extent.length); } std::tie(parent_extents, area) = object_to_area_extents(image_ctx, object_no, extents); object_overlap = image_ctx->prune_parent_extents(parent_extents, area, raw_overlap, false); } if (object_overlap == 0) { image_locker.unlock(); on_finish->complete(-ENOENT); return; } ldout(cct, 20) << dendl; ceph::bufferlist* parent_read_bl; if (read_extents->size() > 1) { auto parent_comp = new ReadResult::C_ObjectReadMergedExtents( cct, read_extents, on_finish); parent_read_bl = &parent_comp->bl; on_finish = parent_comp; } else { parent_read_bl = &read_extents->front().bl; } auto comp = AioCompletion::create_and_start(on_finish, image_ctx->parent, AIO_TYPE_READ); ldout(cct, 20) << "completion=" << comp << " parent_extents=" << parent_extents << " area=" << area << dendl; auto req = io::ImageDispatchSpec::create_read( *image_ctx->parent, io::IMAGE_DISPATCH_LAYER_INTERNAL_START, comp, std::move(parent_extents), area, ReadResult{parent_read_bl}, image_ctx->parent->get_data_io_context(), 0, 0, trace); req->send(); } template <typename I> int clip_request(I* image_ctx, Extents* image_extents, ImageArea area) { std::shared_lock image_locker{image_ctx->image_lock}; for (auto 
&image_extent : *image_extents) { auto clip_len = image_extent.second; int r = clip_io(librbd::util::get_image_ctx(image_ctx), image_extent.first, &clip_len, area); if (r < 0) { return r; } image_extent.second = clip_len; } return 0; } void unsparsify(CephContext* cct, ceph::bufferlist* bl, const Extents& extent_map, uint64_t bl_off, uint64_t out_bl_len) { Striper::StripedReadResult destriper; bufferlist out_bl; destriper.add_partial_sparse_result(cct, std::move(*bl), extent_map, bl_off, {{0, out_bl_len}}); destriper.assemble_result(cct, out_bl, true); *bl = out_bl; } template <typename I> bool trigger_copyup(I* image_ctx, uint64_t object_no, IOContext io_context, Context* on_finish) { bufferlist bl; auto req = new ObjectWriteRequest<I>( image_ctx, object_no, 0, std::move(bl), io_context, 0, 0, std::nullopt, {}, on_finish); if (!req->has_parent()) { delete req; return false; } req->send(); return true; } template <typename I> void area_to_object_extents(I* image_ctx, uint64_t offset, uint64_t length, ImageArea area, uint64_t buffer_offset, striper::LightweightObjectExtents* object_extents) { Extents extents = {{offset, length}}; image_ctx->io_image_dispatcher->remap_to_physical(extents, area); for (auto [off, len] : extents) { Striper::file_to_extents(image_ctx->cct, &image_ctx->layout, off, len, 0, buffer_offset, object_extents); } } template <typename I> std::pair<Extents, ImageArea> object_to_area_extents( I* image_ctx, uint64_t object_no, const Extents& object_extents) { Extents extents; for (auto [off, len] : object_extents) { Striper::extent_to_file(image_ctx->cct, &image_ctx->layout, object_no, off, len, extents); } auto area = image_ctx->io_image_dispatcher->remap_to_logical(extents); return {std::move(extents), area}; } template <typename I> uint64_t area_to_raw_offset(const I& image_ctx, uint64_t offset, ImageArea area) { Extents extents = {{offset, 0}}; image_ctx.io_image_dispatcher->remap_to_physical(extents, area); return extents[0].first; } template 
<typename I> std::pair<uint64_t, ImageArea> raw_to_area_offset(const I& image_ctx, uint64_t offset) { Extents extents = {{offset, 0}}; auto area = image_ctx.io_image_dispatcher->remap_to_logical(extents); return {extents[0].first, area}; } } // namespace util } // namespace io } // namespace librbd template void librbd::io::util::read_parent( librbd::ImageCtx *image_ctx, uint64_t object_no, ReadExtents* extents, librados::snap_t snap_id, const ZTracer::Trace &trace, Context* on_finish); template int librbd::io::util::clip_request( librbd::ImageCtx* image_ctx, Extents* image_extents, ImageArea area); template bool librbd::io::util::trigger_copyup( librbd::ImageCtx *image_ctx, uint64_t object_no, IOContext io_context, Context* on_finish); template void librbd::io::util::area_to_object_extents( librbd::ImageCtx* image_ctx, uint64_t offset, uint64_t length, ImageArea area, uint64_t buffer_offset, striper::LightweightObjectExtents* object_extents); template auto librbd::io::util::object_to_area_extents( librbd::ImageCtx* image_ctx, uint64_t object_no, const Extents& extents) -> std::pair<Extents, ImageArea>; template uint64_t librbd::io::util::area_to_raw_offset( const librbd::ImageCtx& image_ctx, uint64_t offset, ImageArea area); template auto librbd::io::util::raw_to_area_offset( const librbd::ImageCtx& image_ctx, uint64_t offset) -> std::pair<uint64_t, ImageArea>;
8,435
32.744
80
cc
null
ceph-main/src/librbd/io/Utils.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_IO_UTILS_H #define CEPH_LIBRBD_IO_UTILS_H #include "include/int_types.h" #include "include/buffer_fwd.h" #include "include/rados/rados_types.hpp" #include "common/zipkin_trace.h" #include "librbd/Types.h" #include "librbd/io/Types.h" #include <map> class ObjectExtent; namespace neorados { struct Op; } namespace librbd { struct ImageCtx; namespace io { namespace util { void apply_op_flags(uint32_t op_flags, uint32_t flags, neorados::Op* op); bool assemble_write_same_extent(const LightweightObjectExtent &object_extent, const ceph::bufferlist& data, ceph::bufferlist *ws_data, bool force_write); template <typename ImageCtxT = librbd::ImageCtx> void read_parent(ImageCtxT *image_ctx, uint64_t object_no, ReadExtents* read_extents, librados::snap_t snap_id, const ZTracer::Trace &trace, Context* on_finish); template <typename ImageCtxT = librbd::ImageCtx> int clip_request(ImageCtxT* image_ctx, Extents* image_extents, ImageArea area); inline uint64_t get_extents_length(const Extents &extents) { uint64_t total_bytes = 0; for (auto [_, extent_length] : extents) { total_bytes += extent_length; } return total_bytes; } void unsparsify(CephContext* cct, ceph::bufferlist* bl, const Extents& extent_map, uint64_t bl_off, uint64_t out_bl_len); template <typename ImageCtxT = librbd::ImageCtx> bool trigger_copyup(ImageCtxT *image_ctx, uint64_t object_no, IOContext io_context, Context* on_finish); template <typename ImageCtxT = librbd::ImageCtx> void area_to_object_extents(ImageCtxT* image_ctx, uint64_t offset, uint64_t length, ImageArea area, uint64_t buffer_offset, striper::LightweightObjectExtents* object_extents); template <typename ImageCtxT = librbd::ImageCtx> std::pair<Extents, ImageArea> object_to_area_extents( ImageCtxT* image_ctx, uint64_t object_no, const Extents& object_extents); template <typename ImageCtxT = librbd::ImageCtx> uint64_t 
area_to_raw_offset(const ImageCtxT& image_ctx, uint64_t offset, ImageArea area); template <typename ImageCtxT = librbd::ImageCtx> std::pair<uint64_t, ImageArea> raw_to_area_offset(const ImageCtxT& image_ctx, uint64_t offset); inline ObjectDispatchLayer get_previous_layer(ObjectDispatchLayer layer) { return (ObjectDispatchLayer)(((int)layer) - 1); } } // namespace util } // namespace io } // namespace librbd #endif // CEPH_LIBRBD_IO_UTILS_H
2,803
32.380952
79
h