repo
stringlengths 1
152
⌀ | file
stringlengths 15
205
| code
stringlengths 0
41.6M
| file_length
int64 0
41.6M
| avg_line_length
float64 0
1.81M
| max_line_length
int64 0
12.7M
| extension_type
stringclasses 90
values |
---|---|---|---|---|---|---|
null | ceph-main/src/common/async/bind_like.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 Red Hat <[email protected]>
* Author: Adam C. Emerson
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <boost/asio/associated_allocator.hpp>
#include <boost/asio/associated_executor.hpp>
#include <boost/asio/bind_allocator.hpp>
#include <boost/asio/bind_executor.hpp>
namespace ceph::async {
// Bind `completion` to both the given executor and the given allocator, so
// that intermediate handlers inherit the two associations together.
template<typename Executor, typename Allocator, typename Completion>
auto bind_ea(const Executor& executor, const Allocator& allocator,
	     Completion&& completion) {
  auto bound_to_executor =
    boost::asio::bind_executor(executor,
			       std::forward<Completion>(completion));
  // found by ADL on the asio binder type returned above
  return bind_allocator(allocator, std::move(bound_to_executor));
}
// Bind `Completion` to the executor and allocator of `Proto`
// Bind `completion` to the executor and allocator associated with `proto`.
template<typename Proto, typename Completion>
auto bind_like(const Proto& proto, Completion&& completion) {
  auto ex = boost::asio::get_associated_executor(proto);
  auto alloc = boost::asio::get_associated_allocator(proto);
  return bind_ea(ex, alloc, std::forward<Completion>(completion));
}
}
| 1,272 | 30.825 | 70 | h |
null | ceph-main/src/common/async/blocked_completion.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 Red Hat
* Author: Adam C. Emerson <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMMON_ASYNC_BLOCKED_COMPLETION_H
#define CEPH_COMMON_ASYNC_BLOCKED_COMPLETION_H
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <optional>
#include <type_traits>
#include <boost/asio/async_result.hpp>
#include <boost/system/error_code.hpp>
#include <boost/system/system_error.hpp>
namespace ceph::async {
namespace bs = boost::system;
class use_blocked_t {
use_blocked_t(bs::error_code* ec) : ec(ec) {}
public:
use_blocked_t() = default;
use_blocked_t operator [](bs::error_code& _ec) const {
return use_blocked_t(&_ec);
}
bs::error_code* ec = nullptr;
};
inline constexpr use_blocked_t use_blocked;
namespace detail {
// Completion handler that writes the operation's results into storage owned
// by the matching blocked_result, then wakes the blocked thread. The pointer
// members are wired up by blocked_result's constructor before the handler
// can be invoked.
template<typename... Ts>
struct blocked_handler
{
  // `b.ec` is the user's error slot from use_blocked[ec], or null
  blocked_handler(use_blocked_t b) noexcept : ec(b.ec) {}
  // success overload: clear the error, store the values, signal completion
  void operator ()(Ts... values) noexcept {
    std::scoped_lock l(*m);
    *ec = bs::error_code{};
    *value = std::forward_as_tuple(std::move(values)...);
    *done = true;
    cv->notify_one();
  }
  // error overload: record the error along with the (possibly default)
  // values
  void operator ()(bs::error_code ec, Ts... values) noexcept {
    std::scoped_lock l(*m);
    *this->ec = ec;
    *value = std::forward_as_tuple(std::move(values)...);
    *done = true;
    cv->notify_one();
  }
  // points at the user's error_code or at blocked_result's internal one
  bs::error_code* ec;
  std::optional<std::tuple<Ts...>>* value = nullptr;
  std::mutex* m = nullptr;
  std::condition_variable* cv = nullptr;
  bool* done = nullptr;
};
// Single-value specialization: stores one T rather than a one-element tuple.
// The pointer members are wired up by blocked_result<T>'s constructor before
// the handler can be invoked.
template<typename T>
struct blocked_handler<T>
{
  // `b.ec` is the user's error slot from use_blocked[ec], or null
  blocked_handler(use_blocked_t b) noexcept : ec(b.ec) {}
  // success overload: clear the error, store the value, signal completion
  void operator ()(T value) noexcept {
    std::scoped_lock l(*m);
    *ec = bs::error_code();
    *this->value = std::move(value);
    *done = true;
    cv->notify_one();
  }
  // error overload: record the error along with the value
  void operator ()(bs::error_code ec, T value) noexcept {
    std::scoped_lock l(*m);
    *this->ec = ec;
    *this->value = std::move(value);
    *done = true;
    cv->notify_one();
  }
  // points at the user's error_code or at blocked_result's internal one
  bs::error_code* ec;
  // initialize to nullptr for consistency with the primary template and the
  // void specialization (previously left uninitialized)
  std::optional<T>* value = nullptr;
  std::mutex* m = nullptr;
  std::condition_variable* cv = nullptr;
  bool* done = nullptr;
};
// Void specialization: no value to store, only the error code and the
// completion flag.
template<>
struct blocked_handler<void>
{
  // `b.ec` is the user's error slot from use_blocked[ec], or null
  blocked_handler(use_blocked_t b) noexcept : ec(b.ec) {}
  // success overload: clear the error and signal completion
  void operator ()() noexcept {
    std::scoped_lock l(*m);
    *ec = bs::error_code{};
    *done = true;
    cv->notify_one();
  }
  // error overload: record the error and signal completion
  void operator ()(bs::error_code ec) noexcept {
    std::scoped_lock l(*m);
    *this->ec = ec;
    *done = true;
    cv->notify_one();
  }
  // points at the user's error_code or at blocked_result's internal one
  bs::error_code* ec;
  std::mutex* m = nullptr;
  std::condition_variable* cv = nullptr;
  bool* done = nullptr;
};
// Owns the synchronization state and result storage for a blocked wait.
// The constructor points the handler's members at this object; get() then
// blocks until the handler reports completion.
template<typename... Ts>
class blocked_result
{
public:
  using completion_handler_type = blocked_handler<Ts...>;
  using return_type = std::tuple<Ts...>;
  explicit blocked_result(completion_handler_type& h) noexcept {
    std::scoped_lock l(m);
    // remember whether the user supplied their own error_code via
    // use_blocked[ec]; if not, route errors into our internal `ec`
    out_ec = h.ec;
    if (!out_ec) h.ec = &ec;
    h.value = &value;
    h.m = &m;
    h.cv = &cv;
    h.done = &done;
  }
  // Block until the handler runs. Throws bs::system_error on failure unless
  // the user captured the error with use_blocked[ec].
  return_type get() {
    std::unique_lock l(m);
    cv.wait(l, [this]() { return done; });
    if (!out_ec && ec) throw bs::system_error(ec);
    return std::move(*value);
  }
  // non-copyable, non-movable: the handler holds raw pointers into us
  blocked_result(const blocked_result&) = delete;
  blocked_result& operator =(const blocked_result&) = delete;
  blocked_result(blocked_result&&) = delete;
  blocked_result& operator =(blocked_result&&) = delete;
private:
  bs::error_code* out_ec;  // user's error slot, or null if throwing
  bs::error_code ec;       // internal error slot used when out_ec is null
  std::optional<return_type> value;
  std::mutex m;
  std::condition_variable cv;
  bool done = false;
};
// Single-value specialization of blocked_result: return_type is T itself
// rather than a tuple.
template<typename T>
class blocked_result<T>
{
public:
  using completion_handler_type = blocked_handler<T>;
  using return_type = T;
  explicit blocked_result(completion_handler_type& h) noexcept {
    std::scoped_lock l(m);
    // remember whether the user supplied their own error_code via
    // use_blocked[ec]; if not, route errors into our internal `ec`
    out_ec = h.ec;
    if (!out_ec) h.ec = &ec;
    h.value = &value;
    h.m = &m;
    h.cv = &cv;
    h.done = &done;
  }
  // Block until the handler runs. Throws bs::system_error on failure unless
  // the user captured the error with use_blocked[ec].
  return_type get() {
    std::unique_lock l(m);
    cv.wait(l, [this]() { return done; });
    if (!out_ec && ec) throw bs::system_error(ec);
    return std::move(*value);
  }
  // non-copyable, non-movable: the handler holds raw pointers into us
  blocked_result(const blocked_result&) = delete;
  blocked_result& operator =(const blocked_result&) = delete;
  blocked_result(blocked_result&&) = delete;
  blocked_result& operator =(blocked_result&&) = delete;
private:
  bs::error_code* out_ec;  // user's error slot, or null if throwing
  bs::error_code ec;       // internal error slot used when out_ec is null
  std::optional<return_type> value;
  std::mutex m;
  std::condition_variable cv;
  bool done = false;
};
// Void specialization of blocked_result: get() returns nothing, only waits
// and reports errors.
template<>
class blocked_result<void>
{
public:
  using completion_handler_type = blocked_handler<void>;
  using return_type = void;
  explicit blocked_result(completion_handler_type& h) noexcept {
    std::scoped_lock l(m);
    // remember whether the user supplied their own error_code via
    // use_blocked[ec]; if not, route errors into our internal `ec`
    out_ec = h.ec;
    if (!out_ec) h.ec = &ec;
    h.m = &m;
    h.cv = &cv;
    h.done = &done;
  }
  // Block until the handler runs. Throws bs::system_error on failure unless
  // the user captured the error with use_blocked[ec].
  void get() {
    std::unique_lock l(m);
    cv.wait(l, [this]() { return done; });
    if (!out_ec && ec) throw bs::system_error(ec);
  }
  // non-copyable, non-movable: the handler holds raw pointers into us
  blocked_result(const blocked_result&) = delete;
  blocked_result& operator =(const blocked_result&) = delete;
  blocked_result(blocked_result&&) = delete;
  blocked_result& operator =(blocked_result&&) = delete;
private:
  bs::error_code* out_ec;  // user's error slot, or null if throwing
  bs::error_code ec;       // internal error slot used when out_ec is null
  std::mutex m;
  std::condition_variable cv;
  bool done = false;
};
} // namespace detail
} // namespace ceph::async
namespace boost::asio {
// use_blocked with signature void(): wait with no result value.
template<typename ReturnType>
class async_result<ceph::async::use_blocked_t, ReturnType()>
  : public ceph::async::detail::blocked_result<void>
{
public:
  explicit async_result(typename ceph::async::detail::blocked_result<void>
			::completion_handler_type& h)
    : ceph::async::detail::blocked_result<void>(h) {}
};
// use_blocked with signature void(Args...): get() yields the decayed
// arguments (a tuple for multiple values).
template<typename ReturnType, typename... Args>
class async_result<ceph::async::use_blocked_t, ReturnType(Args...)>
  : public ceph::async::detail::blocked_result<std::decay_t<Args>...>
{
public:
  explicit async_result(
    typename ceph::async::detail::blocked_result<std::decay_t<Args>...>::completion_handler_type& h)
    : ceph::async::detail::blocked_result<std::decay_t<Args>...>(h) {}
};
// use_blocked with signature void(error_code): the error_code is consumed by
// the blocking machinery, so the wait yields no value.
template<typename ReturnType>
class async_result<ceph::async::use_blocked_t,
		   ReturnType(boost::system::error_code)>
  : public ceph::async::detail::blocked_result<void>
{
public:
  explicit async_result(
    typename ceph::async::detail::blocked_result<void>::completion_handler_type& h)
    : ceph::async::detail::blocked_result<void>(h) {}
};
// use_blocked with signature void(error_code, Args...): the error_code is
// consumed by the blocking machinery; get() yields the remaining decayed
// arguments.
template<typename ReturnType, typename... Args>
class async_result<ceph::async::use_blocked_t,
		   ReturnType(boost::system::error_code, Args...)>
  : public ceph::async::detail::blocked_result<std::decay_t<Args>...>
{
public:
  explicit async_result(
    typename ceph::async::detail::blocked_result<std::decay_t<Args>...>::completion_handler_type& h)
    : ceph::async::detail::blocked_result<std::decay_t<Args>...>(h) {}
};
}
#endif // !CEPH_COMMON_ASYNC_BLOCKED_COMPLETION_H
| 7,225 | 23.831615 | 100 | h |
null | ceph-main/src/common/async/completion.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_ASYNC_COMPLETION_H
#define CEPH_ASYNC_COMPLETION_H
#include <memory>
#include "bind_handler.h"
#include "forward_handler.h"
namespace ceph::async {
/**
* Abstract completion handler interface for use with boost::asio.
*
* Memory management is performed using the Handler's 'associated allocator',
* which carries the additional requirement that its memory be released before
* the Handler is invoked. This allows memory allocated for one asynchronous
* operation to be reused in its continuation. Because of this requirement, any
* calls to invoke the completion must first release ownership of it. To enforce
* this, the static functions defer()/dispatch()/post() take the completion by
* rvalue-reference to std::unique_ptr<Completion>, i.e. std::move(completion).
*
* Handlers may also have an 'associated executor', so the calls to defer(),
* dispatch(), and post() are forwarded to that executor. If there is no
* associated executor (which is generally the case unless one was bound with
* boost::asio::bind_executor()), the executor passed to Completion::create()
* is used as a default.
*
* Example use:
*
* // declare a Completion type with Signature = void(int, string)
* using MyCompletion = ceph::async::Completion<void(int, string)>;
*
* // create a completion with the given callback:
* std::unique_ptr<MyCompletion> c;
* c = MyCompletion::create(ex, [] (int a, const string& b) {});
*
* // bind arguments to the callback and post to its associated executor:
* MyCompletion::post(std::move(c), 5, "hello");
*
*
* Additional user data may be stored along with the Completion to take
* advantage of the handler allocator optimization. This is accomplished by
* specifying its type in the template parameter T. For example, the type
* Completion<void(), int> contains a public member variable 'int user_data'.
* Any additional arguments to Completion::create() will be forwarded to type
* T's constructor.
*
* If the AsBase<T> type tag is used, as in Completion<void(), AsBase<T>>,
* the Completion will inherit from T instead of declaring it as a member
* variable.
*
* When invoking the completion handler via defer(), dispatch(), or post(),
* care must be taken when passing arguments that refer to user data, because
* its memory is destroyed prior to invocation. In such cases, the user data
* should be moved/copied out of the Completion first.
*/
template <typename Signature, typename T = void>
class Completion;
/// type tag for UserData
template <typename T> struct AsBase {};
namespace detail {
/// optional user data to be stored with the Completion
// stores the user's T as a member variable, forwarding constructor
// arguments to it
template <typename T>
struct UserData {
  T user_data;
  template <typename ...Args>
  UserData(Args&& ...args)
    : user_data(std::forward<Args>(args)...)
  {}
};
// AsBase specialization inherits from T
// AsBase specialization inherits from T instead of holding it as a member,
// so the Completion can be used directly as a T
template <typename T>
struct UserData<AsBase<T>> : public T {
  template <typename ...Args>
  UserData(Args&& ...args)
    : T(std::forward<Args>(args)...)
  {}
};
// void specialization
// void specialization: empty base, no user data stored
template <>
class UserData<void> {};
} // namespace detail
// template specialization to pull the Signature's args apart
// template specialization to pull the Signature's args apart. Args... are
// the completion arguments; T is optional user data (see class comment).
template <typename T, typename ...Args>
class Completion<void(Args...), T> : public detail::UserData<T> {
 protected:
  // internal interfaces for type-erasure on the Handler/Executor. uses
  // tuple<Args...> to provide perfect forwarding because you can't make
  // virtual function templates. each destroy_*() releases this object's
  // memory before scheduling the handler, per the allocation rules in the
  // class comment.
  virtual void destroy_defer(std::tuple<Args...>&& args) = 0;
  virtual void destroy_dispatch(std::tuple<Args...>&& args) = 0;
  virtual void destroy_post(std::tuple<Args...>&& args) = 0;
  virtual void destroy() = 0;
  // constructor is protected, use create(). any constructor arguments are
  // forwarded to UserData
  template <typename ...TArgs>
  Completion(TArgs&& ...args)
    : detail::UserData<T>(std::forward<TArgs>(args)...)
  {}
 public:
  virtual ~Completion() = default;

  // use the virtual destroy() interface on delete. this allows the derived
  // class to manage its memory using Handler allocators, without having to use
  // a custom Deleter for std::unique_ptr<>
  static void operator delete(void *p) {
    static_cast<Completion*>(p)->destroy();
  }

  /// completion factory function that uses the handler's associated allocator.
  /// any additional arguments are forwared to T's constructor
  template <typename Executor1, typename Handler, typename ...TArgs>
  static std::unique_ptr<Completion>
  create(const Executor1& ex1, Handler&& handler, TArgs&& ...args);

  /// take ownership of the completion, bind any arguments to the completion
  /// handler, then defer() it on its associated executor
  template <typename ...Args2>
  static void defer(std::unique_ptr<Completion>&& c, Args2&&...args);

  /// take ownership of the completion, bind any arguments to the completion
  /// handler, then dispatch() it on its associated executor
  template <typename ...Args2>
  static void dispatch(std::unique_ptr<Completion>&& c, Args2&&...args);

  /// take ownership of the completion, bind any arguments to the completion
  /// handler, then post() it to its associated executor
  template <typename ...Args2>
  static void post(std::unique_ptr<Completion>&& c, Args2&&...args);
};
namespace detail {
// concrete Completion that knows how to invoke the completion handler. this
// observes all of the 'Requirements on asynchronous operations' specified by
// the C++ Networking TS
template <typename Executor1, typename Handler, typename T, typename ...Args>
class CompletionImpl final : public Completion<void(Args...), T> {
  // use Handler's associated executor (or Executor1 by default) for callbacks
  using Executor2 = boost::asio::associated_executor_t<Handler, Executor1>;
  // maintain work on both executors
  using Work1 = boost::asio::executor_work_guard<Executor1>;
  using Work2 = boost::asio::executor_work_guard<Executor2>;
  std::pair<Work1, Work2> work;
  Handler handler;
  // use Handler's associated allocator, rebound to allocate CompletionImpl
  using Alloc2 = boost::asio::associated_allocator_t<Handler>;
  using Traits2 = std::allocator_traits<Alloc2>;
  using RebindAlloc2 = typename Traits2::template rebind_alloc<CompletionImpl>;
  using RebindTraits2 = std::allocator_traits<RebindAlloc2>;

  // placement new for the handler allocator
  static void* operator new(size_t, RebindAlloc2 alloc2) {
    return RebindTraits2::allocate(alloc2, 1);
  }
  // placement delete for when the constructor throws during placement new
  static void operator delete(void *p, RebindAlloc2 alloc2) {
    RebindTraits2::deallocate(alloc2, static_cast<CompletionImpl*>(p), 1);
  }

  // bind the args to the handler and wrap so invocation forwards as rvalue
  static auto bind_and_forward(Handler&& h, std::tuple<Args...>&& args) {
    return forward_handler(CompletionHandler{std::move(h), std::move(args)});
  }

  // each destroy_*() below releases this object's memory *before* scheduling
  // the handler, as required by the handler allocation rules. the allocator
  // is fetched before the handler is moved out: asking a moved-from handler
  // for its associated allocator is not guaranteed to be safe.
  void destroy_defer(std::tuple<Args...>&& args) override {
    auto w = std::move(work);
    RebindAlloc2 alloc2 = boost::asio::get_associated_allocator(handler);
    auto f = bind_and_forward(std::move(handler), std::move(args));
    RebindTraits2::destroy(alloc2, this);
    RebindTraits2::deallocate(alloc2, this, 1);
    w.second.get_executor().defer(std::move(f), alloc2);
  }
  void destroy_dispatch(std::tuple<Args...>&& args) override {
    auto w = std::move(work);
    RebindAlloc2 alloc2 = boost::asio::get_associated_allocator(handler);
    auto f = bind_and_forward(std::move(handler), std::move(args));
    RebindTraits2::destroy(alloc2, this);
    RebindTraits2::deallocate(alloc2, this, 1);
    w.second.get_executor().dispatch(std::move(f), alloc2);
  }
  void destroy_post(std::tuple<Args...>&& args) override {
    auto w = std::move(work);
    RebindAlloc2 alloc2 = boost::asio::get_associated_allocator(handler);
    auto f = bind_and_forward(std::move(handler), std::move(args));
    RebindTraits2::destroy(alloc2, this);
    RebindTraits2::deallocate(alloc2, this, 1);
    w.second.get_executor().post(std::move(f), alloc2);
  }
  // destroy without invoking: used by operator delete
  void destroy() override {
    RebindAlloc2 alloc2 = boost::asio::get_associated_allocator(handler);
    RebindTraits2::destroy(alloc2, this);
    RebindTraits2::deallocate(alloc2, this, 1);
  }

  // constructor is private, use create(). extra constructor arguments are
  // forwarded to UserData
  template <typename ...TArgs>
  CompletionImpl(const Executor1& ex1, Handler&& handler, TArgs&& ...args)
    : Completion<void(Args...), T>(std::forward<TArgs>(args)...),
      work(ex1, boost::asio::make_work_guard(handler, ex1)),
      handler(std::move(handler))
  {}

 public:
  // allocate with the handler's associated allocator and construct
  template <typename ...TArgs>
  static auto create(const Executor1& ex, Handler&& handler, TArgs&& ...args) {
    auto alloc2 = boost::asio::get_associated_allocator(handler);
    using Ptr = std::unique_ptr<CompletionImpl>;
    return Ptr{new (alloc2) CompletionImpl(ex, std::move(handler),
					   std::forward<TArgs>(args)...)};
  }

  static void operator delete(void *p) {
    static_cast<CompletionImpl*>(p)->destroy();
  }
};
} // namespace detail
// out-of-line definition of Completion::create(): instantiate the concrete
// CompletionImpl for this executor/handler pair
template <typename T, typename ...Args>
template <typename Executor1, typename Handler, typename ...TArgs>
std::unique_ptr<Completion<void(Args...), T>>
Completion<void(Args...), T>::create(const Executor1& ex,
                                     Handler&& handler, TArgs&& ...args)
{
  using Impl = detail::CompletionImpl<Executor1, Handler, T, Args...>;
  return Impl::create(ex, std::forward<Handler>(handler),
                      std::forward<TArgs>(args)...);
}
// release ownership of the completion, then defer() it with the bound
// arguments on its associated executor
template <typename T, typename ...Args>
template <typename ...Args2>
void Completion<void(Args...), T>::defer(std::unique_ptr<Completion>&& ptr,
                                         Args2&& ...args)
{
  auto bound_args = std::make_tuple(std::forward<Args2>(args)...);
  ptr.release()->destroy_defer(std::move(bound_args));
}
// release ownership of the completion, then dispatch() it with the bound
// arguments on its associated executor
template <typename T, typename ...Args>
template <typename ...Args2>
void Completion<void(Args...), T>::dispatch(std::unique_ptr<Completion>&& ptr,
                                            Args2&& ...args)
{
  auto bound_args = std::make_tuple(std::forward<Args2>(args)...);
  ptr.release()->destroy_dispatch(std::move(bound_args));
}
// release ownership of the completion, then post() it with the bound
// arguments to its associated executor
template <typename T, typename ...Args>
template <typename ...Args2>
void Completion<void(Args...), T>::post(std::unique_ptr<Completion>&& ptr,
                                        Args2&& ...args)
{
  auto bound_args = std::make_tuple(std::forward<Args2>(args)...);
  ptr.release()->destroy_post(std::move(bound_args));
}
/// completion factory function that uses the handler's associated allocator.
/// any additional arguments are forwared to T's constructor
// free-function convenience wrapper around Completion<Signature, T>::create()
template <typename Signature, typename T, typename Executor1,
          typename Handler, typename ...TArgs>
std::unique_ptr<Completion<Signature, T>>
create_completion(const Executor1& ex, Handler&& handler, TArgs&& ...args)
{
  return Completion<Signature, T>::create(ex, std::forward<Handler>(handler),
                                          std::forward<TArgs>(args)...);
}
/// take ownership of the completion, bind any arguments to the completion
/// handler, then defer() it on its associated executor
// free-function convenience wrapper around Completion::defer()
template <typename Signature, typename T, typename ...Args>
void defer(std::unique_ptr<Completion<Signature, T>>&& ptr, Args&& ...args)
{
  Completion<Signature, T>::defer(std::move(ptr), std::forward<Args>(args)...);
}
/// take ownership of the completion, bind any arguments to the completion
/// handler, then dispatch() it on its associated executor
// free-function convenience wrapper around Completion::dispatch()
template <typename Signature, typename T, typename ...Args>
void dispatch(std::unique_ptr<Completion<Signature, T>>&& ptr, Args&& ...args)
{
  Completion<Signature, T>::dispatch(std::move(ptr), std::forward<Args>(args)...);
}
/// take ownership of the completion, bind any arguments to the completion
/// handler, then post() it to its associated executor
// free-function convenience wrapper around Completion::post()
template <typename Signature, typename T, typename ...Args>
void post(std::unique_ptr<Completion<Signature, T>>&& ptr, Args&& ...args)
{
  Completion<Signature, T>::post(std::move(ptr), std::forward<Args>(args)...);
}
} // namespace ceph::async
#endif // CEPH_ASYNC_COMPLETION_H
| 12,465 | 37.834891 | 82 | h |
null | ceph-main/src/common/async/context_pool.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat <[email protected]>
* Author: Adam C. Emerson <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMMON_ASYNC_CONTEXT_POOL_H
#define CEPH_COMMON_ASYNC_CONTEXT_POOL_H
#include <cstddef>
#include <cstdint>
#include <mutex>
#include <optional>
#include <thread>
#include <vector>
#include <boost/asio/io_context.hpp>
#include <boost/asio/executor_work_guard.hpp>
#include "common/ceph_mutex.h"
#include "common/Thread.h"
namespace ceph::async {
/// A fixed-size pool of threads all running a single
/// boost::asio::io_context. start()/finish()/stop() are serialized by an
/// internal mutex.
class io_context_pool {
  std::vector<std::thread> threadvec;
  boost::asio::io_context ioctx;
  // while engaged, keeps ioctx.run() from returning when it runs out of work
  std::optional<boost::asio::executor_work_guard<
    boost::asio::io_context::executor_type>> guard;
  ceph::mutex m = make_mutex("ceph::io_context_pool::m");
  // drop the work guard and join all worker threads. caller must hold m.
  void cleanup() noexcept {
    guard = std::nullopt;
    for (auto& th : threadvec) {
      th.join();
    }
    threadvec.clear();
  }
public:
  io_context_pool() noexcept {}
  // construct and immediately start `threadcnt` worker threads
  io_context_pool(std::int16_t threadcnt) noexcept {
    start(threadcnt);
  }
  ~io_context_pool() {
    stop();
  }
  // launch `threadcnt` worker threads running ioctx.run().
  // no-op if the pool is already started.
  void start(std::int16_t threadcnt) noexcept {
    auto l = std::scoped_lock(m);
    if (threadvec.empty()) {
      guard.emplace(boost::asio::make_work_guard(ioctx));
      // restart() allows run() to be called again after a previous stop()
      ioctx.restart();
      for (std::int16_t i = 0; i < threadcnt; ++i) {
	threadvec.emplace_back(make_named_thread("io_context_pool",
						 [this]() {
						   ioctx.run();
						 }));
      }
    }
  }
  // graceful shutdown: release the work guard so run() returns once
  // outstanding work drains, then join the threads
  void finish() noexcept {
    auto l = std::scoped_lock(m);
    if (!threadvec.empty()) {
      cleanup();
    }
  }
  // immediate shutdown: stop the io_context, then join the threads
  void stop() noexcept {
    auto l = std::scoped_lock(m);
    if (!threadvec.empty()) {
      ioctx.stop();
      cleanup();
    }
  }
  boost::asio::io_context& get_io_context() {
    return ioctx;
  }
  // implicit conversion for APIs taking an io_context&
  operator boost::asio::io_context&() {
    return ioctx;
  }
  boost::asio::io_context::executor_type get_executor() {
    return ioctx.get_executor();
  }
};
}
#endif // CEPH_COMMON_ASYNC_CONTEXT_POOL_H
| 2,287 | 23.084211 | 70 | h |
null | ceph-main/src/common/async/forward_handler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_ASYNC_FORWARD_HANDLER_H
#define CEPH_ASYNC_FORWARD_HANDLER_H
#include <boost/asio.hpp>
namespace ceph::async {
/**
* A forwarding completion handler for use with boost::asio.
*
* A completion handler wrapper that invokes the handler's operator() as an
* rvalue, regardless of whether the wrapper is invoked as an lvalue or rvalue.
* This operation is potentially destructive to the wrapped handler, so is only
* suitable for single-use handlers.
*
* This is useful when combined with bind_handler() and move-only arguments,
* because executors will always call the lvalue overload of operator().
*
* The original Handler's associated allocator and executor are maintained.
*
* @see forward_handler
*/
// single-use wrapper that always invokes the wrapped handler as an rvalue,
// even when the wrapper itself is called as an lvalue. see the file comment
// above for rationale. the `handler` member is public because the
// associated_executor specialization below reads it.
template <typename Handler>
struct ForwardingHandler {
  Handler handler;

  ForwardingHandler(Handler&& handler)
    : handler(std::move(handler))
  {}

  // invoking moves from `handler`, so this is destructive: call once only
  template <typename ...Args>
  void operator()(Args&& ...args) {
    std::move(handler)(std::forward<Args>(args)...);
  }

  // preserve the wrapped handler's associated allocator
  using allocator_type = boost::asio::associated_allocator_t<Handler>;
  allocator_type get_allocator() const noexcept {
    return boost::asio::get_associated_allocator(handler);
  }
};
} // namespace ceph::async
namespace boost::asio {
// specialize boost::asio::associated_executor<> for ForwardingHandler
// specialize boost::asio::associated_executor<> for ForwardingHandler, so
// the wrapper is transparent to executor association
template <typename Handler, typename Executor>
struct associated_executor<ceph::async::ForwardingHandler<Handler>, Executor> {
  using type = boost::asio::associated_executor_t<Handler, Executor>;

  static type get(const ceph::async::ForwardingHandler<Handler>& handler,
                  const Executor& ex = Executor()) noexcept {
    return boost::asio::get_associated_executor(handler.handler, ex);
  }
};
} // namespace boost::asio
namespace ceph::async {
/**
* Returns a single-use completion handler that always forwards on operator().
*
* Wraps a completion handler such that it is always invoked as an rvalue. This
* is necessary when combining executors and bind_handler() with move-only
* argument types.
*
* Example use:
*
* auto callback = [] (std::unique_ptr<int>&& p) {};
* auto bound_handler = bind_handler(callback, std::make_unique<int>(5));
* auro handler = forward_handler(std::move(bound_handler));
*
* // execute the forwarding handler on an io_context:
* boost::asio::io_context context;
* boost::asio::post(context, std::move(handler));
* context.run();
*
* @see ForwardingHandler
*/
// factory: wrap `h` in a ForwardingHandler (class template argument
// deduction picks the decayed handler type for rvalues)
template <typename Handler>
auto forward_handler(Handler&& h)
{
  return ForwardingHandler{std::forward<Handler>(h)};
}
} // namespace ceph::async
#endif // CEPH_ASYNC_FORWARD_HANDLER_H
| 3,052 | 28.355769 | 79 | h |
null | ceph-main/src/common/async/librados_completion.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 Red Hat
* Author: Adam C. Emerson <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMMON_ASYNC_LIBRADOS_COMPLETION_H
#define CEPH_COMMON_ASYNC_LIBRADOS_COMPLETION_H
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <optional>
#include <type_traits>
#include <boost/asio/async_result.hpp>
#include <boost/system/error_code.hpp>
#include <boost/system/system_error.hpp>
#include "include/rados/librados.hpp"
#include "librados/AioCompletionImpl.h"
// Allow librados::AioCompletion to be provided as a completion
// handler. This is only allowed with a signature of
// (boost::system::error_code) or (). On completion the AioCompletion
// is completed with the error_code converted to an int with
// ceph::from_error_code.
//
// async_result::return_type is void.
namespace ceph::async {
namespace bs = boost::system;
namespace lr = librados;
namespace detail {
// completion handler that fulfills a librados::AioCompletion. holds a
// reference on the AioCompletionImpl from construction until it is either
// destroyed or invoked (invocation consumes the handler's reference via
// put_unlock()).
struct librados_handler {
  lr::AioCompletionImpl* pc;

  explicit librados_handler(lr::AioCompletion* c) : pc(c->pc) {
    pc->get();  // take a reference for the lifetime of this handler
  }
  ~librados_handler() {
    // only non-null if the handler was never invoked
    if (pc) {
      pc->put();
      pc = nullptr;
    }
  }
  librados_handler(const librados_handler&) = delete;
  librados_handler& operator =(const librados_handler&) = delete;
  // move transfers the reference; the source is left empty
  librados_handler(librados_handler&& rhs) {
    pc = rhs.pc;
    rhs.pc = nullptr;
  }

  void operator()(bs::error_code ec) {
    // record the result and mark complete under the lock
    pc->lock.lock();
    pc->rval = ceph::from_error_code(ec);
    pc->complete = true;
    pc->lock.unlock();

    // invoke user callbacks with the lock released
    auto cb_complete = pc->callback_complete;
    auto cb_complete_arg = pc->callback_complete_arg;
    if (cb_complete)
      cb_complete(pc, cb_complete_arg);

    auto cb_safe = pc->callback_safe;
    auto cb_safe_arg = pc->callback_safe_arg;
    if (cb_safe)
      cb_safe(pc, cb_safe_arg);

    // clear the callbacks, wake any waiters, and drop our reference
    pc->lock.lock();
    pc->callback_complete = NULL;
    pc->callback_safe = NULL;
    pc->cond.notify_all();
    pc->put_unlock();
    pc = nullptr;
  }
  // signature void(): treat as success
  void operator ()() {
    (*this)(bs::error_code{});
  }
};
} // namespace detail
} // namespace ceph::async
namespace boost::asio {
// AioCompletion* as a completion token with signature void(): the result is
// delivered through the AioCompletion itself, so get() returns nothing.
template<typename ReturnType>
class async_result<librados::AioCompletion*, ReturnType()> {
public:
  using completion_handler_type = ceph::async::detail::librados_handler;
  explicit async_result(completion_handler_type&) {};
  using return_type = void;
  void get() {
    return;
  }
};
// AioCompletion* as a completion token with signature void(error_code): the
// error is converted and stored in the AioCompletion; get() returns nothing.
template<typename ReturnType>
class async_result<librados::AioCompletion*,
		   ReturnType(boost::system::error_code)> {
public:
  using completion_handler_type = ceph::async::detail::librados_handler;
  explicit async_result(completion_handler_type&) {};
  using return_type = void;
  void get() {
    return;
  }
};
}
#endif // !CEPH_COMMON_ASYNC_LIBRADOS_COMPLETION_H
| 3,105 | 23.650794 | 72 | h |
null | ceph-main/src/common/async/shared_mutex.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "common/async/detail/shared_mutex.h"
namespace ceph::async {
/**
* An asynchronous shared mutex for use with boost::asio.
*
* A shared mutex class with asynchronous lock operations that complete on a
* boost::asio executor. The class also has synchronous interfaces that meet
* most of the standard library's requirements for the SharedMutex concept,
* which makes it compatible with lock_guard, unique_lock, and shared_lock.
*
* All lock requests can fail with operation_aborted on cancel() or destruction.
* The non-error_code overloads of lock() and lock_shared() will throw this
* error as an exception of type boost::system::system_error.
*
* Exclusive locks are prioritized over shared locks. Locks of the same type
* are granted in fifo order. The implementation defines a limit on the number
* of shared locks to 65534 at a time.
*
* Example use:
*
* boost::asio::io_context context;
* SharedMutex mutex{context.get_executor()};
*
* mutex.async_lock([&] (boost::system::error_code ec, auto lock) {
* if (!ec) {
* // mutate shared state ...
* }
* });
* mutex.async_lock_shared([&] (boost::system::error_code ec, auto lock) {
* if (!ec) {
* // read shared state ...
* }
* });
*
* context.run();
*/
template <typename Executor>
class SharedMutex {
 public:
  explicit SharedMutex(const Executor& ex);

  /// on destruction, all pending lock requests are canceled
  ~SharedMutex();

  using executor_type = Executor;
  executor_type get_executor() const noexcept { return ex; }

  /// initiate an asynchronous request for an exclusive lock. when the lock is
  /// granted, the completion handler is invoked with a successful error code
  /// and a std::unique_lock that owns this mutex.
  /// Signature = void(boost::system::error_code, std::unique_lock)
  template <typename CompletionToken>
  auto async_lock(CompletionToken&& token);

  /// wait synchronously for an exclusive lock. if an error occurs before the
  /// lock is granted, that error is thrown as an exception
  void lock();

  /// wait synchronously for an exclusive lock. if an error occurs before the
  /// lock is granted, that error is assigned to 'ec'
  void lock(boost::system::error_code& ec);

  /// try to acquire an exclusive lock. if the lock is not immediately
  /// available, returns false
  bool try_lock();

  /// releases an exclusive lock. not required to be called from the same thread
  /// that initiated the lock
  void unlock();

  /// initiate an asynchronous request for a shared lock. when the lock is
  /// granted, the completion handler is invoked with a successful error code
  /// and a std::shared_lock that owns this mutex.
  /// Signature = void(boost::system::error_code, std::shared_lock)
  template <typename CompletionToken>
  auto async_lock_shared(CompletionToken&& token);

  /// wait synchronously for a shared lock. if an error occurs before the
  /// lock is granted, that error is thrown as an exception
  void lock_shared();

  /// wait synchronously for a shared lock. if an error occurs before the lock
  /// is granted, that error is assigned to 'ec'
  void lock_shared(boost::system::error_code& ec);

  /// try to acquire a shared lock. if the lock is not immediately available,
  /// returns false
  bool try_lock_shared();

  /// releases a shared lock. not required to be called from the same thread
  /// that initiated the lock
  void unlock_shared();

  /// cancel any pending requests for exclusive or shared locks with an
  /// operation_aborted error
  void cancel();

 private:
  Executor ex; ///< default callback executor
  // reference-counted implementation; outlives this wrapper while lock
  // guards still reference it
  boost::intrusive_ptr<detail::SharedMutexImpl> impl;

  // allow lock guards to access impl
  friend class std::unique_lock<SharedMutex>;
  friend class std::shared_lock<SharedMutex>;
};
// construct with the executor used as the default for completion handlers;
// allocates the shared, ref-counted lock state
template <typename Executor>
SharedMutex<Executor>::SharedMutex(const Executor& ex)
  : ex(ex), impl(new detail::SharedMutexImpl)
{
}
// cancel all pending lock requests; exceptions are swallowed because a
// destructor must not throw
template <typename Executor>
SharedMutex<Executor>::~SharedMutex()
{
  try {
    impl->cancel();
  } catch (const std::exception&) {
    // swallow any exceptions, the destructor can't throw
  }
}
// forward the asynchronous exclusive-lock request to the shared impl
template <typename Executor>
template <typename CompletionToken>
auto SharedMutex<Executor>::async_lock(CompletionToken&& token)
{
  return impl->async_lock(*this, std::forward<CompletionToken>(token));
}
// blocking exclusive lock; throws on error (see impl)
template <typename Executor>
void SharedMutex<Executor>::lock()
{
  impl->lock();
}
// blocking exclusive lock; errors are reported through 'ec'
template <typename Executor>
void SharedMutex<Executor>::lock(boost::system::error_code& ec)
{
  impl->lock(ec);
}
// non-blocking exclusive lock attempt
template <typename Executor>
bool SharedMutex<Executor>::try_lock()
{
  return impl->try_lock();
}
// release the exclusive lock; may grant queued requests (see impl)
template <typename Executor>
void SharedMutex<Executor>::unlock()
{
  impl->unlock();
}
// forward the asynchronous shared-lock request to the shared impl
template <typename Executor>
template <typename CompletionToken>
auto SharedMutex<Executor>::async_lock_shared(CompletionToken&& token)
{
  return impl->async_lock_shared(*this, std::forward<CompletionToken>(token));
}
// blocking shared lock; throws on error (see impl)
template <typename Executor>
void SharedMutex<Executor>::lock_shared()
{
  impl->lock_shared();
}
// blocking shared lock; errors are reported through 'ec'
template <typename Executor>
void SharedMutex<Executor>::lock_shared(boost::system::error_code& ec)
{
  impl->lock_shared(ec);
}
// non-blocking shared lock attempt
template <typename Executor>
bool SharedMutex<Executor>::try_lock_shared()
{
  return impl->try_lock_shared();
}
// release one shared lock; may grant queued requests (see impl)
template <typename Executor>
void SharedMutex<Executor>::unlock_shared()
{
  impl->unlock_shared();
}
// fail every queued shared and exclusive request with operation_aborted
template <typename Executor>
void SharedMutex<Executor>::cancel()
{
  impl->cancel();
}
} // namespace ceph::async
#include "common/async/detail/shared_lock.h"
| 6,016 | 27.248826 | 80 | h |
null | ceph-main/src/common/async/waiter.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMMON_WAITER_H
#define CEPH_COMMON_WAITER_H
#include <condition_variable>
#include <new>
#include <tuple>
#include <boost/asio/async_result.hpp>
#include "include/ceph_assert.h"
#include "include/function2.hpp"
#include "common/ceph_mutex.h"
namespace ceph::async {
namespace detail {
// For safety reasons (avoiding undefined behavior around sequence
// points) std::reference_wrapper disallows move construction. This
// harms us in cases where we want to pass a reference in to something
// that unavoidably moves.
//
// It should not be used generally.
template<typename T>
class rvalue_reference_wrapper {
public:
  // types
  using type = T;
  rvalue_reference_wrapper(T& r) noexcept
    : p(std::addressof(r)) {}
  // We write our semantics to match those of reference collapsing. If
  // we're treated as an lvalue, collapse to one.
  rvalue_reference_wrapper(const rvalue_reference_wrapper&) noexcept = default;
  rvalue_reference_wrapper(rvalue_reference_wrapper&&) noexcept = default;
  // assignment
  rvalue_reference_wrapper& operator=(
    const rvalue_reference_wrapper& x) noexcept = default;
  rvalue_reference_wrapper& operator=(
    rvalue_reference_wrapper&& x) noexcept = default;
  // const access yields an lvalue reference...
  operator T& () const noexcept {
    return *p;
  }
  T& get() const noexcept {
    return *p;
  }
  // ...while non-const access yields an rvalue reference, i.e. reading
  // through a non-const wrapper may move from the referent
  operator T&& () noexcept {
    return std::move(*p);
  }
  T&& get() noexcept {
    return std::move(*p);
  }
  // invoke the referent as an lvalue (const wrapper)...
  // NOTE(review): std::result_of_t is deprecated in C++17 and removed in
  // C++20; consider std::invoke_result_t when the toolchain baseline allows.
  template<typename... Args>
  std::result_of_t<T&(Args&&...)> operator ()(Args&&... args ) const {
    return (*p)(std::forward<Args>(args)...);
  }
  // ...or as an rvalue (non-const wrapper)
  template<typename... Args>
  std::result_of_t<T&&(Args&&...)> operator ()(Args&&... args ) {
    return std::move(*p)(std::forward<Args>(args)...);
  }
private:
  T* p;  // never null; set from a reference in the constructor
};
// shared machinery for all waiter<> specializations: a mutex/condvar pair
// and a has_value flag. Derived classes store the actual result.
class base {
protected:
  ceph::mutex lock = ceph::make_mutex("ceph::async::detail::base::lock");
  ceph::condition_variable cond;
  bool has_value = false;
  ~base() = default;
  // block until a value has been delivered; returns the (still held) lock
  // so the caller can consume the value and reset has_value under it
  auto wait_base() {
    std::unique_lock l(lock);
    cond.wait(l, [this](){ return has_value; });
    return l;
  }
  // mark a value as delivered and wake the waiter; returns the (still held)
  // lock so the caller can store the value under it
  auto exec_base() {
    std::unique_lock l(lock);
    // There's no really good way to handle being called twice
    // without being reset.
    ceph_assert(!has_value);
    has_value = true;
    cond.notify_one();
    return l;
  }
};
}
// waiter is a replacement for C_SafeCond and friends. It is the
// moral equivalent of a future but plays well with a world of
// callbacks.
template<typename ...S>
class waiter;
template<>
class waiter<> final : public detail::base {
public:
  /// Block until the callback has fired, then re-arm for reuse.
  void wait() {
    // Keep the lock returned by wait_base() alive while clearing
    // has_value, matching the other specializations. The original
    // discarded the returned lock immediately, clearing has_value
    // unsynchronized and leaving a window where a concurrent completion
    // could trip the !has_value assertion in exec_base().
    auto l = wait_base();
    has_value = false;
  }

  /// Completion callback: signal the waiting thread.
  void operator()() {
    exec_base();
  }

  auto ref() {
    return detail::rvalue_reference_wrapper(*this);
  }

  operator fu2::unique_function<void() &&>() {
    return fu2::unique_function<void() &&>(ref());
  }
};
template<typename Ret>
class waiter<Ret> final : public detail::base {
  // Raw storage for the result; a Ret object exists here only while
  // has_value is true. The alignment must be given explicitly --
  // aligned_storage_t's default alignment is derived from the size alone
  // and can be wrong for over-aligned types.
  std::aligned_storage_t<sizeof(Ret), alignof(Ret)> ret;

public:
  /// Block until a value is delivered, move it out, destroy the stored
  /// object, and re-arm the waiter for reuse.
  Ret wait() {
    auto l = wait_base();
    auto r = reinterpret_cast<Ret*>(&ret);
    auto t = std::move(*r);
    r->~Ret();
    has_value = false;
    return t;
  }

  /// Completion callback: move the result into storage.
  void operator()(Ret&& _ret) {
    auto l = exec_base();
    // placement-new: no Ret lives in the storage yet (exec_base asserts
    // !has_value), so the original's assignment through a
    // reinterpret_cast'd pointer invoked operator= on a non-existent
    // object -- undefined behavior for non-trivial Ret.
    new (&ret) Ret(std::move(_ret));
  }

  /// Completion callback: copy the result into storage.
  void operator()(const Ret& _ret) {
    auto l = exec_base();
    // copy-construct; the original applied std::move to a const ref,
    // which copies anyway but misleads the reader
    new (&ret) Ret(_ret);
  }

  auto ref() {
    return detail::rvalue_reference_wrapper(*this);
  }

  operator fu2::unique_function<void(Ret) &&>() {
    return fu2::unique_function<void(Ret) &&>(ref());
  }

  ~waiter() {
    // destroy a delivered-but-unconsumed result
    if (has_value)
      reinterpret_cast<Ret*>(&ret)->~Ret();
  }
};
template<typename ...Ret>
class waiter final : public detail::base {
  // Unlike waiter<Ret>, the result is an ordinary member, so its lifetime
  // is managed by normal member construction/destruction -- no manual
  // placement or destructor calls are needed (or allowed).
  std::tuple<Ret...> ret;

public:
  /// Block until the callback delivers the results, move them out, and
  /// re-arm the waiter for reuse.
  std::tuple<Ret...> wait() {
    auto l = wait_base();
    // The original returned std::move(ret) immediately, which made the
    // following reset of has_value unreachable dead code, so the waiter
    // could never be reused; move out first, then re-arm.
    auto t = std::move(ret);
    has_value = false;
    return t;
  }

  /// Completion callback; rvalue arguments are moved into the tuple.
  void operator()(Ret&&... _ret) {
    auto l = exec_base();
    ret = std::forward_as_tuple(std::move(_ret)...);
  }

  /// Completion callback; lvalue arguments are copied into the tuple.
  void operator()(const Ret&... _ret) {
    auto l = exec_base();
    ret = std::forward_as_tuple(_ret...);
  }

  auto ref() {
    return detail::rvalue_reference_wrapper(*this);
  }

  operator fu2::unique_function<void(Ret...) &&>() {
    return fu2::unique_function<void(Ret...) &&>(ref());
  }

  // No user-declared destructor: the original explicitly ran ~tuple()
  // through a reinterpret_cast on this ordinary member and the implicit
  // member destruction then destroyed it a second time -- a double
  // destruction whenever a value was still pending.
};
}
#endif // CEPH_COMMON_WAITER_H
| 5,118 | 21.852679 | 79 | h |
null | ceph-main/src/common/async/yield_context.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <boost/range/begin.hpp>
#include <boost/range/end.hpp>
#include <boost/asio/io_context.hpp>
#include "acconfig.h"
#include <spawn/spawn.hpp>
// use explicit executor types instead of the type-erased boost::asio::executor.
// coroutines wrap the default io_context executor with a strand executor
using yield_context = spawn::basic_yield_context<
boost::asio::executor_binder<void(*)(),
boost::asio::strand<boost::asio::io_context::executor_type>>>;
/// optional-like wrapper for a spawn::yield_context and its associated
/// boost::asio::io_context. operations that take an optional_yield argument
/// will, when passed a non-empty yield context, suspend this coroutine instead
/// of the blocking the thread of execution
class optional_yield {
boost::asio::io_context *c = nullptr;
yield_context *y = nullptr;
public:
/// construct with a valid io and yield_context
explicit optional_yield(boost::asio::io_context& c,
yield_context& y) noexcept
: c(&c), y(&y) {}
/// type tag to construct an empty object
struct empty_t {};
optional_yield(empty_t) noexcept {}
/// implicit conversion to bool, returns true if non-empty
operator bool() const noexcept { return y; }
/// return a reference to the associated io_context. only valid if non-empty
boost::asio::io_context& get_io_context() const noexcept { return *c; }
/// return a reference to the yield_context. only valid if non-empty
yield_context& get_yield_context() const noexcept { return *y; }
};
// type tag object to construct an empty optional_yield
static constexpr optional_yield::empty_t null_yield{};
| 2,067 | 33.466667 | 80 | h |
null | ceph-main/src/common/async/detail/shared_lock.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
namespace std {
// specialize unique_lock and shared_lock for SharedMutex to operate on
// SharedMutexImpl instead, because the locks may outlive the SharedMutex itself
template <typename Executor>
class unique_lock<ceph::async::SharedMutex<Executor>> {
 public:
  // the "mutex" tracked by the guard is the ref-counted impl rather than
  // the SharedMutex wrapper, so the guard stays valid if the wrapper is
  // destroyed first
  using mutex_type = boost::intrusive_ptr<ceph::async::detail::SharedMutexImpl>;
  unique_lock() = default;
  // acquire the exclusive lock, blocking until granted
  explicit unique_lock(ceph::async::SharedMutex<Executor>& m)
    : impl(m.impl), locked(true)
  {
    impl->lock();
  }
  // associate with the mutex without locking
  unique_lock(ceph::async::SharedMutex<Executor>& m, defer_lock_t t) noexcept
    : impl(m.impl)
  {}
  // attempt to lock without blocking; check owns_lock() for the result
  unique_lock(ceph::async::SharedMutex<Executor>& m, try_to_lock_t t)
    : impl(m.impl), locked(impl->try_lock())
  {}
  // adopt an exclusive lock already held by the caller
  unique_lock(ceph::async::SharedMutex<Executor>& m, adopt_lock_t t) noexcept
    : impl(m.impl), locked(true)
  {}
  ~unique_lock() {
    if (impl && locked)
      impl->unlock();
  }
  unique_lock(unique_lock&& other) noexcept
    : impl(std::move(other.impl)),
      locked(other.locked) {
    other.locked = false;
  }
  unique_lock& operator=(unique_lock&& other) noexcept {
    if (impl && locked) {
      impl->unlock();
    }
    impl = std::move(other.impl);
    locked = other.locked;
    other.locked = false;
    return *this;
  }
  void swap(unique_lock& other) noexcept {
    using std::swap;
    swap(impl, other.impl);
    swap(locked, other.locked);
  }
  mutex_type mutex() const noexcept { return impl; }
  bool owns_lock() const noexcept { return impl && locked; }
  explicit operator bool() const noexcept { return impl && locked; }
  // give up the association without unlocking; the caller becomes
  // responsible for releasing the lock (if held)
  mutex_type release() {
    auto result = std::move(impl);
    locked = false;
    return result;
  }
  // errc::operation_not_permitted if no mutex is associated,
  // errc::resource_deadlock_would_occur if the lock is already held
  void lock() {
    if (!impl)
      throw system_error(make_error_code(errc::operation_not_permitted));
    if (locked)
      throw system_error(make_error_code(errc::resource_deadlock_would_occur));
    impl->lock();
    locked = true;
  }
  bool try_lock() {
    if (!impl)
      throw system_error(make_error_code(errc::operation_not_permitted));
    if (locked)
      throw system_error(make_error_code(errc::resource_deadlock_would_occur));
    return locked = impl->try_lock();
  }
  void unlock() {
    if (!impl || !locked)
      throw system_error(make_error_code(errc::operation_not_permitted));
    impl->unlock();
    locked = false;
  }
 private:
  mutex_type impl;
  bool locked{false};
};
template <typename Executor>
class shared_lock<ceph::async::SharedMutex<Executor>> {
 public:
  // the "mutex" tracked by the guard is the ref-counted impl rather than
  // the SharedMutex wrapper, so the guard stays valid if the wrapper is
  // destroyed first
  using mutex_type = boost::intrusive_ptr<ceph::async::detail::SharedMutexImpl>;
  shared_lock() = default;
  // acquire a shared lock, blocking until granted
  explicit shared_lock(ceph::async::SharedMutex<Executor>& m)
    : impl(m.impl), locked(true)
  {
    impl->lock_shared();
  }
  // associate with the mutex without locking
  shared_lock(ceph::async::SharedMutex<Executor>& m, defer_lock_t t) noexcept
    : impl(m.impl)
  {}
  // attempt a shared lock without blocking; check owns_lock() for the result
  shared_lock(ceph::async::SharedMutex<Executor>& m, try_to_lock_t t)
    : impl(m.impl), locked(impl->try_lock_shared())
  {}
  // adopt a shared lock already held by the caller
  shared_lock(ceph::async::SharedMutex<Executor>& m, adopt_lock_t t) noexcept
    : impl(m.impl), locked(true)
  {}
  ~shared_lock() {
    if (impl && locked)
      impl->unlock_shared();
  }
  shared_lock(shared_lock&& other) noexcept
    : impl(std::move(other.impl)),
      locked(other.locked) {
    other.locked = false;
  }
  shared_lock& operator=(shared_lock&& other) noexcept {
    if (impl && locked) {
      impl->unlock_shared();
    }
    impl = std::move(other.impl);
    locked = other.locked;
    other.locked = false;
    return *this;
  }
  void swap(shared_lock& other) noexcept {
    using std::swap;
    swap(impl, other.impl);
    swap(locked, other.locked);
  }
  mutex_type mutex() const noexcept { return impl; }
  bool owns_lock() const noexcept { return impl && locked; }
  explicit operator bool() const noexcept { return impl && locked; }
  // give up the association without unlocking; the caller becomes
  // responsible for releasing the lock (if held)
  mutex_type release() {
    auto result = std::move(impl);
    locked = false;
    return result;
  }
  // errc::operation_not_permitted if no mutex is associated,
  // errc::resource_deadlock_would_occur if the lock is already held
  void lock() {
    if (!impl)
      throw system_error(make_error_code(errc::operation_not_permitted));
    if (locked)
      throw system_error(make_error_code(errc::resource_deadlock_would_occur));
    impl->lock_shared();
    locked = true;
  }
  bool try_lock() {
    if (!impl)
      throw system_error(make_error_code(errc::operation_not_permitted));
    if (locked)
      throw system_error(make_error_code(errc::resource_deadlock_would_occur));
    return locked = impl->try_lock_shared();
  }
  void unlock() {
    if (!impl || !locked)
      throw system_error(make_error_code(errc::operation_not_permitted));
    impl->unlock_shared();
    locked = false;
  }
 private:
  mutex_type impl;
  bool locked{false};
};
} // namespace std
| 5,078 | 26.306452 | 80 | h |
null | ceph-main/src/common/async/detail/shared_mutex.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <condition_variable>
#include <mutex>
#include <optional>
#include <shared_mutex> // for std::shared_lock
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <boost/intrusive_ptr.hpp>
#include <boost/intrusive/list.hpp>
#include "include/ceph_assert.h"
#include "common/async/completion.h"
namespace ceph::async::detail {
// type-erased node for a pending lock request. Requests are linked into
// the wait queues intrusively, so queuing itself never allocates.
struct LockRequest : public boost::intrusive::list_base_hook<> {
  virtual ~LockRequest() {}
  virtual void complete(boost::system::error_code ec) = 0;
  virtual void destroy() = 0;
};
// the shared lock state and wait queues behind SharedMutex. Ref-counted so
// lock guards can keep it alive independently of the SharedMutex wrapper.
class SharedMutexImpl : public boost::intrusive_ref_counter<SharedMutexImpl> {
 public:
  ~SharedMutexImpl();
  template <typename Mutex, typename CompletionToken>
  auto async_lock(Mutex& mtx, CompletionToken&& token);
  void lock();
  void lock(boost::system::error_code& ec);
  bool try_lock();
  void unlock();
  template <typename Mutex, typename CompletionToken>
  auto async_lock_shared(Mutex& mtx, CompletionToken&& token);
  void lock_shared();
  void lock_shared(boost::system::error_code& ec);
  bool try_lock_shared();
  void unlock_shared();
  void cancel();
 private:
  using RequestList = boost::intrusive::list<LockRequest>;
  RequestList shared_queue; //< requests waiting on a shared lock
  RequestList exclusive_queue; //< requests waiting on an exclusive lock
  /// lock state encodes the number of shared lockers, or 'max' for exclusive
  using LockState = uint16_t;
  static constexpr LockState Unlocked = 0;
  static constexpr LockState Exclusive = std::numeric_limits<LockState>::max();
  static constexpr LockState MaxShared = Exclusive - 1;
  LockState state = Unlocked; //< current lock state
  std::mutex mutex; //< protects lock state and wait queues
  // deliver 'ec' to each request; on an exception, destroys the remainder
  void complete(RequestList&& requests, boost::system::error_code ec);
};
// sync requests live on the stack and wait on a condition variable
class SyncRequest : public LockRequest {
std::condition_variable cond;
std::optional<boost::system::error_code> ec;
public:
boost::system::error_code wait(std::unique_lock<std::mutex>& lock) {
// return the error code once its been set
cond.wait(lock, [this] { return ec; });
return *ec;
}
void complete(boost::system::error_code ec) override {
this->ec = ec;
cond.notify_one();
}
void destroy() override {
// nothing, SyncRequests live on the stack
}
};
// async requests use async::Completion to invoke a handler on its executor
template <typename Mutex, template <typename> typename Lock>
class AsyncRequest : public LockRequest {
  Mutex& mutex; //< mutex argument for lock guard
 public:
  explicit AsyncRequest(Mutex& mutex) : mutex(mutex) {}
  using Signature = void(boost::system::error_code, Lock<Mutex>);
  using LockCompletion = Completion<Signature, AsBase<AsyncRequest>>;
  // invoked with the impl's mutex held; transfers ownership of this
  // heap-allocated completion into post(), which dispatches the user's
  // handler on its associated executor
  void complete(boost::system::error_code ec) override {
    auto r = static_cast<LockCompletion*>(this);
    // pass ownership of ourselves to post(). on error, pass an empty lock
    post(std::unique_ptr<LockCompletion>{r}, ec,
         ec ? Lock{mutex, std::defer_lock} : Lock{mutex, std::adopt_lock});
  }
  // free the completion without invoking the handler (cleanup path)
  void destroy() override {
    delete static_cast<LockCompletion*>(this);
  }
};
// destruction requires the mutex to be unlocked with nothing queued;
// callers must cancel() or drain all requests first
inline SharedMutexImpl::~SharedMutexImpl()
{
  ceph_assert(state == Unlocked);
  ceph_assert(shared_queue.empty());
  ceph_assert(exclusive_queue.empty());
}
// asynchronous exclusive lock: completes immediately (via post) if the
// mutex is unlocked, otherwise queues an AsyncRequest on exclusive_queue
template <typename Mutex, typename CompletionToken>
auto SharedMutexImpl::async_lock(Mutex& mtx, CompletionToken&& token)
{
  using Request = AsyncRequest<Mutex, std::unique_lock>;
  using Signature = typename Request::Signature;
  boost::asio::async_completion<CompletionToken, Signature> init(token);
  auto& handler = init.completion_handler;
  auto ex1 = mtx.get_executor();
  {
    std::lock_guard lock{mutex};
    boost::system::error_code ec;
    if (state == Unlocked) {
      state = Exclusive;
      // post a successful completion
      auto ex2 = boost::asio::get_associated_executor(handler, ex1);
      auto alloc2 = boost::asio::get_associated_allocator(handler);
      auto b = bind_handler(std::move(handler), ec,
                            std::unique_lock{mtx, std::adopt_lock});
      ex2.post(forward_handler(std::move(b)), alloc2);
    } else {
      // create a request and add it to the exclusive list
      using LockCompletion = typename Request::LockCompletion;
      auto request = LockCompletion::create(ex1, std::move(handler), mtx);
      exclusive_queue.push_back(*request.release());
    }
  }
  return init.result.get();
}
// blocking exclusive lock; failures are raised as boost::system::system_error
inline void SharedMutexImpl::lock()
{
  boost::system::error_code ec;
  lock(ec);
  if (!ec) {
    return;
  }
  throw boost::system::system_error(ec);
}
// blocking exclusive lock; any failure is reported through 'ec'
void SharedMutexImpl::lock(boost::system::error_code& ec)
{
  std::unique_lock l{mutex};
  if (state == Unlocked) {
    // fast path: nobody holds the lock, take it immediately
    state = Exclusive;
    ec.clear();
    return;
  }
  // slow path: queue a stack-based request and block until it is granted
  SyncRequest request;
  exclusive_queue.push_back(request);
  ec = request.wait(l);
}
// non-blocking exclusive lock attempt
inline bool SharedMutexImpl::try_lock()
{
  std::lock_guard l{mutex};
  if (state != Unlocked) {
    return false;
  }
  state = Exclusive;
  return true;
}
void SharedMutexImpl::unlock()
{
  // requests granted here are completed after the mutex is released, so
  // handlers never run under the internal lock
  RequestList granted;
  {
    std::lock_guard lock{mutex};
    ceph_assert(state == Exclusive);
    if (!exclusive_queue.empty()) {
      // grant next exclusive lock
      auto& request = exclusive_queue.front();
      exclusive_queue.pop_front();
      granted.push_back(request);
    } else {
      // grant shared locks, if any
      state = shared_queue.size();
      if (state > MaxShared) {
        // more waiters than the counter can represent: grant exactly
        // MaxShared of them (the trailing count lets boost::intrusive
        // splice without re-walking the range)
        state = MaxShared;
        auto end = std::next(shared_queue.begin(), MaxShared);
        granted.splice(granted.end(), shared_queue,
                       shared_queue.begin(), end, MaxShared);
      } else {
        granted.splice(granted.end(), shared_queue);
      }
    }
  }
  complete(std::move(granted), boost::system::error_code{});
}
// asynchronous shared lock: completes immediately (via post) only when no
// writer is queued and the shared count has room, otherwise queues an
// AsyncRequest on shared_queue (writer priority)
template <typename Mutex, typename CompletionToken>
auto SharedMutexImpl::async_lock_shared(Mutex& mtx, CompletionToken&& token)
{
  using Request = AsyncRequest<Mutex, std::shared_lock>;
  using Signature = typename Request::Signature;
  boost::asio::async_completion<CompletionToken, Signature> init(token);
  auto& handler = init.completion_handler;
  auto ex1 = mtx.get_executor();
  {
    std::lock_guard lock{mutex};
    boost::system::error_code ec;
    if (exclusive_queue.empty() && state < MaxShared) {
      state++;
      auto ex2 = boost::asio::get_associated_executor(handler, ex1);
      auto alloc2 = boost::asio::get_associated_allocator(handler);
      auto b = bind_handler(std::move(handler), ec,
                            std::shared_lock{mtx, std::adopt_lock});
      ex2.post(forward_handler(std::move(b)), alloc2);
    } else {
      using LockCompletion = typename Request::LockCompletion;
      auto request = LockCompletion::create(ex1, std::move(handler), mtx);
      shared_queue.push_back(*request.release());
    }
  }
  return init.result.get();
}
// blocking shared lock; failures are raised as boost::system::system_error
inline void SharedMutexImpl::lock_shared()
{
  boost::system::error_code ec;
  lock_shared(ec);
  if (!ec) {
    return;
  }
  throw boost::system::system_error(ec);
}
// blocking shared lock; any failure is reported through 'ec'
void SharedMutexImpl::lock_shared(boost::system::error_code& ec)
{
  std::unique_lock l{mutex};
  // readers yield to queued writers (and to a full shared counter):
  // queue a stack-based request and block until it is granted
  if (!exclusive_queue.empty() || state >= MaxShared) {
    SyncRequest request;
    shared_queue.push_back(request);
    ec = request.wait(l);
    return;
  }
  ++state;
  ec.clear();
}
// non-blocking shared lock attempt; fails while writers are queued
inline bool SharedMutexImpl::try_lock_shared()
{
  std::lock_guard l{mutex};
  if (!exclusive_queue.empty() || state >= MaxShared) {
    return false;
  }
  ++state;
  return true;
}
inline void SharedMutexImpl::unlock_shared()
{
  std::lock_guard lock{mutex};
  ceph_assert(state != Unlocked && state <= MaxShared);
  if (state == 1 && !exclusive_queue.empty()) {
    // grant next exclusive lock
    state = Exclusive;
    auto& request = exclusive_queue.front();
    exclusive_queue.pop_front();
    request.complete(boost::system::error_code{});
  } else if (state == MaxShared && !shared_queue.empty() &&
             exclusive_queue.empty()) {
    // grant next shared lock
    // (one reader leaves and one queued reader enters, so the shared
    // count stays at MaxShared)
    auto& request = shared_queue.front();
    shared_queue.pop_front();
    request.complete(boost::system::error_code{});
  } else {
    state--;
  }
}
inline void SharedMutexImpl::cancel()
{
RequestList canceled;
{
std::lock_guard lock{mutex};
canceled.splice(canceled.end(), shared_queue);
canceled.splice(canceled.end(), exclusive_queue);
}
complete(std::move(canceled), boost::asio::error::operation_aborted);
}
// deliver 'ec' to each request in order. If a completion throws, the
// remaining requests are destroyed (without being invoked) before the
// exception propagates, so no heap-allocated request can leak.
void SharedMutexImpl::complete(RequestList&& requests,
                               boost::system::error_code ec)
{
  while (!requests.empty()) {
    auto& request = requests.front();
    requests.pop_front();
    try {
      request.complete(ec);
    } catch (...) {
      // clean up any remaining completions and rethrow
      requests.clear_and_dispose([] (LockRequest *r) { r->destroy(); });
      throw;
    }
  }
}
} // namespace ceph::async::detail
| 9,401 | 27.752294 | 79 | h |
null | ceph-main/src/common/detail/construct_suspended.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat <[email protected]>
* Author: Adam C. Emerson <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMMON_DETAIL_CONSTRUCT_SUSPENDED_H
#define CEPH_COMMON_DETAIL_CONSTRUCT_SUSPENDED_H
namespace ceph {
  // tag type (and matching inline constant) passed to constructors to
  // request that the object be created without starting its activity;
  // see the constructors accepting construct_suspended_t for semantics
  struct construct_suspended_t { };
  inline constexpr construct_suspended_t construct_suspended { };
}
#endif // CEPH_COMMON_DETAIL_CONSTRUCT_SUSPENDED_H
| 741 | 28.68 | 70 | h |
null | ceph-main/src/common/options/build_options.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "build_options.h"
#include <algorithm>
#include <cstring>
std::vector<Option> get_global_options();
std::vector<Option> get_mgr_options();
std::vector<Option> get_mon_options();
std::vector<Option> get_crimson_options();
std::vector<Option> get_osd_options();
std::vector<Option> get_rgw_options();
std::vector<Option> get_rbd_options();
std::vector<Option> get_rbd_mirror_options();
std::vector<Option> get_immutable_object_cache_options();
std::vector<Option> get_mds_options();
std::vector<Option> get_mds_client_options();
std::vector<Option> get_cephfs_mirror_options();
std::vector<Option> get_ceph_exporter_options();
std::vector<Option> build_options()
{
  // start with the options shared by every daemon
  std::vector<Option> result = get_global_options();

  // move a batch of options into 'result', tagging each with the given
  // service name unless it already carries that tag
  auto merge = [&result](std::vector<Option>&& options, const char* svc) {
    for (auto& opt : options) {
      const bool already_tagged =
        std::any_of(opt.services.begin(), opt.services.end(),
                    [svc](const char* known_svc) {
                      return std::strcmp(known_svc, svc) == 0;
                    });
      if (!already_tagged) {
        opt.add_service(svc);
      }
      result.push_back(std::move(opt));
    }
  };

  merge(get_crimson_options(), "osd");
  merge(get_mgr_options(), "mgr");
  merge(get_mon_options(), "mon");
  merge(get_osd_options(), "osd");
  merge(get_rgw_options(), "rgw");
  merge(get_rbd_options(), "rbd");
  merge(get_rbd_mirror_options(), "rbd-mirror");
  merge(get_immutable_object_cache_options(), "immutable-object-cache");
  merge(get_mds_options(), "mds");
  merge(get_mds_client_options(), "mds_client");
  merge(get_cephfs_mirror_options(), "cephfs-mirror");
  merge(get_ceph_exporter_options(), "ceph-exporter");
  return result;
}
| 1,792 | 32.203704 | 75 | cc |
null | ceph-main/src/common/options/build_options.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <vector>
#include "common/options.h"
std::vector<Option> build_options();
| 196 | 20.888889 | 70 | h |
null | ceph-main/src/common/options/legacy_config_opts.h | #include "global_legacy_options.h"
#include "cephfs-mirror_legacy_options.h"
#include "mds_legacy_options.h"
#include "mds-client_legacy_options.h"
#include "mgr_legacy_options.h"
#include "mon_legacy_options.h"
#include "osd_legacy_options.h"
#include "rbd_legacy_options.h"
#include "rbd-mirror_legacy_options.h"
#include "immutable-object-cache_legacy_options.h"
#include "rgw_legacy_options.h"
| 398 | 32.25 | 50 | h |
null | ceph-main/src/common/win32/SubProcess.cc | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <stdarg.h>
#include <fcntl.h>
#include <unistd.h>
#include <iostream>
#include <iomanip>
#include "common/SubProcess.h"
#include "common/errno.h"
#include "include/ceph_assert.h"
#include "include/compat.h"
// initialize bookkeeping only; no child process or pipes are created
// until spawn() is called
SubProcess::SubProcess(const char *cmd_, std_fd_op stdin_op_, std_fd_op stdout_op_, std_fd_op stderr_op_) :
  cmd(cmd_),
  cmd_args(),
  stdin_op(stdin_op_),
  stdout_op(stdout_op_),
  stderr_op(stderr_op_),
  stdin_pipe_out_fd(-1),
  stdout_pipe_in_fd(-1),
  stderr_pipe_in_fd(-1),
  pid(0),
  errstr() {
}
// the child must have been reaped (join) -- or never spawned -- and all
// pipe fds closed before the object is destroyed
SubProcess::~SubProcess() {
  ceph_assert(!is_spawned());
  ceph_assert(stdin_pipe_out_fd == -1);
  ceph_assert(stdout_pipe_in_fd == -1);
  ceph_assert(stderr_pipe_in_fd == -1);
}
// append a NULL-terminated varargs list of arguments; must be called
// before spawn()
void SubProcess::add_cmd_args(const char *arg, ...) {
  ceph_assert(!is_spawned());
  va_list ap;
  va_start(ap, arg);
  const char *p = arg;
  do {
    add_cmd_arg(p);
    p = va_arg(ap, const char*);
  } while (p != NULL);
  va_end(ap);
}
// append one command-line argument; only legal before spawn()
void SubProcess::add_cmd_arg(const char *arg) {
  ceph_assert(!is_spawned());
  cmd_args.emplace_back(arg);
}
// parent-side fd for writing to the child's stdin; only valid after
// spawn() with stdin_op == PIPE
int SubProcess::get_stdin() const {
  ceph_assert(is_spawned());
  ceph_assert(stdin_op == PIPE);
  return stdin_pipe_out_fd;
}
// parent-side fd for reading the child's stdout; only valid after
// spawn() with stdout_op == PIPE
int SubProcess::get_stdout() const {
  ceph_assert(is_spawned());
  ceph_assert(stdout_op == PIPE);
  return stdout_pipe_in_fd;
}
// parent-side fd for reading the child's stderr; only valid after
// spawn() with stderr_op == PIPE
int SubProcess::get_stderr() const {
  ceph_assert(is_spawned());
  ceph_assert(stderr_op == PIPE);
  return stderr_pipe_in_fd;
}
// close an fd if open and mark it closed; safe to call repeatedly
void SubProcess::close(int &fd) {
  if (fd != -1) {
    ::close(fd);
    fd = -1;
  }
}
// close the parent's write end of the child's stdin pipe
void SubProcess::close_stdin() {
  ceph_assert(is_spawned());
  ceph_assert(stdin_op == PIPE);
  close(stdin_pipe_out_fd);
}
// close the parent's read end of the child's stdout pipe
void SubProcess::close_stdout() {
  ceph_assert(is_spawned());
  ceph_assert(stdout_op == PIPE);
  close(stdout_pipe_in_fd);
}
// close the parent's read end of the child's stderr pipe
void SubProcess::close_stderr() {
  ceph_assert(is_spawned());
  ceph_assert(stderr_op == PIPE);
  close(stderr_pipe_in_fd);
}
// human-readable description of errors accumulated by spawn()/join()
const std::string SubProcess::err() const {
  return errstr.str();
}
// like SubProcess, but the child is terminated after 'timeout_' seconds
// (0 disables the watchdog; see SubProcessTimed::spawn)
SubProcessTimed::SubProcessTimed(const char *cmd, std_fd_op stdin_op,
                                 std_fd_op stdout_op, std_fd_op stderr_op,
                                 int timeout_, int sigkill_) :
  SubProcess(cmd, stdin_op, stdout_op, stderr_op),
  timeout(timeout_),
  sigkill(sigkill_) {
}
// set when the watchdog in SubProcessTimed::spawn() had to terminate the
// child. NOTE(review): written from the watchdog thread without
// synchronization and never read in this file's visible code -- consider
// std::atomic<bool>, or removing it; appears kept for parity with the
// POSIX implementation (TODO confirm).
static bool timedout = false;
// NOTE(review): appears unused on Windows (no sigaction here); the
// watchdog thread takes the role the signal handler plays on POSIX.
void timeout_sighandler(int sig) {
  timedout = true;
}
// close a Windows handle if open and mark it closed; safe to call repeatedly
void SubProcess::close_h(HANDLE &handle) {
  if (handle != INVALID_HANDLE_VALUE) {
    CloseHandle(handle);
    handle = INVALID_HANDLE_VALUE;
  }
}
// close our ends of the pipes, wait for the child to exit, and return its
// exit status (0 on success, the child's status, or -ECHILD on wait/query
// failure). Details of any failure are appended to errstr.
int SubProcess::join() {
  ceph_assert(is_spawned());
  close(stdin_pipe_out_fd);
  close(stdout_pipe_in_fd);
  close(stderr_pipe_in_fd);
  int status = 0;
  // NOTE(review): GetExitCodeProcess takes an LPDWORD; 'status' is declared
  // int here -- confirm this compiles cleanly on the Windows toolchain, or
  // use a separate DWORD for the exit code.
  if (WaitForSingleObject(proc_handle, INFINITE) != WAIT_FAILED) {
    if (!GetExitCodeProcess(proc_handle, &status)) {
      errstr << cmd << ": Could not get exit code: " << pid
             << ". Error code: " << GetLastError();
      status = -ECHILD;
    } else if (status) {
      errstr << cmd << ": exit status: " << status;
    }
  } else {
    errstr << cmd << ": Waiting for child process failed: " << pid
           << ". Error code: " << GetLastError();
    status = -ECHILD;
  }
  close_h(proc_handle);
  pid = 0;
  return status;
}
// forcibly terminate the child.
// NOTE(review): 'signo' is ignored -- the child's exit code is always
// 128 + SIGTERM regardless of the signal requested. Also note the side
// effect inside ceph_assert; confirm ceph_assert is always compiled in,
// otherwise TerminateProcess would never execute.
void SubProcess::kill(int signo) const {
  ceph_assert(is_spawned());
  ceph_assert(TerminateProcess(proc_handle, 128 + SIGTERM));
}
// create the child process: build the command line, set up any requested
// pipes (with the parent-side ends non-inheritable), wrap the parent-side
// handles in CRT fds, and launch via CreateProcess. Returns 0 on success,
// -1 on failure with a description appended to errstr; on failure all
// fds/handles created here are released.
int SubProcess::spawn() {
  std::ostringstream cmdline;
  cmdline << cmd;
  for (auto& arg : cmd_args) {
    cmdline << " " << std::quoted(arg);
  }
  STARTUPINFO si = {0};
  PROCESS_INFORMATION pi = {0};
  SECURITY_ATTRIBUTES sa = {0};
  sa.nLength = sizeof(SECURITY_ATTRIBUTES);
  sa.bInheritHandle = TRUE;
  sa.lpSecurityDescriptor = NULL;
  // r/w handle pairs for each requested pipe; the child gets stdin_r,
  // stdout_w and stderr_w, the parent keeps the opposite ends
  HANDLE stdin_r = INVALID_HANDLE_VALUE, stdin_w = INVALID_HANDLE_VALUE,
         stdout_r = INVALID_HANDLE_VALUE, stdout_w = INVALID_HANDLE_VALUE,
         stderr_r = INVALID_HANDLE_VALUE, stderr_w = INVALID_HANDLE_VALUE;
  if ((stdin_op == PIPE && !CreatePipe(&stdin_r, &stdin_w, &sa, 0)) ||
      (stdout_op == PIPE && !CreatePipe(&stdout_r, &stdout_w, &sa, 0)) ||
      (stderr_op == PIPE && !CreatePipe(&stderr_r, &stderr_w, &sa, 0))) {
    errstr << cmd << ": CreatePipe failed: " << GetLastError();
    return -1;
  }
  // The following handles will be used by the parent process and
  // must be marked as non-inheritable.
  if ((stdin_op == PIPE && !SetHandleInformation(stdin_w, HANDLE_FLAG_INHERIT, 0)) ||
      (stdout_op == PIPE && !SetHandleInformation(stdout_r, HANDLE_FLAG_INHERIT, 0)) ||
      (stderr_op == PIPE && !SetHandleInformation(stderr_r, HANDLE_FLAG_INHERIT, 0))) {
    errstr << cmd << ": SetHandleInformation failed: "
           << GetLastError();
    goto fail;
  }
  si.cb = sizeof(STARTUPINFO);
  // KEEP means the child inherits our own standard handle for that stream
  si.hStdInput = stdin_op == KEEP ? GetStdHandle(STD_INPUT_HANDLE) : stdin_r;
  si.hStdOutput = stdout_op == KEEP ? GetStdHandle(STD_OUTPUT_HANDLE) : stdout_w;
  si.hStdError = stderr_op == KEEP ? GetStdHandle(STD_ERROR_HANDLE) : stderr_w;
  si.dwFlags |= STARTF_USESTDHANDLES;
  // wrap the parent-side handles as CRT file descriptors
  stdin_pipe_out_fd = stdin_op == PIPE ? _open_osfhandle((intptr_t)stdin_w, 0) : -1;
  stdout_pipe_in_fd = stdout_op == PIPE ? _open_osfhandle((intptr_t)stdout_r, _O_RDONLY) : - 1;
  stderr_pipe_in_fd = stderr_op == PIPE ? _open_osfhandle((intptr_t)stderr_r, _O_RDONLY) : -1;
  // NOTE(review): this relies on && binding tighter than ||; extra
  // parentheses would make the grouping explicit
  if (stdin_op == PIPE && stdin_pipe_out_fd == -1 ||
      stdout_op == PIPE && stdout_pipe_in_fd == -1 ||
      stderr_op == PIPE && stderr_pipe_in_fd == -1) {
    errstr << cmd << ": _open_osfhandle failed: " << GetLastError();
    goto fail;
  }
  // We've transfered ownership from those handles.
  stdin_w = stdout_r = stderr_r = INVALID_HANDLE_VALUE;
  if (!CreateProcess(
      NULL, const_cast<char*>(cmdline.str().c_str()),
      NULL, NULL, /* No special security attributes */
      1, /* Inherit handles marked as inheritable */
      0, /* No special flags */
      NULL, /* Use the same environment variables */
      NULL, /* use the same cwd */
      &si, &pi)) {
    errstr << cmd << ": CreateProcess failed: " << GetLastError();
    goto fail;
  }
  proc_handle = pi.hProcess;
  pid = GetProcessId(proc_handle);
  if (!pid) {
    errstr << cmd << ": Could not get child process id.";
    goto fail;
  }
  // The following are used by the subprocess.
  CloseHandle(stdin_r);
  CloseHandle(stdout_w);
  CloseHandle(stderr_w);
  CloseHandle(pi.hThread);
  return 0;
fail:
  // fd copies
  close(stdin_pipe_out_fd);
  close(stdout_pipe_in_fd);
  close(stderr_pipe_in_fd);
  // the original handles
  close_h(stdin_r);
  close_h(stdin_w);
  close_h(stdout_r);
  close_h(stdout_w);
  close_h(stderr_r);
  close_h(stderr_w);
  // We may consider mapping some of the Windows errors.
  return -1;
}
// No-op on Windows: spawn() already builds the command line and launches
// the child via CreateProcess, so there is no separate fork/exec step.
void SubProcess::exec() {
}
// Spawn the child process, then (if a timeout was configured) start a
// watchdog thread that kills the child when it runs too long.
// Returns 0 on success, negative on spawn failure.
int SubProcessTimed::spawn() {
  if (auto ret = SubProcess::spawn(); ret < 0) {
    return ret;
  }
  if (timeout > 0) {
    // The watchdog captures "this" state by reference; join() waits for
    // this thread before reaping the child, so the references stay valid.
    waiter = std::thread([&](){
      DWORD wait_status = WaitForSingleObject(proc_handle, timeout * 1000);
      ceph_assert(wait_status != WAIT_FAILED);
      if (wait_status == WAIT_TIMEOUT) {
        // 128 + sigkill is just the return code, which is expected by
        // the unit tests and possibly by other code. We can't pick a
        // termination signal unless we use window events.
        ceph_assert(TerminateProcess(proc_handle, 128 + sigkill));
        timedout = 1;
      }
    });
  }
  return 0;
}
// Wait for the timeout watchdog (if any) to finish before reaping the
// child, so that "timedout" is settled by the time the exit status is
// collected. Returns the child's status from SubProcess::join().
int SubProcessTimed::join() {
  ceph_assert(is_spawned());

  if (waiter.joinable()) {
    waiter.join();
  }

  // Fix: dropped the stray double semicolon after the return statement.
  return SubProcess::join();
}
// No-op on Windows, mirroring SubProcess::exec().
void SubProcessTimed::exec() {
}
| 7,862 | 24.612378 | 107 | cc |
null | ceph-main/src/common/win32/blkdev.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <errno.h>
#include "common/blkdev.h"
int get_device_by_path(const char *path, char* partition, char* device,
size_t max)
{
return -EOPNOTSUPP;
}
BlkDev::BlkDev(int f)
: fd(f)
{}
BlkDev::BlkDev(const std::string& devname)
: devname(devname)
{}
int BlkDev::get_devid(dev_t *id) const
{
return -EOPNOTSUPP;
}
const char *BlkDev::sysfsdir() const {
assert(false); // Should never be called on Windows
return "";
}
int BlkDev::dev(char *dev, size_t max) const
{
return -EOPNOTSUPP;
}
int BlkDev::get_size(int64_t *psize) const
{
return -EOPNOTSUPP;
}
bool BlkDev::support_discard() const
{
return false;
}
int BlkDev::discard(int64_t offset, int64_t len) const
{
return -EOPNOTSUPP;
}
bool BlkDev::is_rotational() const
{
return false;
}
int BlkDev::model(char *model, size_t max) const
{
return -EOPNOTSUPP;
}
int BlkDev::serial(char *serial, size_t max) const
{
return -EOPNOTSUPP;
}
int BlkDev::partition(char *partition, size_t max) const
{
return -EOPNOTSUPP;
}
int BlkDev::wholedisk(char *wd, size_t max) const
{
return -EOPNOTSUPP;
}
void get_dm_parents(const std::string& dev, std::set<std::string> *ls)
{
}
void get_raw_devices(const std::string& in,
std::set<std::string> *ls)
{
}
std::string get_device_id(const std::string& devname,
std::string *err)
{
if (err) {
*err = "not implemented";
}
return std::string();
}
int block_device_run_smartctl(const char *device, int timeout,
std::string *result)
{
return -EOPNOTSUPP;
}
int block_device_get_metrics(const std::string& devname, int timeout,
json_spirit::mValue *result)
{
return -EOPNOTSUPP;
}
int block_device_run_nvme(const char *device, const char *vendor, int timeout,
std::string *result)
{
return -EOPNOTSUPP;
}
| 2,263 | 17.406504 | 78 | cc |
null | ceph-main/src/common/win32/dlfcn.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <sstream>
#include <windows.h>
#include "common/errno.h"
#include "include/dlfcn_compat.h"
// Minimal dlfcn-style shims over the Win32 DLL API.

// Note: the RTLD_* "flags" have no LoadLibrary equivalent and are ignored.
void* dlopen(const char *filename, int flags) {
  return LoadLibrary(filename);
}

// FreeLibrary returns 0 on error, as opposed to dlclose (0 on success).
int dlclose(void* handle) {
  return !FreeLibrary(handle);
}

void* dlsym(void* handle, const char* symbol) {
  return (void*)GetProcAddress(handle, symbol);
}

// Returns the formatted GetLastError() message (see win32_lasterror_str).
dl_errmsg_t dlerror() {
  return win32_lasterror_str();
}
| 872 | 21.384615 | 70 | cc |
null | ceph-main/src/common/win32/dns_resolve.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/scope_guard.h"
#include "common/dns_resolve.h"
#include "common/debug.h"
#define dout_subsys ceph_subsys_
using namespace std;
namespace ceph {
// Windows stubs: libresolv is not available here, so raw DNS queries
// report failure (-1) and the resolver entry points report -ENOTSUP.

int ResolvHWrapper::res_query(const char *hostname, int cls,
    int type, u_char *buf, int bufsz) {
  return -1;
}

int ResolvHWrapper::res_search(const char *hostname, int cls,
    int type, u_char *buf, int bufsz) {
  return -1;
}

DNSResolver::~DNSResolver()
{
  delete resolv_h;
}

int DNSResolver::resolve_cname(CephContext *cct, const string& hostname,
    string *cname, bool *found)
{
  return -ENOTSUP;
}

int DNSResolver::resolve_ip_addr(CephContext *cct, const string& hostname,
    entity_addr_t *addr)
{
  return -ENOTSUP;
}

// Delegates to the domain-qualified overload with an empty domain.
int DNSResolver::resolve_srv_hosts(CephContext *cct, const string& service_name,
    const SRV_Protocol trans_protocol,
    map<string, DNSResolver::Record> *srv_hosts)
{
  return this->resolve_srv_hosts(cct, service_name, trans_protocol, "", srv_hosts);
}

int DNSResolver::resolve_srv_hosts(CephContext *cct, const string& service_name,
    const SRV_Protocol trans_protocol, const string& domain,
    map<string, DNSResolver::Record> *srv_hosts)
{
  return -ENOTSUP;
}
}
| 1,588 | 22.716418 | 83 | cc |
null | ceph-main/src/common/win32/errno.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <errno.h>
#include <stdlib.h>
#include "include/int_types.h"
#include <ntdef.h>
#include <ntstatus.h>
#include "include/compat.h"
#include "include/int_types.h"
#include "include/types.h"
#include "include/fs_types.h"
// We're only converting errors defined in errno.h, not standard Windows
// system error codes that are usually retrievied using GetLastErrorCode().
// TODO: consider converting WinSock2 (WSA*) error codes, which are quite
// similar to the errno.h ones.
// Map a Ceph/Linux errno value (the numeric literal, as transported over
// the wire) to this host's errno constant. Unknown values pass through
// unchanged.
__u32 ceph_to_hostos_errno_unsigned(__u32 r)
{
  // using an array like freebsd_errno.cc might be more readable but
  // we have some large values defined by Boost.
  switch(r) {
    case 1: return EPERM;
    case 2: return ENOENT;
    case 3: return ESRCH;
    case 4: return EINTR;
    case 5: return EIO;
    case 6: return ENXIO;
    case 7: return E2BIG;
    case 8: return ENOEXEC;
    case 9: return EBADF;
    case 10: return ECHILD;
    // same as EWOULDBLOCK
    case 11: return EAGAIN;
    case 12: return ENOMEM;
    case 13: return EACCES;
    case 14: return EFAULT;
    case 15: return ENOTBLK;
    case 16: return EBUSY;
    case 17: return EEXIST;
    case 18: return EXDEV;
    case 19: return ENODEV;
    case 20: return ENOTDIR;
    case 21: return EISDIR;
    case 22: return EINVAL;
    case 23: return ENFILE;
    case 24: return EMFILE;
    case 25: return ENOTTY;
    case 26: return ETXTBSY;
    case 27: return EFBIG;
    case 28: return ENOSPC;
    case 29: return ESPIPE;
    case 30: return EROFS;
    case 31: return EMLINK;
    case 32: return EPIPE;
    case 33: return EDOM;
    case 34: return ERANGE;
    // same as EDEADLK
    case 35: return EDEADLOCK;
    case 36: return ENAMETOOLONG;
    case 37: return ENOLCK;
    case 38: return ENOSYS;
    case 39: return ENOTEMPTY;
    case 40: return ELOOP;
    case 42: return ENOMSG;
    case 43: return EIDRM;
    case 44: return ECHRNG;
    case 45: return EL2NSYNC;
    case 46: return EL3HLT;
    case 47: return EL3RST;
    case 48: return ELNRNG;
    case 49: return EUNATCH;
    case 50: return ENOCSI;
    case 51: return EL2HLT;
    case 52: return EBADE;
    case 53: return EBADR;
    case 54: return EXFULL;
    case 55: return ENOANO;
    case 56: return EBADRQC;
    case 57: return EBADSLT;
    case 59: return EBFONT;
    case 60: return ENOSTR;
    case 61: return ENODATA;
    case 62: return ETIME;
    case 63: return ENOSR;
    case 64: return ENONET;
    case 65: return ENOPKG;
    case 66: return EREMOTE;
    case 67: return ENOLINK;
    case 68: return EADV;
    case 69: return ESRMNT;
    case 70: return ECOMM;
    case 71: return EPROTO;
    case 72: return EMULTIHOP;
    case 73: return EDOTDOT;
    case 74: return EBADMSG;
    case 75: return EOVERFLOW;
    case 76: return ENOTUNIQ;
    case 77: return EBADFD;
    case 78: return EREMCHG;
    case 79: return ELIBACC;
    case 80: return ELIBBAD;
    case 81: return ELIBSCN;
    case 82: return ELIBMAX;
    case 83: return ELIBEXEC;
    case 84: return EILSEQ;
    case 85: return ERESTART;
    case 86: return ESTRPIPE;
    case 87: return EUSERS;
    case 88: return ENOTSOCK;
    case 89: return EDESTADDRREQ;
    case 90: return EMSGSIZE;
    case 91: return EPROTOTYPE;
    case 92: return ENOPROTOOPT;
    case 93: return EPROTONOSUPPORT;
    case 94: return ESOCKTNOSUPPORT;
    // same as ENOTSUP
    case 95: return EOPNOTSUPP;
    case 96: return EPFNOSUPPORT;
    case 97: return EAFNOSUPPORT;
    case 98: return EADDRINUSE;
    case 99: return EADDRNOTAVAIL;
    case 100: return ENETDOWN;
    case 101: return ENETUNREACH;
    case 102: return ENETRESET;
    case 103: return ECONNABORTED;
    case 104: return ECONNRESET;
    case 105: return ENOBUFS;
    case 106: return EISCONN;
    case 107: return ENOTCONN;
    case 108: return ESHUTDOWN;
    case 109: return ETOOMANYREFS;
    case 110: return ETIMEDOUT;
    case 111: return ECONNREFUSED;
    case 112: return EHOSTDOWN;
    case 113: return EHOSTUNREACH;
    case 114: return EALREADY;
    case 115: return EINPROGRESS;
    case 116: return ESTALE;
    case 117: return EUCLEAN;
    case 118: return ENOTNAM;
    case 119: return ENAVAIL;
    case 120: return EISNAM;
    case 121: return EREMOTEIO;
    case 122: return EDQUOT;
    case 123: return ENOMEDIUM;
    case 124: return EMEDIUMTYPE;
    case 125: return ECANCELED;
    case 126: return ENOKEY;
    case 127: return EKEYEXPIRED;
    case 128: return EKEYREVOKED;
    case 129: return EKEYREJECTED;
    case 130: return EOWNERDEAD;
    case 131: return ENOTRECOVERABLE;
    case 132: return ERFKILL;
    case 133: return EHWPOISON;
    default:
      return r;
  }
}
// Inverse of ceph_to_hostos_errno_unsigned: map a host errno constant to
// the Linux/Ceph numeric value. Unknown values pass through unchanged.
__u32 hostos_to_ceph_errno_unsigned(__u32 r) {
  // Windows errno -> Linux errno
  switch(r) {
    case EPERM: return 1;
    case ENOENT: return 2;
    case ESRCH: return 3;
    case EINTR: return 4;
    case EIO: return 5;
    case ENXIO: return 6;
    case E2BIG: return 7;
    case ENOEXEC: return 8;
    case EBADF: return 9;
    case ECHILD: return 10;
    case EAGAIN: return 11;
    case EWOULDBLOCK: return 11;
    case ENOMEM: return 12;
    case EACCES: return 13;
    case EFAULT: return 14;
    case ENOTBLK: return 15;
    case EBUSY: return 16;
    case EEXIST: return 17;
    case EXDEV: return 18;
    case ENODEV: return 19;
    case ENOTDIR: return 20;
    case EISDIR: return 21;
    case EINVAL: return 22;
    case ENFILE: return 23;
    case EMFILE: return 24;
    case ENOTTY: return 25;
    case ETXTBSY: return 26;
    case EFBIG: return 27;
    case ENOSPC: return 28;
    case ESPIPE: return 29;
    case EROFS: return 30;
    case EMLINK: return 31;
    case EPIPE: return 32;
    case EDOM: return 33;
    case ERANGE: return 34;
    // same as EDEADLOCK
    // case EDEADLK: return 35;
    case EDEADLOCK: return 35;
    case ENAMETOOLONG: return 36;
    case ENOLCK: return 37;
    case ENOSYS: return 38;
    case ENOTEMPTY: return 39;
    case ELOOP: return 40;
    case ENOMSG: return 42;
    case EIDRM: return 43;
    case ECHRNG: return 44;
    case EL2NSYNC: return 45;
    case EL3HLT: return 46;
    case EL3RST: return 47;
    case ELNRNG: return 48;
    case EUNATCH: return 49;
    case ENOCSI: return 50;
    case EL2HLT: return 51;
    case EBADE: return 52;
    case EBADR: return 53;
    case EXFULL: return 54;
    case ENOANO: return 55;
    case EBADRQC: return 56;
    case EBADSLT: return 57;
    case EBFONT: return 59;
    case ENOSTR: return 60;
    case ENODATA: return 61;
    case ETIME: return 62;
    case ENOSR: return 63;
    case ENONET: return 64;
    case ENOPKG: return 65;
    case EREMOTE: return 66;
    case ENOLINK: return 67;
    case EADV: return 68;
    case ESRMNT: return 69;
    case ECOMM: return 70;
    case EPROTO: return 71;
    case EMULTIHOP: return 72;
    case EDOTDOT: return 73;
    case EBADMSG: return 74;
    case EOVERFLOW: return 75;
    case ENOTUNIQ: return 76;
    case EBADFD: return 77;
    case EREMCHG: return 78;
    case ELIBACC: return 79;
    case ELIBBAD: return 80;
    case ELIBSCN: return 81;
    case ELIBMAX: return 82;
    case ELIBEXEC: return 83;
    case EILSEQ: return 84;
    // compat.h defines ERESTART as EINTR
    // case ERESTART: return 85;
    case ESTRPIPE: return 86;
    case EUSERS: return 87;
    case ENOTSOCK: return 88;
    case EDESTADDRREQ: return 89;
    case EMSGSIZE: return 90;
    case EPROTOTYPE: return 91;
    case ENOPROTOOPT: return 92;
    case EPROTONOSUPPORT: return 93;
    case ESOCKTNOSUPPORT: return 94;
    case EOPNOTSUPP: return 95;
    case ENOTSUP: return 95;
    case EPFNOSUPPORT: return 96;
    case EAFNOSUPPORT: return 97;
    case EADDRINUSE: return 98;
    case EADDRNOTAVAIL: return 99;
    case ENETDOWN: return 100;
    case ENETUNREACH: return 101;
    case ENETRESET: return 102;
    case ECONNABORTED: return 103;
    case ECONNRESET: return 104;
    case ENOBUFS: return 105;
    case EISCONN: return 106;
    case ENOTCONN: return 107;
    case ESHUTDOWN: return 108;
    case ETOOMANYREFS: return 109;
    case ETIMEDOUT: return 110;
    case ECONNREFUSED: return 111;
    case EHOSTDOWN: return 112;
    case EHOSTUNREACH: return 113;
    case EALREADY: return 114;
    case EINPROGRESS: return 115;
    case ESTALE: return 116;
    case EUCLEAN: return 117;
    case ENOTNAM: return 118;
    case ENAVAIL: return 119;
    case EISNAM: return 120;
    case EREMOTEIO: return 121;
    case EDQUOT: return 122;
    case ENOMEDIUM: return 123;
    case EMEDIUMTYPE: return 124;
    case ECANCELED: return 125;
    case ENOKEY: return 126;
    case EKEYEXPIRED: return 127;
    case EKEYREVOKED: return 128;
    case EKEYREJECTED: return 129;
    case EOWNERDEAD: return 130;
    case ENOTRECOVERABLE: return 131;
    case ERFKILL: return 132;
    case EHWPOISON: return 133;
    default:
      return r;
  }
}
// Map a WinSock2 (WSA*) error code to the closest errno constant.
// Codes without a sensible mapping are returned unchanged.
__s32 wsae_to_errno_unsigned(__s32 r)
{
  switch(r) {
    case WSAEINTR: return EINTR;
    case WSAEBADF: return EBADF;
    case WSAEACCES: return EACCES;
    case WSAEFAULT: return EFAULT;
    case WSAEINVAL: return EINVAL;
    case WSAEMFILE: return EMFILE;
    // Linux defines WSAEWOULDBLOCK as EAGAIN, but not Windows headers.
    // Since all ceph code uses EAGAIN instead of EWOULDBLOCK, we'll do
    // the same here.
    case WSAEWOULDBLOCK: return EAGAIN;
    // Some functions (e.g. connect) can return WSAEWOULDBLOCK instead of
    // EINPROGRESS.
    case WSAEINPROGRESS: return EINPROGRESS;
    case WSAEALREADY: return EALREADY;
    case WSAENOTSOCK: return ENOTSOCK;
    case WSAEDESTADDRREQ: return EDESTADDRREQ;
    case WSAEMSGSIZE: return EMSGSIZE;
    case WSAEPROTOTYPE: return EPROTOTYPE;
    case WSAENOPROTOOPT: return ENOPROTOOPT;
    case WSAEPROTONOSUPPORT: return EPROTONOSUPPORT;
    case WSAESOCKTNOSUPPORT: return ESOCKTNOSUPPORT;
    case WSAEOPNOTSUPP: return EOPNOTSUPP;
    case WSAEPFNOSUPPORT: return EPFNOSUPPORT;
    case WSAEAFNOSUPPORT: return EAFNOSUPPORT;
    case WSAEADDRINUSE: return EADDRINUSE;
    case WSAEADDRNOTAVAIL: return EADDRNOTAVAIL;
    case WSAENETDOWN: return ENETDOWN;
    case WSAENETUNREACH: return ENETUNREACH;
    case WSAENETRESET: return ENETRESET;
    case WSAECONNABORTED: return ECONNABORTED;
    case WSAECONNRESET: return ECONNRESET;
    case WSAENOBUFS: return ENOBUFS;
    case WSAEISCONN: return EISCONN;
    case WSAENOTCONN: return ENOTCONN;
    case WSAESHUTDOWN: return ESHUTDOWN;
    case WSAETOOMANYREFS: return ETOOMANYREFS;
    case WSAETIMEDOUT: return ETIMEDOUT;
    case WSAECONNREFUSED: return ECONNREFUSED;
    case WSAELOOP: return ELOOP;
    case WSAENAMETOOLONG: return ENAMETOOLONG;
    case WSAEHOSTDOWN: return EHOSTDOWN;
    case WSAEHOSTUNREACH: return EHOSTUNREACH;
    case WSAENOTEMPTY: return ENOTEMPTY;
    // case WSAEPROCLIM
    case WSAEUSERS: return EUSERS;
    case WSAEDQUOT: return EDQUOT;
    case WSAESTALE: return ESTALE;
    case WSAEREMOTE: return EREMOTE;
    // case WSASYSNOTREADY
    // case WSAVERNOTSUPPORTED
    // case WSANOTINITIALISED
    case WSAEDISCON: return ESHUTDOWN;
    // case WSAENOMORE
    case WSAECANCELLED: return ECANCELED;
    // We might return EINVAL, but it's probably better if we propagate the
    // original error code here.
    // case WSAEINVALIDPROCTABLE
    // case WSAEINVALIDPROVIDER
    // case WSAEPROVIDERFAILEDINIT
    // case WSASYSCALLFAILURE
    // case WSASERVICE_NOT_FOUND:
    // case WSATYPE_NOT_FOUND:
    // case WSA_E_NO_MORE:
    case WSA_E_CANCELLED: return ECANCELED;
    case WSAEREFUSED: return ECONNREFUSED;
    case WSAHOST_NOT_FOUND: return EHOSTUNREACH;
    case WSATRY_AGAIN: return EAGAIN;
    // case WSANO_RECOVERY
    // case WSANO_DATA:
    default: return r;
  }
}
// Signed wrapper around ceph_to_hostos_errno_unsigned(): translate a
// Linux/Ceph errno to the host's value while keeping the sign of "r".
__s32 ceph_to_hostos_errno(__s32 r)
{
  if (r < 0) {
    return -static_cast<__s32>(ceph_to_hostos_errno_unsigned(-r));
  }
  return static_cast<__s32>(ceph_to_hostos_errno_unsigned(r));
}
// Signed wrapper around hostos_to_ceph_errno_unsigned(): translate a host
// errno to the Linux/Ceph value while keeping the sign of "r".
__s32 hostos_to_ceph_errno(__s32 r)
{
  if (r < 0) {
    return -static_cast<__s32>(hostos_to_ceph_errno_unsigned(-r));
  }
  return static_cast<__s32>(hostos_to_ceph_errno_unsigned(r));
}
// Signed wrapper around wsae_to_errno_unsigned(): translate a WinSock
// error to an errno value while keeping the sign of "r".
__s32 wsae_to_errno(__s32 r)
{
  if (r < 0) {
    return -wsae_to_errno_unsigned(-r);
  }
  return wsae_to_errno_unsigned(r);
}
// Map an errno value (sign ignored) to an NTSTATUS code; 0 maps to
// STATUS_SUCCESS and unknown values fall back to STATUS_INTERNAL_ERROR.
__u32 errno_to_ntstatus(__s32 r) {
  // errno -> NTSTATUS
  // In some cases, there might be more than one applicable NTSTATUS
  // value or there might be none. Certain values can be overridden
  // when the caller (or whoever is supposed to handle the error) is
  // expecting a different NTSTATUS value.
  r = abs(r);

  switch(r) {
    case 0: return 0;
    case EPERM: return STATUS_ACCESS_DENIED;
    case ENOENT: return STATUS_OBJECT_NAME_NOT_FOUND;
    case ESRCH: return STATUS_NOT_FOUND;
    case EINTR: return STATUS_RETRY;
    case EIO: return STATUS_DATA_ERROR;
    case ENXIO: return STATUS_NOT_FOUND;
    case E2BIG: return STATUS_FILE_TOO_LARGE;
    case ENOEXEC: return STATUS_ACCESS_DENIED;
    case EBADF: return STATUS_INVALID_HANDLE;
    case ECHILD: return STATUS_INTERNAL_ERROR;
    case EAGAIN: return STATUS_RETRY;
    case EWOULDBLOCK: return STATUS_RETRY;
    case ENOMEM: return STATUS_NO_MEMORY;
    case EACCES: return STATUS_ACCESS_DENIED;
    case EFAULT: return STATUS_INVALID_ADDRESS;
    case ENOTBLK: return STATUS_BAD_DEVICE_TYPE;
    case EBUSY: return STATUS_DEVICE_BUSY;
    case EEXIST: return STATUS_OBJECT_NAME_COLLISION;
    case EXDEV: return STATUS_NOT_SAME_DEVICE;
    case ENODEV: return STATUS_SYSTEM_DEVICE_NOT_FOUND;
    case ENOTDIR: return STATUS_NOT_A_DIRECTORY;
    case EISDIR: return STATUS_FILE_IS_A_DIRECTORY;
    case EINVAL: return STATUS_INVALID_PARAMETER;
    case ENFILE: return STATUS_TOO_MANY_OPENED_FILES;
    case EMFILE: return STATUS_TOO_MANY_OPENED_FILES;
    case ENOTTY: return STATUS_INVALID_PARAMETER;
    case ETXTBSY: return STATUS_DEVICE_BUSY;
    case EFBIG: return STATUS_FILE_TOO_LARGE;
    case ENOSPC: return STATUS_DISK_FULL;
    case ESPIPE: return STATUS_INVALID_PARAMETER;
    case EROFS: return STATUS_MEDIA_WRITE_PROTECTED;
    case EMLINK: return STATUS_TOO_MANY_LINKS;
    case EPIPE: return STATUS_PIPE_BROKEN;
    case EDOM: return STATUS_INVALID_PARAMETER;
    case ERANGE: return STATUS_INVALID_PARAMETER;
    // same as EDEADLOCK
    // case EDEADLK: return 35;
    case EDEADLOCK: return STATUS_POSSIBLE_DEADLOCK;
    case ENAMETOOLONG: return STATUS_NAME_TOO_LONG;
    case ENOLCK: return STATUS_NOT_LOCKED;
    case ENOSYS: return STATUS_NOT_IMPLEMENTED;
    case ENOTEMPTY: return STATUS_DIRECTORY_NOT_EMPTY;
    case ELOOP: return STATUS_TOO_MANY_LINKS;
    case ENOMSG: return STATUS_MESSAGE_NOT_FOUND;
    case EIDRM: return STATUS_INVALID_PARAMETER;
    case ECHRNG: return STATUS_INVALID_PARAMETER;
    case EL2NSYNC: return STATUS_INTERNAL_ERROR;
    case EL3HLT: return STATUS_INTERNAL_ERROR;
    case EL3RST: return STATUS_INTERNAL_ERROR;
    case ELNRNG: return STATUS_INTERNAL_ERROR;
    case EUNATCH: return STATUS_INTERNAL_ERROR;
    case ENOCSI: return STATUS_INTERNAL_ERROR;
    case EL2HLT: return STATUS_INTERNAL_ERROR;
    case EBADE: return STATUS_INTERNAL_ERROR;
    case EBADR: return STATUS_INVALID_HANDLE;
    case EXFULL: return STATUS_DISK_FULL;
    case ENOANO: return STATUS_INTERNAL_ERROR;
    case EBADRQC: return STATUS_INVALID_PARAMETER;
    case EBADSLT: return STATUS_INVALID_PARAMETER;
    case EBFONT: return STATUS_INVALID_PARAMETER;
    case ENOSTR: return STATUS_INVALID_PARAMETER;
    case ENODATA: return STATUS_NOT_FOUND;
    case ETIME: return STATUS_TIMEOUT;
    case ENOSR: return STATUS_INSUFFICIENT_RESOURCES;
    case ENONET: return STATUS_NETWORK_UNREACHABLE;
    case ENOPKG: return STATUS_NO_SUCH_PACKAGE;
    case EREMOTE: return STATUS_INVALID_PARAMETER;
    case ENOLINK: return STATUS_INTERNAL_ERROR;
    case EADV: return STATUS_INTERNAL_ERROR;
    case ESRMNT: return STATUS_INTERNAL_ERROR;
    case ECOMM: return STATUS_INTERNAL_ERROR;
    case EPROTO: return STATUS_PROTOCOL_NOT_SUPPORTED;
    case EMULTIHOP: return STATUS_INTERNAL_ERROR;
    case EDOTDOT: return STATUS_INTERNAL_ERROR;
    case EBADMSG: return STATUS_INVALID_PARAMETER;
    case EOVERFLOW: return STATUS_BUFFER_OVERFLOW;
    case ENOTUNIQ: return STATUS_DUPLICATE_NAME;
    case EBADFD: return STATUS_INVALID_HANDLE;
    case EREMCHG: return STATUS_FILE_RENAMED;
    case ELIBACC: return STATUS_DLL_NOT_FOUND;
    case ELIBBAD: return STATUS_BAD_DLL_ENTRYPOINT;
    case ELIBSCN: return STATUS_BAD_DLL_ENTRYPOINT;
    case ELIBMAX: return STATUS_TOO_MANY_OPENED_FILES;
    case ELIBEXEC: return STATUS_INVALID_PARAMETER;
    case EILSEQ: return STATUS_INVALID_PARAMETER;
    // compat.h defines ERESTART as EINTR
    // case ERESTART: return 85;
    case ESTRPIPE: return STATUS_RETRY;
    case EUSERS: return STATUS_TOO_MANY_SIDS;
    case ENOTSOCK: return STATUS_INVALID_HANDLE;
    case EDESTADDRREQ: return STATUS_INVALID_PARAMETER;
    case EMSGSIZE: return STATUS_BUFFER_OVERFLOW;
    case EPROTOTYPE: return STATUS_INVALID_PARAMETER;
    case ENOPROTOOPT: return STATUS_PROTOCOL_NOT_SUPPORTED;
    case EPROTONOSUPPORT: return STATUS_PROTOCOL_NOT_SUPPORTED;
    case ESOCKTNOSUPPORT: return STATUS_NOT_SUPPORTED;
    case EOPNOTSUPP: return STATUS_NOT_SUPPORTED;
    case ENOTSUP: return STATUS_NOT_SUPPORTED;
    case EPFNOSUPPORT: return STATUS_PROTOCOL_NOT_SUPPORTED;
    case EAFNOSUPPORT: return STATUS_NOT_SUPPORTED;
    case EADDRINUSE: return STATUS_ADDRESS_ALREADY_EXISTS;
    case EADDRNOTAVAIL: return STATUS_INVALID_ADDRESS;
    case ENETDOWN: return STATUS_NETWORK_UNREACHABLE;
    case ENETUNREACH: return STATUS_NETWORK_UNREACHABLE;
    case ENETRESET: return STATUS_CONNECTION_RESET;
    case ECONNABORTED: return STATUS_CONNECTION_ABORTED;
    case ECONNRESET: return STATUS_CONNECTION_DISCONNECTED;
    case ENOBUFS: return STATUS_BUFFER_TOO_SMALL;
    case EISCONN: return STATUS_CONNECTION_ACTIVE;
    case ENOTCONN: return STATUS_CONNECTION_DISCONNECTED;
    case ESHUTDOWN: return STATUS_SYSTEM_SHUTDOWN;
    case ETOOMANYREFS: return STATUS_TOO_MANY_LINKS;
    case ETIMEDOUT: return STATUS_TIMEOUT;
    case ECONNREFUSED: return STATUS_CONNECTION_REFUSED;
    case EHOSTDOWN: return STATUS_FILE_CLOSED;
    case EHOSTUNREACH: return STATUS_HOST_UNREACHABLE;
    case EALREADY: return STATUS_PENDING;
    case EINPROGRESS: return STATUS_PENDING;
    case ESTALE: return STATUS_INVALID_HANDLE;
    case EUCLEAN: return STATUS_INVALID_PARAMETER;
    case ENOTNAM: return STATUS_INVALID_PARAMETER;
    case ENAVAIL: return STATUS_INVALID_PARAMETER;
    case EISNAM: return STATUS_INVALID_PARAMETER;
    case EREMOTEIO: return STATUS_DATA_ERROR;
    case EDQUOT: return STATUS_QUOTA_EXCEEDED;
    case ENOMEDIUM: return STATUS_NO_MEDIA;
    case EMEDIUMTYPE: return STATUS_INVALID_PARAMETER;
    case ECANCELED: return STATUS_REQUEST_CANCELED;
    case ENOKEY: return STATUS_NO_USER_KEYS;
    case EKEYEXPIRED: return STATUS_SMARTCARD_CERT_EXPIRED;
    case EKEYREVOKED: return STATUS_IMAGE_CERT_REVOKED;
    case EKEYREJECTED: return STATUS_ACCESS_DENIED;
    case EOWNERDEAD: return STATUS_INTERNAL_ERROR;
    case ENOTRECOVERABLE: return STATUS_INTERNAL_ERROR;
    case ERFKILL: return STATUS_INTERNAL_ERROR;
    case EHWPOISON: return STATUS_INTERNAL_ERROR;
    default:
      return STATUS_INTERNAL_ERROR;
  }
}
std::string win32_strerror(int err)
{
// As opposed to dlerror messages, this has to be freed.
LPSTR msg = NULL;
DWORD msg_len = ::FormatMessageA(
FORMAT_MESSAGE_ALLOCATE_BUFFER |
FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS,
NULL,
err,
0,
(LPSTR) &msg,
0,
NULL);
std::ostringstream msg_stream;
msg_stream << "(" << err << ") ";
if (!msg_len) {
msg_stream << "Unknown error";
}
else {
msg_stream << msg;
::LocalFree(msg);
}
return msg_stream.str();
}
std::string win32_lasterror_str()
{
DWORD err = ::GetLastError();
return win32_strerror(err);
}
// Lookup table used by cephfs_errno_to_ntstatus_map(): CephFS error codes
// (positive magnitude) to the NTSTATUS values surfaced to Windows callers.
static const ceph::unordered_map<int,NTSTATUS> cephfs_errno_to_ntstatus = {
  {CEPHFS_EBLOCKLISTED, STATUS_SYSTEM_SHUTDOWN},
  {CEPHFS_EPERM, STATUS_ACCESS_DENIED},
  {CEPHFS_ESTALE, STATUS_INVALID_HANDLE},
  {CEPHFS_ENOSPC, STATUS_DISK_FULL},
  {CEPHFS_ETIMEDOUT, STATUS_TIMEOUT},
  {CEPHFS_EIO, STATUS_DATA_ERROR},
  {CEPHFS_ENOTCONN, STATUS_CONNECTION_DISCONNECTED},
  {CEPHFS_EEXIST, STATUS_OBJECT_NAME_COLLISION},
  {CEPHFS_EINTR, STATUS_RETRY},
  {CEPHFS_EINVAL, STATUS_INVALID_PARAMETER},
  {CEPHFS_EBADF, STATUS_INVALID_HANDLE},
  {CEPHFS_EROFS, STATUS_MEDIA_WRITE_PROTECTED},
  {CEPHFS_EAGAIN, STATUS_RETRY},
  {CEPHFS_EACCES, STATUS_ACCESS_DENIED},
  {CEPHFS_ELOOP, STATUS_TOO_MANY_LINKS},
  {CEPHFS_EISDIR, STATUS_FILE_IS_A_DIRECTORY},
  {CEPHFS_ENOENT, STATUS_OBJECT_NAME_NOT_FOUND},
  {CEPHFS_ENOTDIR, STATUS_NOT_A_DIRECTORY},
  {CEPHFS_ENAMETOOLONG, STATUS_NAME_TOO_LONG},
  {CEPHFS_EBUSY, STATUS_DEVICE_BUSY},
  {CEPHFS_EDQUOT, STATUS_QUOTA_EXCEEDED},
  {CEPHFS_EFBIG, STATUS_FILE_TOO_LARGE},
  {CEPHFS_ERANGE, STATUS_INVALID_PARAMETER},
  {CEPHFS_ENXIO, STATUS_NOT_FOUND},
  {CEPHFS_ECANCELED, STATUS_REQUEST_CANCELED},
  {CEPHFS_ENODATA, STATUS_NOT_FOUND},
  {CEPHFS_EOPNOTSUPP, STATUS_NOT_SUPPORTED},
  {CEPHFS_EXDEV, STATUS_NOT_SAME_DEVICE},
  {CEPHFS_ENOMEM, STATUS_NO_MEMORY},
  {CEPHFS_ENOTRECOVERABLE, STATUS_INTERNAL_ERROR},
  {CEPHFS_ENOSYS, STATUS_NOT_IMPLEMENTED},
  {CEPHFS_ENOTEMPTY, STATUS_DIRECTORY_NOT_EMPTY},
  {CEPHFS_EDEADLK, STATUS_POSSIBLE_DEADLOCK},
  {CEPHFS_EDOM, STATUS_INVALID_PARAMETER},
  {CEPHFS_EMLINK, STATUS_TOO_MANY_LINKS},
  {CEPHFS_ETIME, STATUS_TIMEOUT},
  {CEPHFS_EOLDSNAPC, STATUS_DATA_ERROR}
};
// Translate a CephFS error code (sign ignored) to an NTSTATUS value.
// 0 maps to STATUS_SUCCESS; unmapped codes become STATUS_INTERNAL_ERROR.
__u32 cephfs_errno_to_ntstatus_map(int cephfs_errno)
{
  const int key = abs(cephfs_errno);
  if (!key)
    return 0;

  auto found = cephfs_errno_to_ntstatus.find(key);
  return found != cephfs_errno_to_ntstatus.end()
    ? found->second
    : STATUS_INTERNAL_ERROR;
}
| 22,524 | 33.49464 | 75 | cc |
null | ceph-main/src/common/win32/ifaddrs.cc | #include <errno.h>
#include <winsock2.h>
#include <wincrypt.h>
#include <iphlpapi.h>
#include <ws2tcpip.h>
#include <ifaddrs.h>
#include <stdio.h>
#include "include/compat.h"
// Windows implementation of getifaddrs(): enumerate the unicast IPv4/IPv6
// addresses of all operationally-up adapters via GetAdaptersAddresses and
// expose them as a linked list of struct ifaddrs (see include/compat.h).
// Returns 0 and stores the list in *ifap on success, -1 (with errno set)
// on failure. The caller releases the list with freeifaddrs().
int getifaddrs(struct ifaddrs **ifap)
{
  int ret = 0;
  DWORD size = 0, res = 0;
  // First call with a NULL buffer just reports the required size.
  res = GetAdaptersAddresses(
    AF_UNSPEC, GAA_FLAG_INCLUDE_PREFIX,
    NULL, NULL, &size);
  if (res != ERROR_BUFFER_OVERFLOW) {
    errno = ENOMEM;
    return -1;
  }

  PIP_ADAPTER_ADDRESSES adapter_addrs = (PIP_ADAPTER_ADDRESSES)malloc(size);
  if (!adapter_addrs) {
    errno = ENOMEM;
    return -1;
  }
  res = GetAdaptersAddresses(
    AF_UNSPEC, GAA_FLAG_INCLUDE_PREFIX,
    NULL, adapter_addrs, &size);
  if (res != ERROR_SUCCESS) {
    // Fix: this buffer used to leak on the error path.
    free(adapter_addrs);
    errno = ENOMEM;
    return -1;
  }

  struct ifaddrs *out_list_head = NULL;
  struct ifaddrs *out_list_curr;

  for (PIP_ADAPTER_ADDRESSES curr_addrs = adapter_addrs;
       curr_addrs != NULL;
       curr_addrs = curr_addrs->Next) {
    // Skip adapters that are not operationally up (IfOperStatusUp == 1).
    if (curr_addrs->OperStatus != 1)
      continue;

    for (PIP_ADAPTER_UNICAST_ADDRESS unicast_addrs = curr_addrs->FirstUnicastAddress;
         unicast_addrs != NULL;
         unicast_addrs = unicast_addrs->Next) {
      SOCKADDR* unicast_sockaddr = unicast_addrs->Address.lpSockaddr;
      if (unicast_sockaddr->sa_family != AF_INET &&
          unicast_sockaddr->sa_family != AF_INET6)
        continue;

      // Explicit cast keeps this valid when compiled as C++.
      out_list_curr = (struct ifaddrs *) calloc(1, sizeof(*out_list_curr));
      if (!out_list_curr) {
        errno = ENOMEM;
        ret = -1;
        goto out;
      }

      // Prepend to the output list so cleanup can always walk from head.
      out_list_curr->ifa_next = out_list_head;
      out_list_head = out_list_curr;

      out_list_curr->ifa_flags = IFF_UP;
      if (curr_addrs->IfType == IF_TYPE_SOFTWARE_LOOPBACK)
        out_list_curr->ifa_flags |= IFF_LOOPBACK;

      // Point the public fields at the storage embedded in the node.
      out_list_curr->ifa_addr = (struct sockaddr *) &out_list_curr->in_addrs;
      out_list_curr->ifa_netmask = (struct sockaddr *) &out_list_curr->in_netmasks;
      out_list_curr->ifa_name = out_list_curr->ad_name;

      if (unicast_sockaddr->sa_family == AF_INET) {
        ULONG subnet_mask = 0;
        // ConvertLengthToIpv4Mask returns NO_ERROR (0) on success.
        if (ConvertLengthToIpv4Mask(unicast_addrs->OnLinkPrefixLength, &subnet_mask)) {
          errno = ENODATA;
          ret = -1;
          goto out;
        }
        struct sockaddr_in *addr4 = (struct sockaddr_in *) &out_list_curr->in_addrs;
        struct sockaddr_in *netmask4 = (struct sockaddr_in *) &out_list_curr->in_netmasks;
        netmask4->sin_family = unicast_sockaddr->sa_family;
        addr4->sin_family = unicast_sockaddr->sa_family;
        netmask4->sin_addr.S_un.S_addr = subnet_mask;
        addr4->sin_addr = ((struct sockaddr_in*) unicast_sockaddr)->sin_addr;
      } else {
        struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *) &out_list_curr->in_addrs;
        (*addr6) = *(struct sockaddr_in6 *) unicast_sockaddr;
      }

      out_list_curr->speed = curr_addrs->TransmitLinkSpeed;
      // Fix: pass the adapter name as an argument rather than as the
      // format string, so '%' characters in it cannot corrupt the output.
      // TODO: maybe use the friendly name instead of the adapter GUID.
      sprintf_s(out_list_curr->ad_name,
                sizeof(out_list_curr->ad_name),
                "%s", curr_addrs->AdapterName);
    }
  }
  ret = 0;

out:
  free(adapter_addrs);
  if (ret && out_list_head)
    // Fix: release the whole partially-built list, not just its head.
    freeifaddrs(out_list_head);
  else if (ifap)
    *ifap = out_list_head;
  return ret;
}
void freeifaddrs(struct ifaddrs *ifa)
{
while (ifa) {
struct ifaddrs *next = ifa->ifa_next;
free(ifa);
ifa = next;
}
}
| 3,292 | 28.936364 | 90 | cc |
null | ceph-main/src/common/win32/registry.cc | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#define dout_context cct
#define dout_subsys ceph_subsys_
#include "common/debug.h"
#include "common/errno.h"
#include "common/win32/registry.h"
// Open the registry key "strKey" under hRootKey with full access,
// creating it when absent if create_value is set. A missing key without
// creation only flags missingKey; other failures are logged.
RegistryKey::RegistryKey(CephContext *cct_, HKEY hRootKey, LPCTSTR strKey,
                         bool create_value): cct(cct_)
{
  DWORD rc = RegOpenKeyEx(hRootKey, strKey, 0, KEY_ALL_ACCESS, &hKey);
  if (rc == ERROR_FILE_NOT_FOUND && create_value) {
    ldout(cct_, 10) << "Creating registry key: " << strKey << dendl;
    rc = RegCreateKeyEx(
      hRootKey, strKey, 0, NULL, REG_OPTION_NON_VOLATILE,
      KEY_ALL_ACCESS, NULL, &hKey, NULL);
  }

  if (rc == ERROR_SUCCESS) {
    return;
  }
  if (rc == ERROR_FILE_NOT_FOUND) {
    missingKey = true;
  } else {
    lderr(cct_) << "Error: " << win32_strerror(rc)
                << ". Could not open registry key: "
                << strKey << dendl;
  }
}
// Close the key handle on destruction; failures are logged but ignored.
RegistryKey::~RegistryKey() {
  if (hKey) {
    DWORD rc = RegCloseKey(hKey);
    if (rc == ERROR_SUCCESS) {
      hKey = NULL;
    } else {
      derr << "Error: " << win32_strerror(rc)
           << ". Could not close registry key." << dendl;
    }
  }
}
// Delete the given registry key (64-bit view). A key that is already
// absent counts as success; other failures return -EINVAL.
int RegistryKey::remove(CephContext *cct_, HKEY hRootKey, LPCTSTR strKey)
{
  DWORD rc = RegDeleteKeyEx(hRootKey, strKey, KEY_WOW64_64KEY, 0);
  if (rc == ERROR_FILE_NOT_FOUND) {
    ldout(cct_, 20) << "Registry key : " << strKey
                    << " does not exist." << dendl;
    return 0;
  }
  if (rc != ERROR_SUCCESS) {
    lderr(cct_) << "Error: " << win32_strerror(rc)
                << ". Could not delete registry key: "
                << strKey << dendl;
    return -EINVAL;
  }
  return 0;
}
int RegistryKey::flush() {
DWORD status = RegFlushKey(hKey);
if (ERROR_SUCCESS != status) {
derr << "Error: " << win32_strerror(status)
<< ". Could not flush registry key." << dendl;
return -EINVAL;
}
return 0;
}
// Store a DWORD value under this key as REG_DWORD.
int RegistryKey::set(LPCTSTR lpValue, DWORD data)
{
  DWORD rc = RegSetValueEx(hKey, lpValue, 0, REG_DWORD,
                           (LPBYTE)&data, sizeof(DWORD));
  if (rc == ERROR_SUCCESS) {
    return 0;
  }
  derr << "Error: " << win32_strerror(rc)
       << ". Could not set registry value: " << (char*)lpValue << dendl;
  return -EINVAL;
}
// Store a string value under this key as REG_SZ.
int RegistryKey::set(LPCTSTR lpValue, std::string data)
{
  // Fix: per the RegSetValueEx contract, cbData for REG_SZ must include
  // the terminating null character, otherwise the value is stored
  // unterminated and readers may over-read it.
  DWORD status = RegSetValueEx(hKey, lpValue, 0, REG_SZ,
                               (LPBYTE)data.c_str(), data.length() + 1);
  if (ERROR_SUCCESS != status) {
    derr << "Error: " << win32_strerror(status)
         << ". Could not set registry value: "
         << (char*)lpValue << dendl;
    return -EINVAL;
  }

  return 0;
}
// Read a boolean stored as REG_DWORD; any non-zero value means "true".
// "value" is only written on success.
int RegistryKey::get(LPCTSTR lpValue, bool& value)
{
  DWORD raw = 0;
  int rc = get(lpValue, raw);
  if (rc) {
    return rc;
  }
  value = (raw != 0);
  return 0;
}
// Read a REG_DWORD value; "value" is only written on success.
int RegistryKey::get(LPCTSTR lpValue, DWORD& value)
{
  DWORD out = 0;
  DWORD size = sizeof(out);
  DWORD type = REG_DWORD;
  DWORD rc = RegQueryValueEx(hKey, lpValue, NULL,
                             &type, (LPBYTE)&out, &size);
  if (rc != ERROR_SUCCESS) {
    derr << "Error: " << win32_strerror(rc)
         << ". Could not get registry value: "
         << (char*)lpValue << dendl;
    return -EINVAL;
  }
  value = out;
  return 0;
}
/* Read a REG_SZ value into `value`.
 * Returns 0 on success, -EINVAL on failure.
 *
 * Two-step query: first ask RegQueryValueEx for the required size
 * (NULL buffer), then read into a buffer of that size.
 *
 * Fix: the previous code wrote into the buffer through the const
 * pointer returned by std::string::c_str() (cast away via LPBYTE),
 * which is undefined behavior. We now write through &data[0], which
 * legitimately refers to the string's modifiable storage. */
int RegistryKey::get(LPCTSTR lpValue, std::string& value)
{
  DWORD size = 0;
  DWORD type = REG_SZ;
  // First pass: NULL buffer, just retrieve the required size in bytes.
  DWORD status = RegQueryValueEx(hKey, lpValue, NULL, &type, NULL, &size);
  std::string data(size, '\0');
  if (ERROR_SUCCESS == status && size > 0) {
    status = RegQueryValueEx(hKey, lpValue, NULL, &type,
                             (LPBYTE)&data[0], &size);
  }
  if (ERROR_SUCCESS != status) {
    derr << "Error: " << win32_strerror(status)
         << ". Could not get registry value: "
         << (char*)lpValue << dendl;
    return -EINVAL;
  }
  // Drop the stored terminator (and anything after it, if present).
  value.assign(data.c_str());
  return 0;
}
| 4,285 | 24.819277 | 74 | cc |
null | ceph-main/src/common/win32/registry.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/compat.h"
#include "common/ceph_context.h"
// RAII-ish wrapper around a Windows registry key handle. The key is
// opened (optionally created) by the constructor and closed by the
// destructor; `missingKey` is set when the key did not exist and
// creation was not requested.
class RegistryKey {
public:
  RegistryKey(CephContext *cct_, HKEY hRootKey, LPCTSTR strKey, bool create_value);
  ~RegistryKey();
  // Delete the given key. Returns 0 on success (including a key that
  // does not exist), -EINVAL on failure.
  // Fix: the declaration was missing its "int" return type, which is
  // ill-formed C++ and did not match the definition in registry.cc.
  static int remove(CephContext *cct_, HKEY hRootKey, LPCTSTR strKey);
  // Flush pending writes to the hive; 0 on success, -EINVAL on failure.
  int flush();
  // Setters/getters for REG_DWORD / REG_SZ values; all return 0 on
  // success, -EINVAL on failure. Getters leave the output untouched
  // on failure.
  int set(LPCTSTR lpValue, DWORD data);
  int set(LPCTSTR lpValue, std::string data);
  int get(LPCTSTR lpValue, bool& value);
  int get(LPCTSTR lpValue, DWORD& value);
  int get(LPCTSTR lpValue, std::string& value);
  HKEY hKey = NULL;
  bool missingKey = false;
private:
  CephContext *cct;
};
| 886 | 21.74359 | 83 | h |
null | ceph-main/src/common/win32/service.cc | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#define dout_context cct
#define dout_subsys ceph_subsys_
#include "common/debug.h"
#include "common/errno.h"
#include "common/win32/service.h"
// Singleton: the one service instance driven by the SCM callbacks below.
ServiceBase *ServiceBase::s_service = nullptr;
// Describe this process to the Service Control Manager: a standalone
// (own-process) service that accepts stop and system-shutdown requests.
// The initial state is START_PENDING until run_hook() succeeds.
ServiceBase::ServiceBase(CephContext *cct_): cct(cct_)
{
  status.dwServiceType = SERVICE_WIN32_OWN_PROCESS;
  status.dwControlsAccepted = SERVICE_ACCEPT_STOP | SERVICE_ACCEPT_SHUTDOWN;
  status.dwCurrentState = SERVICE_START_PENDING;
  status.dwWin32ExitCode = NO_ERROR;
  status.dwCheckPoint = 0;
  /* The estimated time required for the stop operation in ms. */
  status.dwWaitHint = 0;
}
/* Register service action callbacks */
// Hand control of the process over to the Windows SCM. Blocks in
// StartServiceCtrlDispatcher until the service terminates; returns 0
// once the dispatcher exits, -EINVAL if it could not be started.
int ServiceBase::initialize(ServiceBase *service)
{
  s_service = service;
  // One dispatch entry plus the mandatory NULL sentinel. The service
  // name may be empty because we run as SERVICE_WIN32_OWN_PROCESS.
  SERVICE_TABLE_ENTRY service_table[] = {
    {"", (LPSERVICE_MAIN_FUNCTION)run},
    {NULL, NULL}
  };
  /* StartServiceCtrlDispatcher blocks until the service is stopped. */
  if (!StartServiceCtrlDispatcher(service_table)) {
    int err = GetLastError();
    lderr(service->cct) << "StartServiceCtrlDispatcher error: "
                        << err << dendl;
    return -EINVAL;
  }
  return 0;
}
// Service entry point, invoked by the SCM dispatcher on its own thread.
// Registers the control handler, runs the subclass start hook, then
// reports RUNNING (or performs a best-effort shutdown on failure).
void WINAPI ServiceBase::run()
{
  assert(s_service != NULL);
  /* Register the control handler. This function is called by the service
   * manager to stop the service. The service name that we're passing here
   * doesn't have to be valid as we're using SERVICE_WIN32_OWN_PROCESS. */
  s_service->hstatus = RegisterServiceCtrlHandler(
    "", (LPHANDLER_FUNCTION)control_handler);
  if (!s_service->hstatus) {
    lderr(s_service->cct) << "Could not initialize service control handler. "
                          << "Error: " << GetLastError() << dendl;
    return;
  }
  s_service->set_status(SERVICE_START_PENDING);
  // TODO: should we expect exceptions?
  ldout(s_service->cct, 0) << "Starting service." << dendl;
  int err = s_service->run_hook();
  if (err) {
    lderr(s_service->cct) << "Failed to start service. Error code: "
                          << err << dendl;
    // ignore_errors=true: always end up in SERVICE_STOPPED here.
    s_service->shutdown(true);
  } else {
    ldout(s_service->cct, 0) << "Successfully started service." << dendl;
    s_service->set_status(SERVICE_RUNNING);
  }
}
// Handle a system-shutdown request: run the subclass shutdown hook and
// report the outcome to the SCM. With ignore_errors set (used when
// startup itself failed), a failing hook still results in
// SERVICE_STOPPED; otherwise the previous state is restored.
void ServiceBase::shutdown(bool ignore_errors)
{
  DWORD original_state = status.dwCurrentState;
  set_status(SERVICE_STOP_PENDING);
  int err = shutdown_hook();
  if (err) {
    derr << "Shutdown service hook failed. Error code: " << err << dendl;
    if (ignore_errors) {
      derr << "Ignoring shutdown hook failure, marking the service as stopped."
           << dendl;
      set_status(SERVICE_STOPPED);
    } else {
      derr << "Reverting to original service state." << dendl;
      set_status(original_state);
    }
  } else {
    dout(0) << "Shutdown hook completed." << dendl;
    set_status(SERVICE_STOPPED);
  }
}
void ServiceBase::stop()
{
DWORD original_state = status.dwCurrentState;
set_status(SERVICE_STOP_PENDING);
int err = stop_hook();
if (err) {
derr << "Service stop hook failed. Error code: " << err << dendl;
set_status(original_state);
} else {
dout(0) << "Successfully stopped service." << dendl;
set_status(SERVICE_STOPPED);
}
}
/* This function is registered with the Windows services manager through
 * a call to RegisterServiceCtrlHandler() and will be called by the Windows
 * service manager asynchronously to stop the service. */
void ServiceBase::control_handler(DWORD request)
{
  if (request == SERVICE_CONTROL_STOP) {
    s_service->stop();
  } else if (request == SERVICE_CONTROL_SHUTDOWN) {
    s_service->shutdown();
  }
  // All other control codes are ignored.
}
// Record the new service state (and optional Win32 exit code) and
// report it to the SCM. A monotonically increasing checkpoint value is
// attached when entering RUNNING or STOPPED. If the control handler
// was never registered (hstatus is NULL) the update is only logged.
void ServiceBase::set_status(DWORD current_state, DWORD exit_code) {
  // Shared across calls: the SCM expects the checkpoint to increase.
  static DWORD dwCheckPoint = 1;
  if (current_state == SERVICE_RUNNING || current_state == SERVICE_STOPPED) {
    status.dwCheckPoint = dwCheckPoint++;
  }
  status.dwCurrentState = current_state;
  status.dwWin32ExitCode = exit_code;
  if (hstatus) {
    dout(5) << "Updating service service status (" << current_state
            << ") and exit code(" << exit_code << ")." << dendl;
    ::SetServiceStatus(hstatus, &status);
  } else {
    derr << "Service control handler not initialized. Cannot "
         << "update service status (" << current_state
         << ") and exit code(" << exit_code << ")." << dendl;
  }
}
| 4,638 | 28.547771 | 79 | cc |
null | ceph-main/src/common/win32/service.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/compat.h"
#include "common/ceph_context.h"
// Base class for running a Ceph daemon as a Windows service. Subclasses
// implement the run/stop/shutdown hooks; initialize() hands the process
// over to the Service Control Manager.
class ServiceBase {
public:
  ServiceBase(CephContext *cct_);
  virtual ~ServiceBase() {};
  // Blocks until the service terminates; 0 on success, -EINVAL if the
  // SCM dispatcher could not be started.
  static int initialize(ServiceBase *service);
protected:
  // SCM callbacks.
  static void run();
  static void control_handler(DWORD request);
  void shutdown(bool ignore_errors = false);
  void stop();
  // Report the current state (and optional exit code) to the SCM.
  void set_status(DWORD current_state, DWORD exit_code = NO_ERROR);
  /* Subclasses should implement the following service hooks. */
  virtual int run_hook() = 0;
  /* Invoked when the service is requested to stop. */
  virtual int stop_hook() = 0;
  /* Invoked when the system is shutting down. */
  virtual int shutdown_hook() = 0;
  CephContext *cct;
private:
  /* A handle used when reporting the current status.
   * Fix: explicitly null-initialized. run() and set_status() test this
   * handle, which previously read an indeterminate value when the
   * handler registration had not happened (or had failed). */
  SERVICE_STATUS_HANDLE hstatus = NULL;
  /* The current service status.
   * Fix: zero-initialized so fields the constructor does not set
   * (e.g. dwServiceSpecificExitCode) are not left indeterminate. */
  SERVICE_STATUS status = {};
  /* singleton service instance */
  static ServiceBase *s_service;
};
| 1,240 | 23.82 | 67 | h |
null | ceph-main/src/common/win32/syslog.cc | #include <windows.h>
#include <syslog.h>
#include "event_logging.h"
#include "common/code_environment.h"
static HANDLE g_event_source = NULL;
// Lazily register this process as a Windows Event Log source. Safe for
// concurrent callers: racers each create a handle and the loser of the
// interlocked compare-exchange deregisters its duplicate. Returns false
// only if registration failed.
bool get_event_source()
{
  if (!g_event_source) {
    HANDLE temp = RegisterEventSourceA(NULL, get_process_name_cpp().c_str());
    if (!temp)
      return false;
    if (InterlockedCompareExchangePointer(&g_event_source, temp, NULL)) {
      // There already was an event source, let's cleanup the one that we've
      // just created.
      DeregisterEventSource(temp);
    }
  }
  return true;
}
// Map a syslog priority to an Event Log type/message-id pair and emit
// the message to the Windows Event Log. Silently drops the message if
// the event source cannot be registered.
void write_event_log_entry(int level, const char* msg)
{
  if (!get_event_source()) {
    return;
  }
  // Default to an error entry; the recognized levels override below.
  WORD type = EVENTLOG_ERROR_TYPE;
  DWORD event_id = ERROR_EVENTMSG;
  if (level == LOG_DEBUG) {
    type = EVENTLOG_SUCCESS;
    event_id = SUCCESS_EVENTMSG;
  } else if (level == LOG_INFO || level == LOG_NOTICE) {
    type = EVENTLOG_INFORMATION_TYPE;
    event_id = INFO_EVENTMSG;
  } else if (level == LOG_WARNING) {
    type = EVENTLOG_WARNING_TYPE;
    event_id = WARN_EVENTMSG;
  }
  ReportEventA(g_event_source, type,
               0, event_id, NULL, 1, 0, &msg, NULL);
}
void syslog(int priority, const char* format, ...)
{
va_list args;
va_start(args, format);
size_t length = (size_t)_vscprintf(format, args) + 1;
char* buffer = (char*) malloc(length);
if (NULL == buffer) {
va_end(args);
return;
}
vsnprintf_s(buffer, length, length - 1, format, args);
va_end(args);
write_event_log_entry(LOG_PRI(priority), buffer);
free(buffer);
}
| 1,598 | 19.5 | 77 | cc |
null | ceph-main/src/common/win32/wstring.cc | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2022 Cloudbase Solutions
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "wstring.h"
#include <boost/locale/encoding_utf.hpp>
using boost::locale::conv::utf_to_utf;
// Convert a UTF-8 encoded std::string to its wide (UTF-16 on Windows)
// representation.
std::wstring to_wstring(const std::string& str)
{
  const char* first = str.c_str();
  return utf_to_utf<wchar_t>(first, first + str.size());
}
// Convert a wide (UTF-16 on Windows) string to its UTF-8 representation.
std::string to_string(const std::wstring& str)
{
  const wchar_t* first = str.c_str();
  return utf_to_utf<char>(first, first + str.size());
}
| 656 | 22.464286 | 68 | cc |
null | ceph-main/src/common/win32/wstring.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2022 Cloudbase Solutions
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <string>
// Convert a UTF-8 encoded string to its wide (UTF-16) representation.
std::wstring to_wstring(const std::string& str);
// Convert a wide (UTF-16) string to its UTF-8 representation.
std::string to_string(const std::wstring& wstr);
| 444 | 22.421053 | 61 | h |
null | ceph-main/src/compressor/CompressionPlugin.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2015 Mirantis, Inc.
*
* Author: Alyona Kiseleva <[email protected]>
*
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef COMPRESSION_PLUGIN_H
#define COMPRESSION_PLUGIN_H
#include <iosfwd>
#include <iostream>
#include "common/PluginRegistry.h"
#include "include/common_fwd.h"
#include "Compressor.h"
namespace ceph {
  // Base class for dynamically loaded compression plugins. Each plugin
  // builds (and caches) a single Compressor instance via factory().
  class CompressionPlugin : public Plugin {
  public:
    // Cached compressor shared by every caller of factory().
    TOPNSPC::CompressorRef compressor;

    explicit CompressionPlugin(CephContext *cct)
      : Plugin(cct)
    {}

    ~CompressionPlugin() override {}

    // Create (or reuse) the compressor; returns 0 on success and may
    // write diagnostics to *ss.
    virtual int factory(TOPNSPC::CompressorRef *cs,
			std::ostream *ss) = 0;

    virtual const char* name() {return "CompressionPlugin";}
  };
}
| 1,091 | 21.285714 | 71 | h |
null | ceph-main/src/compressor/Compressor.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <random>
#include <sstream>
#include <iterator>
#include <algorithm>
#include "CompressionPlugin.h"
#include "Compressor.h"
#include "include/random.h"
#include "common/ceph_context.h"
#include "common/debug.h"
#include "common/dout.h"
namespace TOPNSPC {
#ifdef HAVE_QATZIP
QatAccel Compressor::qat_accel;
#endif
// Translate a CompressionAlgorithm enum value into its configuration
// name; unknown values map to "???".
const char* Compressor::get_comp_alg_name(int a) {
  for (const auto& [name, alg] : compression_algorithms) {
    if (alg == a)
      return name;
  }
  return "???";
}
// Translate a configuration algorithm name into its enum value, or an
// empty optional for an unrecognized name.
std::optional<Compressor::CompressionAlgorithm>
Compressor::get_comp_alg_type(std::string_view s) {
  for (const auto& [name, alg] : compression_algorithms) {
    if (name == s)
      return alg;
  }
  return {};
}
// Translate a CompressionMode enum value into its textual form;
// unknown values map to "???".
const char *Compressor::get_comp_mode_name(int m) {
  if (m == COMP_NONE)
    return "none";
  if (m == COMP_PASSIVE)
    return "passive";
  if (m == COMP_AGGRESSIVE)
    return "aggressive";
  if (m == COMP_FORCE)
    return "force";
  return "???";
}
std::optional<Compressor::CompressionMode>
Compressor::get_comp_mode_type(std::string_view s) {
if (s == "force")
return COMP_FORCE;
if (s == "aggressive")
return COMP_AGGRESSIVE;
if (s == "passive")
return COMP_PASSIVE;
if (s == "none")
return COMP_NONE;
return {};
}
// Factory: load the compression plugin named `type` ("snappy", "zlib",
// ...) and return its compressor. The special type "random" draws one
// of the known algorithms (used by teuthology testing) and may return
// nullptr when the draw lands on "none". Also returns nullptr when the
// plugin cannot be loaded.
CompressorRef Compressor::create(CephContext *cct, const std::string &type)
{
  // support "random" for teuthology testing
  if (type == "random") {
    int alg = ceph::util::generate_random_number(0, COMP_ALG_LAST - 1);
    if (alg == COMP_ALG_NONE) {
      return nullptr;
    }
    return create(cct, alg);
  }

  CompressorRef cs_impl = NULL;
  std::stringstream ss;
  auto reg = cct->get_plugin_registry();
  auto factory = dynamic_cast<ceph::CompressionPlugin*>(reg->get_with_load("compressor", type));
  if (factory == NULL) {
    lderr(cct) << __func__ << " cannot load compressor of type " << type << dendl;
    return NULL;
  }
  int err = factory->factory(&cs_impl, &ss);
  if (err)
    lderr(cct) << __func__ << " factory return error " << err << dendl;
  return cs_impl;
}
// Convenience overload: validate the enum value, map it to its name
// and delegate to the name-based factory.
CompressorRef Compressor::create(CephContext *cct, int alg)
{
  const bool valid = (alg >= 0 && alg < COMP_ALG_LAST);
  if (!valid) {
    lderr(cct) << __func__ << " invalid algorithm value:" << alg << dendl;
    return CompressorRef();
  }
  return create(cct, std::string(get_comp_alg_name(alg)));
}
} // namespace TOPNSPC
| 3,081 | 26.274336 | 96 | cc |
null | ceph-main/src/compressor/Compressor.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMPRESSOR_H
#define CEPH_COMPRESSOR_H
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include "include/ceph_assert.h" // boost clobbers this
#include "include/common_fwd.h"
#include "include/buffer.h"
#include "include/int_types.h"
#ifdef HAVE_QATZIP
#include "QatAccel.h"
#endif
namespace TOPNSPC {

class Compressor;
typedef std::shared_ptr<Compressor> CompressorRef;

// Abstract interface for all compression backends (snappy, zlib, zstd,
// optionally lz4/brotli), plus the static name<->enum translation and
// factory helpers implemented in Compressor.cc.
class Compressor {
public:
  // NOTE: these numeric values are referenced by name/value pairs below
  // and passed around as plain ints (see create(CephContext*, int));
  // do not renumber existing entries.
  enum CompressionAlgorithm {
    COMP_ALG_NONE = 0,
    COMP_ALG_SNAPPY = 1,
    COMP_ALG_ZLIB = 2,
    COMP_ALG_ZSTD = 3,
#ifdef HAVE_LZ4
    COMP_ALG_LZ4 = 4,
#endif
#ifdef HAVE_BROTLI
    COMP_ALG_BROTLI = 5,
#endif
    COMP_ALG_LAST  //the last value for range checks
  };

  // Name <-> enum table used by get_comp_alg_name()/get_comp_alg_type().
  using pair_type = std::pair<const char*, CompressionAlgorithm>;
  static constexpr std::initializer_list<pair_type> compression_algorithms {
    { "none",	COMP_ALG_NONE },
    { "snappy", COMP_ALG_SNAPPY },
    { "zlib",	COMP_ALG_ZLIB },
    { "zstd",	COMP_ALG_ZSTD },
#ifdef HAVE_LZ4
    { "lz4",	COMP_ALG_LZ4 },
#endif
#ifdef HAVE_BROTLI
    { "brotli", COMP_ALG_BROTLI },
#endif
  };

  // compression options
  enum CompressionMode {
    COMP_NONE,                  ///< compress never
    COMP_PASSIVE,               ///< compress if hinted COMPRESSIBLE
    COMP_AGGRESSIVE,            ///< compress unless hinted INCOMPRESSIBLE
    COMP_FORCE                  ///< compress always
  };

#ifdef HAVE_QATZIP
  // Set per-instance when QAT offload is enabled; the accelerator
  // itself is shared process-wide.
  bool qat_enabled;
  static QatAccel qat_accel;
#endif

  // Enum <-> string helpers; the *_type() variants return an empty
  // optional for unrecognized input.
  static const char* get_comp_alg_name(int a);
  static std::optional<CompressionAlgorithm> get_comp_alg_type(std::string_view s);

  static const char *get_comp_mode_name(int m);
  static std::optional<CompressionMode> get_comp_mode_type(std::string_view s);

  Compressor(CompressionAlgorithm a, const char* t) : alg(a), type(t) {
  }
  virtual ~Compressor() {}
  const std::string& get_type_name() const {
    return type;
  }
  CompressionAlgorithm get_type() const {
    return alg;
  }
  // Compress/decompress between bufferlists; compressor_message carries
  // backend-specific metadata (e.g. a zlib window size) between the
  // compress and decompress sides. All return 0 on success.
  virtual int compress(const ceph::bufferlist &in, ceph::bufferlist &out, std::optional<int32_t> &compressor_message) = 0;
  virtual int decompress(const ceph::bufferlist &in, ceph::bufferlist &out, std::optional<int32_t> compressor_message) = 0;
  // this is a bit weird but we need non-const iterator to be in
  // alignment with decode methods
  virtual int decompress(ceph::bufferlist::const_iterator &p, size_t compressed_len, ceph::bufferlist &out, std::optional<int32_t> compressor_message) = 0;

  // Factories: load the plugin by name, or by enum value ("random" is
  // supported by the name-based variant for testing).
  static CompressorRef create(CephContext *cct, const std::string &type);
  static CompressorRef create(CephContext *cct, int alg);

protected:
  CompressionAlgorithm alg;
  std::string type;
};

} // namespace TOPNSPC
#endif
| 3,134 | 27.5 | 155 | h |
null | ceph-main/src/compressor/QatAccel.cc | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Intel Corporation
*
* Author: Qiaowei Ren <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <qatzip.h>
#include "common/ceph_context.h"
#include "common/common_init.h"
#include "common/debug.h"
#include "common/dout.h"
#include "common/errno.h"
#include "QatAccel.h"
// -----------------------------------------------------------------------------
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_compressor
#undef dout_prefix
#define dout_prefix _prefix(_dout)
// Prefix every debug-log line emitted from this file with "QatAccel: ".
static std::ostream& _prefix(std::ostream* _dout)
{
  return *_dout << "QatAccel: ";
}
// -----------------------------------------------------------------------------
// default window size for Zlib 1.2.8, negated for raw deflate
#define ZLIB_DEFAULT_WIN_SIZE -15
/* Estimate data expansion after decompression */
static const unsigned int expansion_ratio[] = {5, 20, 50, 100, 200, 1000, 10000};
// unique_ptr deleter for QAT sessions: tear the session down before
// releasing its memory.
void QzSessionDeleter::operator() (struct QzSession_S *session) {
  qzTeardownSession(session);
  delete session;
}
// Populate `params` with QATzip defaults tuned for `alg` and commit
// them via qzSetDefaults(). Only "zlib" is supported (raw deflate with
// the configured zlib level); any other algorithm returns false.
static bool get_qz_params(const std::string &alg, QzSessionParams_T &params) {
  int rc;
  rc = qzGetDefaults(&params);
  if (rc != QZ_OK)
    return false;
  params.direction = QZ_DIR_BOTH;
  params.is_busy_polling = true;
  if (alg == "zlib") {
    params.comp_algorithm = QZ_DEFLATE;
    params.data_fmt = QZ_DEFLATE_RAW;
    params.comp_lvl = g_ceph_context->_conf->compressor_zlib_level;
  }
  else {
    // later, there also has lz4.
    return false;
  }
  rc = qzSetDefaults(&params);
  if (rc != QZ_OK)
    return false;
  return true;
}
static bool setup_session(QatAccel::session_ptr &session, QzSessionParams_T ¶ms) {
int rc;
rc = qzInit(session.get(), QZ_SW_BACKUP_DEFAULT);
if (rc != QZ_OK && rc != QZ_DUPLICATE)
return false;
rc = qzSetupSession(session.get(), ¶ms);
if (rc != QZ_OK) {
return false;
}
return true;
}
// put the session back to the session pool in a RAII manner
struct cached_session_t {
  cached_session_t(QatAccel* accel, QatAccel::session_ptr&& sess)
    : accel{accel}, session{std::move(sess)} {}

  ~cached_session_t() {
    std::scoped_lock lock{accel->mutex};
    // if the cache size is still under its upper bound, the current session is put into
    // accel->sessions. otherwise it's released right
    uint64_t sessions_num = g_ceph_context->_conf.get_val<uint64_t>("qat_compressor_session_max_number");
    if (accel->sessions.size() < sessions_num) {
      accel->sessions.push_back(std::move(session));
    }
  }

  // Borrow the raw session handle; only valid while this wrapper is
  // alive (the handle is returned to the pool on destruction).
  struct QzSession_S* get() {
    assert(static_cast<bool>(session));
    return session.get();
  }

  QatAccel* accel;
  QatAccel::session_ptr session;
};
// Pop a session from the pool, or allocate and configure a fresh one
// when the pool is empty. Returns nullptr when session setup fails.
QatAccel::session_ptr QatAccel::get_session() {
  {
    // Fast path: reuse a pooled session.
    std::scoped_lock lock{mutex};
    if (!sessions.empty()) {
      auto session = std::move(sessions.back());
      sessions.pop_back();
      return session;
    }
  }

  // If there are no available session to use, we try allocate a new
  // session.
  QzSessionParams_T params = {(QzHuffmanHdr_T)0,};
  session_ptr session(new struct QzSession_S());
  memset(session.get(), 0, sizeof(struct QzSession_S));
  if (get_qz_params(alg_name, params) && setup_session(session, params)) {
    return session;
  } else {
    return nullptr;
  }
}
// Nothing to do up front: sessions are created lazily in get_session().
QatAccel::QatAccel() = default;
QatAccel::~QatAccel() {
  // First, we should uninitialize all QATzip session that disconnects all session
  // from a hardware instance and deallocates buffers.
  sessions.clear();
  // Then we close the connection with QAT.
  // where the value of the parameter passed to qzClose() does not matter. as long as
  // it is not nullptr.
  qzClose((QzSession_T*)1);
}
// Record the algorithm this accelerator will handle. Only "zlib" is
// supported; the first successful call wins and later calls are no-ops
// that report success.
bool QatAccel::init(const std::string &alg) {
  std::scoped_lock guard{mutex};
  if (alg_name.empty()) {
    dout(15) << "First use for QAT compressor" << dendl;
    if (alg != "zlib") {
      return false;
    }
    alg_name = alg;
  }
  return true;
}
// Compress each buffer of `in` through QATzip (raw deflate), appending
// the results to `out`. compressor_message is set to the raw-deflate
// window size so the zlib decompressor can interpret the stream.
// Returns 0 on success, -1 on failure.
int QatAccel::compress(const bufferlist &in, bufferlist &out, std::optional<int32_t> &compressor_message) {
  auto s = get_session(); // get a session from the pool
  if (!s) {
    return -1; // session initialization failed
  }
  auto session = cached_session_t{this, std::move(s)}; // returns to the session pool on destruction
  compressor_message = ZLIB_DEFAULT_WIN_SIZE;
  // `begin` reserves one extra leading byte in the first output chunk
  // for a compressor-variation marker (always 0 for now); it is 1 for
  // the first buffer only.
  int begin = 1;
  for (auto &i : in.buffers()) {
    const unsigned char* c_in = (unsigned char*) i.c_str();
    unsigned int len = i.length();
    unsigned int out_len = qzMaxCompressedLength(len, session.get()) + begin;

    bufferptr ptr = buffer::create_small_page_aligned(out_len);
    // Leave room for the marker byte at the front of the first chunk.
    unsigned char* c_out = (unsigned char*)ptr.c_str() + begin;
    int rc = qzCompress(session.get(), c_in, &len, c_out, &out_len, 1);
    if (rc != QZ_OK)
      return -1;
    if (begin) {
      // put a compressor variation mark in front of compressed stream, not used at the moment
      ptr.c_str()[0] = 0;
      out_len += begin;
      begin = 0;
    }
    out.append(ptr, 0, out_len);
  }

  return 0;
}
// Convenience overload: decompress a whole bufferlist by delegating to
// the iterator-based implementation.
int QatAccel::decompress(const bufferlist &in, bufferlist &out, std::optional<int32_t> compressor_message) {
  auto it = in.begin();
  return decompress(it, in.length(), out, compressor_message);
}
// Decompress `compressed_len` bytes starting at `p`, appending the
// result to `dst`. The first byte of the stream is the marker byte
// written by compress() and is skipped. The output buffer is grown by
// successive entries of expansion_ratio[] until qzDecompress stops
// reporting QZ_BUF_ERROR. Returns 0 on success, -1 on failure.
int QatAccel::decompress(bufferlist::const_iterator &p,
		 size_t compressed_len,
		 bufferlist &dst,
		 std::optional<int32_t> compressor_message) {
  auto s = get_session(); // get a session from the pool
  if (!s) {
    return -1; // session initialization failed
  }
  auto session = cached_session_t{this, std::move(s)}; // returns to the session pool on destruction
  // begin==1 only for the very first chunk: skip the marker byte.
  int begin = 1;

  int rc = 0;
  bufferlist tmp;
  size_t remaining = std::min<size_t>(p.get_remaining(), compressed_len);

  while (remaining) {
    unsigned int ratio_idx = 0;
    const char* c_in = nullptr;
    unsigned int len = p.get_ptr_and_advance(remaining, &c_in);
    remaining -= len;
    len -= begin;
    c_in += begin;
    begin = 0;

    // Grow the output buffer until the decompressed data fits.
    unsigned int out_len = QZ_HW_BUFF_SZ;
    bufferptr ptr;
    do {
      while (out_len <= len * expansion_ratio[ratio_idx]) {
        out_len *= 2;
      }

      ptr = buffer::create_small_page_aligned(out_len);
      rc = qzDecompress(session.get(), (const unsigned char*)c_in, &len, (unsigned char*)ptr.c_str(), &out_len);
      ratio_idx++;
    } while (rc == QZ_BUF_ERROR && ratio_idx < std::size(expansion_ratio));

    if (rc == QZ_OK) {
      dst.append(ptr, 0, out_len);
    } else if (rc == QZ_DATA_ERROR) {
      dout(1) << "QAT compressor DATA ERROR" << dendl;
      return -1;
    } else if (rc == QZ_BUF_ERROR) {
      dout(1) << "QAT compressor BUF ERROR" << dendl;
      return -1;
    } else if (rc != QZ_OK) {
      dout(1) << "QAT compressor NOT OK" << dendl;
      return -1;
    }
  }

  return 0;
}
| 7,003 | 28.062241 | 112 | cc |
null | ceph-main/src/compressor/QatAccel.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Intel Corporation
*
* Author: Qiaowei Ren <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_QATACCEL_H
#define CEPH_QATACCEL_H
#include <condition_variable>
#include <memory>
#include <mutex>
#include <optional>
#include <vector>
#include "include/buffer.h"
extern "C" struct QzSession_S; // typedef struct QzSession_S QzSession_T;
// Deleter for session_ptr: tears the QAT session down (see QatAccel.cc)
// before releasing its memory.
struct QzSessionDeleter {
  void operator() (struct QzSession_S *session);
};
// Wrapper around Intel QAT (QATzip) providing zlib-compatible
// compress/decompress, with a mutex-protected pool of reusable
// hardware sessions.
class QatAccel {
 public:
  using session_ptr = std::unique_ptr<struct QzSession_S, QzSessionDeleter>;
  QatAccel();
  ~QatAccel();

  // One-time algorithm selection; only "zlib" is accepted.
  bool init(const std::string &alg);

  // All return 0 on success, -1 on failure; see QatAccel.cc for the
  // stream layout (leading marker byte) and compressor_message usage.
  int compress(const bufferlist &in, bufferlist &out, std::optional<int32_t> &compressor_message);
  int decompress(const bufferlist &in, bufferlist &out, std::optional<int32_t> compressor_message);
  int decompress(bufferlist::const_iterator &p, size_t compressed_len, bufferlist &dst, std::optional<int32_t> compressor_message);

 private:
  // get a session from the pool or create a new one. returns null if session init fails
  session_ptr get_session();

  // cached_session_t returns sessions to the pool on destruction.
  friend struct cached_session_t;
  std::vector<session_ptr> sessions;  // guarded by mutex
  std::mutex mutex;
  std::string alg_name;
};
#endif
| 1,455 | 25.472727 | 131 | h |
null | ceph-main/src/compressor/brotli/BrotliCompressor.cc | #include "brotli/encode.h"
#include "brotli/decode.h"
#include "BrotliCompressor.h"
#include "include/scope_guard.h"
#define MAX_LEN (CEPH_PAGE_SIZE)
// Compress `in` into `out` with a streaming brotli encoder (quality 9,
// window 22). Each input buffer is fed with BROTLI_OPERATION_PROCESS;
// the last one uses BROTLI_OPERATION_FINISH. Returns 0 on success, -1
// on encoder failure. compressor_message is unused by this backend.
int BrotliCompressor::compress(const bufferlist &in, bufferlist &out, boost::optional<int32_t> &compressor_message)
{
  BrotliEncoderState* s = BrotliEncoderCreateInstance(nullptr,
                                                      nullptr,
                                                      nullptr);
  if (!s) {
    return -1;
  }
  // Ensure the encoder is destroyed on every exit path.
  auto sg = make_scope_guard([&s] { BrotliEncoderDestroyInstance(s); });
  BrotliEncoderSetParameter(s, BROTLI_PARAM_QUALITY, (uint32_t)9);
  BrotliEncoderSetParameter(s, BROTLI_PARAM_LGWIN, 22);
  for (auto i = in.buffers().begin(); i != in.buffers().end();) {
    size_t available_in = i->length();
    // Upper bound for this chunk's compressed size.
    size_t max_comp_size = BrotliEncoderMaxCompressedSize(available_in);
    size_t available_out = max_comp_size;
    bufferptr ptr = buffer::create_small_page_aligned(max_comp_size);
    uint8_t* next_out = (uint8_t*)ptr.c_str();
    const uint8_t* next_in = (uint8_t*)i->c_str();
    ++i;
    BrotliEncoderOperation finish = i != in.buffers().end() ?
                                    BROTLI_OPERATION_PROCESS :
                                    BROTLI_OPERATION_FINISH;
    do {
      if (!BrotliEncoderCompressStream(s,
                                       finish,
                                       &available_in,
                                       &next_in,
                                       &available_out,
                                       &next_out,
                                       nullptr)) {
        return -1;
      }
      unsigned have = max_comp_size - available_out;
      out.append(ptr, 0, have);
      // NOTE(review): if available_out ever reached 0 here, next_out and
      // available_out are not reset before the next iteration; this
      // presumably relies on BrotliEncoderMaxCompressedSize() making
      // that unreachable -- confirm against the brotli encoder docs.
    } while (available_out == 0);
    if (BrotliEncoderIsFinished(s)) {
      break;
    }
  }
  return 0;
}
// Decompress `compressed_size` bytes starting at `p` with a streaming
// brotli decoder, appending MAX_LEN-sized chunks to `out` until the
// decoder no longer needs more output space. Returns 0 on success, -1
// on decoder failure. compressor_message is unused by this backend.
int BrotliCompressor::decompress(bufferlist::const_iterator &p,
                                 size_t compressed_size,
                                 bufferlist &out,
                                 boost::optional<int32_t> compressor_message)
{
  BrotliDecoderState* s = BrotliDecoderCreateInstance(nullptr,
                                                      nullptr,
                                                      nullptr);
  if (!s) {
    return -1;
  }
  // Ensure the decoder is destroyed on every exit path.
  auto sg = make_scope_guard([&s] { BrotliDecoderDestroyInstance(s); });
  size_t remaining = std::min<size_t>(p.get_remaining(), compressed_size);
  while (remaining) {
    const uint8_t* next_in;
    // Feed one contiguous chunk of the input at a time.
    size_t len = p.get_ptr_and_advance(remaining, (const char**)&next_in);
    remaining -= len;
    size_t available_in = len;
    BrotliDecoderResult result = BROTLI_DECODER_RESULT_ERROR;
    do {
      size_t available_out = MAX_LEN;
      bufferptr ptr = buffer::create_page_aligned(MAX_LEN);
      uint8_t* next_out = (uint8_t*)ptr.c_str();
      result = BrotliDecoderDecompressStream(s,
                                             &available_in,
                                             &next_in,
                                             &available_out,
                                             &next_out,
                                             0);
      if (!result) {
        // BROTLI_DECODER_RESULT_ERROR: corrupt input.
        return -1;
      }
      unsigned have = MAX_LEN - available_out;
      out.append(ptr, 0, have);
    } while (result == BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT);
    if (BrotliDecoderIsFinished(s)) {
      break;
    }
  }
  return 0;
}
// Convenience overload: decompress a whole bufferlist via the
// iterator-based implementation.
int BrotliCompressor::decompress(const bufferlist &in, bufferlist &out, boost::optional<int32_t> compressor_message)
{
  auto it = std::cbegin(in);
  return decompress(it, in.length(), out, compressor_message);
}
| 3,661 | 36.752577 | 117 | cc |
null | ceph-main/src/compressor/brotli/BrotliCompressor.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 BI SHUN KE <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_BROTLICOMPRESSOR_H
#define CEPH_BROTLICOMPRESSOR_H
#include "include/buffer.h"
#include "compressor/Compressor.h"
// Brotli-backed Compressor implementation; the heavy lifting lives in
// BrotliCompressor.cc.
// NOTE(review): the base class (compressor/Compressor.h) declares these
// virtuals with std::optional<int32_t>, while the signatures here use
// boost::optional -- as written they would not match as overrides.
// Confirm which optional type the build actually compiles against.
class BrotliCompressor : public Compressor
{
 public:
  BrotliCompressor() : Compressor(COMP_ALG_BROTLI, "brotli") {}
  int compress(const bufferlist &in, bufferlist &out, boost::optional<int32_t> &compressor_message) override;
  int decompress(const bufferlist &in, bufferlist &out, boost::optional<int32_t> compressor_message) override;
  int decompress(bufferlist::const_iterator &p, size_t compressed_len, bufferlist &out, boost::optional<int32_t> compressor_message) override;
};
#endif //CEPH_BROTLICOMPRESSOR_H
| 982 | 29.71875 | 142 | h |
null | ceph-main/src/compressor/brotli/CompressionPluginBrotli.cc | #include "acconfig.h"
#include "ceph_ver.h"
#include "CompressionPluginBrotli.h"
#include "common/ceph_context.h"
// Plugin ABI hook: report the Ceph version this plugin was built from.
const char *__ceph_plugin_version()
{
  return CEPH_GIT_NICE_VER;
}
// Plugin ABI entry point: register the brotli plugin with the
// context's plugin registry under the given type/name.
int __ceph_plugin_init(CephContext *cct,
                       const std::string& type,
                       const std::string& name)
{
  auto registry = cct->get_plugin_registry();
  return registry->add(type, name, new CompressionPluginBrotli(cct));
}
| 454 | 21.75 | 69 | cc |
null | ceph-main/src/compressor/brotli/CompressionPluginBrotli.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 BI SHUN KE <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMPRESSION_PLUGIN_BROTLI_H
#define CEPH_COMPRESSION_PLUGIN_BROTLI_H
#include "ceph_ver.h"
#include "compressor/CompressionPlugin.h"
#include "BrotliCompressor.h"
class CompressionPluginBrotli : public CompressionPlugin {
public:
explicit CompressionPluginBrotli(CephContext *cct) : CompressionPlugin(cct)
{}
virtual int factory(CompressorRef *cs, std::ostream *ss)
{
if (compressor == nullptr) {
BrotliCompressor *interface = new BrotliCompressor();
compressor = CompressorRef(interface);
}
*cs = compressor;
return 0;
}
};
#endif
| 916 | 23.783784 | 77 | h |
null | ceph-main/src/compressor/lz4/CompressionPluginLZ4.cc | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 XSKY Inc.
*
* Author: Haomai Wang <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include "acconfig.h"
#include "ceph_ver.h"
#include "common/ceph_context.h"
#include "CompressionPluginLZ4.h"
// -----------------------------------------------------------------------------
// Plugin ABI hook: report the Ceph version this plugin was built from.
const char *__ceph_plugin_version()
{
  return CEPH_GIT_NICE_VER;
}
// -----------------------------------------------------------------------------
// Plugin ABI entry point: register the LZ4 plugin with the context's
// plugin registry under the given type/name.
int __ceph_plugin_init(CephContext *cct,
                       const std::string& type,
                       const std::string& name)
{
  return cct->get_plugin_registry()->add(type, name,
                                         new CompressionPluginLZ4(cct));
}
null | ceph-main/src/compressor/lz4/CompressionPluginLZ4.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 XSKY Inc.
*
* Author: Haomai Wang <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_COMPRESSION_PLUGIN_LZ4_H
#define CEPH_COMPRESSION_PLUGIN_LZ4_H
// -----------------------------------------------------------------------------
#include "ceph_ver.h"
#include "compressor/CompressionPlugin.h"
#include "LZ4Compressor.h"
// -----------------------------------------------------------------------------
// Plugin wrapper exposing LZ4Compressor through the compression plugin
// registry; the compressor instance is created on first use and cached
// for subsequent factory() calls.
class CompressionPluginLZ4 : public ceph::CompressionPlugin {
public:

  explicit CompressionPluginLZ4(CephContext* cct) : CompressionPlugin(cct)
  {}

  // Return the (lazily created) shared LZ4Compressor; always 0.
  int factory(CompressorRef *cs, std::ostream *ss) override {
    if (compressor == 0) {
      LZ4Compressor *interface = new LZ4Compressor(cct);
      compressor = CompressorRef(interface);
    }
    *cs = compressor;
    return 0;
  }
};
#endif
| 1,138 | 26.119048 | 80 | h |
null | ceph-main/src/compressor/lz4/LZ4Compressor.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LZ4COMPRESSOR_H
#define CEPH_LZ4COMPRESSOR_H
#include <optional>
#include <lz4.h>
#include "compressor/Compressor.h"
#include "include/buffer.h"
#include "include/encoding.h"
#include "common/config.h"
// LZ4 block compressor. The compressed stream is framed as:
//   u32 chunk-count, then per chunk { u32 raw-len, u32 compressed-len },
// followed by the concatenated compressed chunks (one chunk per source
// buffer segment).
class LZ4Compressor : public Compressor {
 public:
  LZ4Compressor(CephContext* cct) : Compressor(COMP_ALG_LZ4, "lz4") {
#ifdef HAVE_QATZIP
    // Prefer QAT hardware offload when configured and initializable.
    if (cct->_conf->qat_compressor_enabled && qat_accel.init("lz4"))
      qat_enabled = true;
    else
      qat_enabled = false;
#endif
  }
  // Compress src into dst; returns 0 on success, -1 on an LZ4 error.
  int compress(const ceph::buffer::list &src, ceph::buffer::list &dst, std::optional<int32_t> &compressor_message) override {
    // older versions of liblz4 introduce bit errors when compressing
    // fragmented buffers. this was fixed in lz4 commit
    // af127334670a5e7b710bbd6adb71aa7c3ef0cd72, which first
    // appeared in v1.8.2.
    //
    // workaround: rebuild if not contiguous.
    if (!src.is_contiguous()) {
      ceph::buffer::list new_src = src;
      new_src.rebuild();
      return compress(new_src, dst, compressor_message);
    }
#ifdef HAVE_QATZIP
    if (qat_enabled)
      return qat_accel.compress(src, dst, compressor_message);
#endif
    // one output buffer sized for the worst case of the whole input
    ceph::buffer::ptr outptr = ceph::buffer::create_small_page_aligned(
      LZ4_compressBound(src.length()));
    LZ4_stream_t lz4_stream;
    LZ4_resetStream(&lz4_stream);
    using ceph::encode;
    auto p = src.begin();
    size_t left = src.length();
    int pos = 0;
    const char *data;
    unsigned num = src.get_num_buffers();
    // frame header: number of chunks (one per source buffer segment)
    encode((uint32_t)num, dst);
    while (left) {
      uint32_t origin_len = p.get_ptr_and_advance(left, &data);
      int compressed_len = LZ4_compress_fast_continue(
        &lz4_stream, data, outptr.c_str()+pos, origin_len,
        outptr.length()-pos, 1);
      if (compressed_len <= 0)
        return -1;
      pos += compressed_len;
      left -= origin_len;
      // per-chunk sizes; decompress() needs both to rebuild the stream
      encode(origin_len, dst);
      encode((uint32_t)compressed_len, dst);
    }
    ceph_assert(p.end());
    dst.append(outptr, 0, pos);
    return 0;
  }
  // Convenience overload over a whole bufferlist.
  int decompress(const ceph::buffer::list &src, ceph::buffer::list &dst, std::optional<int32_t> compressor_message) override {
#ifdef HAVE_QATZIP
    if (qat_enabled)
      return qat_accel.decompress(src, dst, compressor_message);
#endif
    auto i = std::cbegin(src);
    return decompress(i, src.length(), dst, compressor_message);
  }
  // Decompress a stream produced by compress().
  // Returns 0 on success, -1 on an LZ4 decode error, -2 if a chunk
  // inflated to an unexpected size (corrupt framing).
  int decompress(ceph::buffer::list::const_iterator &p,
                 size_t compressed_len,
                 ceph::buffer::list &dst,
                 std::optional<int32_t> compressor_message) override {
#ifdef HAVE_QATZIP
    if (qat_enabled)
      return qat_accel.decompress(p, compressed_len, dst, compressor_message);
#endif
    using ceph::decode;
    uint32_t count;
    decode(count, p);
    // read the per-chunk (raw-len, compressed-len) table up front so we
    // can size the output buffer exactly
    std::vector<std::pair<uint32_t, uint32_t> > compressed_pairs(count);
    uint32_t total_origin = 0;
    for (auto& [dst_size, src_size] : compressed_pairs) {
      decode(dst_size, p);
      decode(src_size, p);
      total_origin += dst_size;
    }
    compressed_len -= (sizeof(uint32_t) + sizeof(uint32_t) * count * 2);
    ceph::buffer::ptr dstptr(total_origin);
    LZ4_streamDecode_t lz4_stream_decode;
    LZ4_setStreamDecode(&lz4_stream_decode, nullptr, 0);
    // LZ4 needs the compressed payload contiguous: use the current
    // buffer segment directly when possible, otherwise deep-copy
    ceph::buffer::ptr cur_ptr = p.get_current_ptr();
    ceph::buffer::ptr *ptr = &cur_ptr;
    std::optional<ceph::buffer::ptr> data_holder;
    if (compressed_len != cur_ptr.length()) {
      data_holder.emplace(compressed_len);
      p.copy_deep(compressed_len, *data_holder);
      ptr = &*data_holder;
    }
    char *c_in = ptr->c_str();
    char *c_out = dstptr.c_str();
    for (unsigned i = 0; i < count; ++i) {
      int r = LZ4_decompress_safe_continue(
        &lz4_stream_decode, c_in, c_out, compressed_pairs[i].second, compressed_pairs[i].first);
      if (r == (int)compressed_pairs[i].first) {
        c_in += compressed_pairs[i].second;
        c_out += compressed_pairs[i].first;
      } else if (r < 0) {
        return -1;
      } else {
        return -2;
      }
    }
    dst.push_back(std::move(dstptr));
    return 0;
  }
};
#endif
| 4,494 | 29.371622 | 126 | h |
null | ceph-main/src/compressor/snappy/CompressionPluginSnappy.cc | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Mirantis, Inc.
*
* Author: Alyona Kiseleva <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
// -----------------------------------------------------------------------------
#include "acconfig.h"
#include "ceph_ver.h"
#include "common/ceph_context.h"
#include "CompressionPluginSnappy.h"
// -----------------------------------------------------------------------------
// Report the plugin's build version so the plugin registry can verify
// it matches the daemon's build.
const char *__ceph_plugin_version()
{
  return CEPH_GIT_NICE_VER;
}
// -----------------------------------------------------------------------------
// Plugin entry point: register the snappy compression plugin with the
// context's plugin registry under the given type/name.
int __ceph_plugin_init(CephContext *cct,
                       const std::string& type,
                       const std::string& name)
{
  return cct->get_plugin_registry()->add(type, name,
                                         new CompressionPluginSnappy(cct));
}
| 1,106 | 27.384615 | 80 | cc |
null | ceph-main/src/compressor/snappy/CompressionPluginSnappy.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Mirantis, Inc.
*
* Author: Alyona Kiseleva <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_COMPRESSION_PLUGIN_SNAPPY_H
#define CEPH_COMPRESSION_PLUGIN_SNAPPY_H
// -----------------------------------------------------------------------------
#include "compressor/CompressionPlugin.h"
#include "SnappyCompressor.h"
// -----------------------------------------------------------------------------
// Plugin wrapper exposing the snappy compressor through the generic
// CompressionPlugin factory interface.
class CompressionPluginSnappy : public ceph::CompressionPlugin {
public:
  explicit CompressionPluginSnappy(CephContext* cct) : CompressionPlugin(cct)
  {}

  // Hand out a lazily-created, shared snappy compressor instance.
  int factory(CompressorRef *cs,
              std::ostream *ss) override
  {
    if (!compressor) {
      compressor = CompressorRef(new SnappyCompressor(cct));
    }
    *cs = compressor;
    return 0;
  }
};
#endif
| 1,173 | 26.302326 | 80 | h |
null | ceph-main/src/compressor/snappy/SnappyCompressor.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_SNAPPYCOMPRESSOR_H
#define CEPH_SNAPPYCOMPRESSOR_H
#include <snappy.h>
#include <snappy-sinksource.h>
#include "common/config.h"
#include "compressor/Compressor.h"
#include "include/buffer.h"
// Adapts a bufferlist iterator to snappy's pull-based Source interface,
// bounded to at most _input_len bytes (clamped to what the iterator can
// actually supply).
class CEPH_BUFFER_API BufferlistSource : public snappy::Source {
  ceph::bufferlist::const_iterator pb;  // read cursor into the bufferlist
  size_t remaining;                     // bytes still available from this source
 public:
  explicit BufferlistSource(ceph::bufferlist::const_iterator _pb, size_t _input_len)
    : pb(_pb),
      remaining(_input_len) {
    remaining = std::min(remaining, (size_t)pb.get_remaining());
  }
  // Bytes left to read.
  size_t Available() const override {
    return remaining;
  }
  // Expose the next contiguous run of bytes without consuming them;
  // *len receives the run length (0 when exhausted).
  const char *Peek(size_t *len) override {
    const char *data = NULL;
    *len = 0;
    size_t avail = Available();
    if (avail) {
      // advance a copy so the real cursor is untouched
      auto ptmp = pb;
      *len = ptmp.get_ptr_and_advance(avail, &data);
    }
    return data;
  }
  // Consume n bytes (must not exceed what Available() reported).
  void Skip(size_t n) override {
    ceph_assert(n <= remaining);
    pb += n;
    remaining -= n;
  }
  // Current cursor position, e.g. to resume reading after decompression.
  ceph::bufferlist::const_iterator get_pos() const {
    return pb;
  }
};
// Snappy compressor backed by the streaming Source/Sink snappy API,
// with optional QAT hardware offload.
class SnappyCompressor : public Compressor {
 public:
  SnappyCompressor(CephContext* cct) : Compressor(COMP_ALG_SNAPPY, "snappy") {
#ifdef HAVE_QATZIP
    // Prefer QAT hardware offload when configured and initializable.
    if (cct->_conf->qat_compressor_enabled && qat_accel.init("snappy"))
      qat_enabled = true;
    else
      qat_enabled = false;
#endif
  }
  // Compress src into dst; snappy itself cannot fail here, so this
  // always returns 0 (QAT path may differ).
  int compress(const ceph::bufferlist &src, ceph::bufferlist &dst, std::optional<int32_t> &compressor_message) override {
#ifdef HAVE_QATZIP
    if (qat_enabled)
      return qat_accel.compress(src, dst, compressor_message);
#endif
    BufferlistSource source(const_cast<ceph::bufferlist&>(src).begin(), src.length());
    // single output buffer sized for snappy's worst case
    ceph::bufferptr ptr = ceph::buffer::create_small_page_aligned(
      snappy::MaxCompressedLength(src.length()));
    snappy::UncheckedByteArraySink sink(ptr.c_str());
    snappy::Compress(&source, &sink);
    dst.append(ptr, 0, sink.CurrentDestination() - ptr.c_str());
    return 0;
  }
  // Convenience overload over a whole bufferlist.
  int decompress(const ceph::bufferlist &src, ceph::bufferlist &dst, std::optional<int32_t> compressor_message) override {
#ifdef HAVE_QATZIP
    if (qat_enabled)
      return qat_accel.decompress(src, dst, compressor_message);
#endif
    auto i = src.begin();
    return decompress(i, src.length(), dst, compressor_message);
  }
  // Decompress; returns 0 on success, -1 if the stored uncompressed
  // length cannot be read, -2 if decoding fails. On success p is
  // advanced past the compressed payload.
  int decompress(ceph::bufferlist::const_iterator &p,
                 size_t compressed_len,
                 ceph::bufferlist &dst,
                 std::optional<int32_t> compressor_message) override {
#ifdef HAVE_QATZIP
    if (qat_enabled)
      return qat_accel.decompress(p, compressed_len, dst, compressor_message);
#endif
    // first pass: read the uncompressed length from the stream header
    BufferlistSource source_1(p, compressed_len);
    uint32_t res_len = 0;
    if (!snappy::GetUncompressedLength(&source_1, &res_len)) {
      return -1;
    }
    // second pass: actually decode into an exactly-sized buffer
    BufferlistSource source_2(p, compressed_len);
    ceph::bufferptr ptr(res_len);
    if (snappy::RawUncompress(&source_2, ptr.c_str())) {
      p = source_2.get_pos();
      dst.append(ptr);
      return 0;
    }
    return -2;
  }
};
#endif
| 3,386 | 27.948718 | 122 | h |
null | ceph-main/src/compressor/zlib/CompressionPluginZlib.cc | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Mirantis, Inc.
*
* Author: Alyona Kiseleva <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
// -----------------------------------------------------------------------------
#include "acconfig.h"
#include "ceph_ver.h"
#include "common/ceph_context.h"
#include "CompressionPluginZlib.h"
// -----------------------------------------------------------------------------
// Report the plugin's build version so the plugin registry can verify
// it matches the daemon's build.
const char *__ceph_plugin_version()
{
  return CEPH_GIT_NICE_VER;
}
// -----------------------------------------------------------------------------
// Plugin entry point: register the zlib compression plugin with the
// context's plugin registry under the given type/name.
int __ceph_plugin_init(CephContext *cct,
                       const std::string& type,
                       const std::string& name)
{
  return cct->get_plugin_registry()->add(type, name,
                                         new CompressionPluginZlib(cct));
}
| 1,102 | 27.282051 | 80 | cc |
null | ceph-main/src/compressor/zlib/CompressionPluginZlib.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Mirantis, Inc.
*
* Author: Alyona Kiseleva <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_COMPRESSION_PLUGIN_ZLIB_H
#define CEPH_COMPRESSION_PLUGIN_ZLIB_H
// -----------------------------------------------------------------------------
#include "arch/probe.h"
#include "arch/intel.h"
#include "arch/arm.h"
#include "common/ceph_context.h"
#include "compressor/CompressionPlugin.h"
#include "ZlibCompressor.h"
// -----------------------------------------------------------------------------
// Plugin wrapper for the zlib compressor. Probes the CPU at factory
// time and, when configured, substitutes the ISA-L accelerated backend
// on capable x86-64 / aarch64 hardware.
class CompressionPluginZlib : public ceph::CompressionPlugin {
public:
  bool has_isal = false;  // backend the cached compressor was built with
  explicit CompressionPluginZlib(CephContext *cct) : CompressionPlugin(cct)
  {}
  int factory(CompressorRef *cs,
              std::ostream *ss) override
  {
    bool isal = false;
#if defined(__i386__) || defined(__x86_64__)
    // other arches or lack of support result in isal = false
    if (cct->_conf->compressor_zlib_isal) {
      ceph_arch_probe();
      isal = (ceph_arch_intel_pclmul && ceph_arch_intel_sse41);
    }
#elif defined(__aarch64__)
    if (cct->_conf->compressor_zlib_isal) {
      ceph_arch_probe();
      isal = (ceph_arch_aarch64_pmull && ceph_arch_neon);
    }
#endif
    // rebuild the cached compressor if none exists yet or the desired
    // backend (isal vs plain zlib) changed since it was created
    if (compressor == 0 || has_isal != isal) {
      compressor = std::make_shared<ZlibCompressor>(cct, isal);
      has_isal = isal;
    }
    *cs = compressor;
    return 0;
  }
};
#endif
| 1,726 | 27.311475 | 80 | h |
null | ceph-main/src/compressor/zlib/ZlibCompressor.cc | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Mirantis, Inc.
*
* Author: Alyona Kiseleva <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
// -----------------------------------------------------------------------------
#include "common/debug.h"
#include "ZlibCompressor.h"
#include "osd/osd_types.h"
#include "isa-l/include/igzip_lib.h"
// -----------------------------------------------------------------------------
#include <zlib.h>
// -----------------------------------------------------------------------------
#define dout_context cct
#define dout_subsys ceph_subsys_compressor
#undef dout_prefix
#define dout_prefix _prefix(_dout)
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
using std::ostream;
using ceph::bufferlist;
using ceph::bufferptr;
// dout log-line prefix for this compressor's debug output.
static ostream&
_prefix(std::ostream* _dout)
{
  return *_dout << "ZlibCompressor: ";
}
// -----------------------------------------------------------------------------
#define MAX_LEN (CEPH_PAGE_SIZE)
// default window size for Zlib 1.2.8, negated for raw deflate
#define ZLIB_DEFAULT_WIN_SIZE -15
// desired memory usage level. increasing to 9 doesn't speed things up
// significantly (helps only on >=16K blocks) and sometimes degrades
// compression ratio.
#define ZLIB_MEMORY_LEVEL 8
// Software zlib (deflate) backend. The output is a raw deflate stream
// preceded by a single marker byte (0 = zlib backend); the negotiated
// window size is reported to the caller via compressor_message.
// Returns 0 on success, -1 on any zlib error.
int ZlibCompressor::zlib_compress(const bufferlist &in, bufferlist &out, std::optional<int32_t> &compressor_message)
{
  int ret;
  unsigned have;
  z_stream strm;
  unsigned char* c_in;
  int begin = 1;
  /* allocate deflate state */
  strm.zalloc = Z_NULL;
  strm.zfree = Z_NULL;
  strm.opaque = Z_NULL;
  ret = deflateInit2(&strm, cct->_conf->compressor_zlib_level, Z_DEFLATED, cct->_conf->compressor_zlib_winsize, ZLIB_MEMORY_LEVEL, Z_DEFAULT_STRATEGY);
  if (ret != Z_OK) {
    dout(1) << "Compression init error: init return "
         << ret << " instead of Z_OK" << dendl;
    return -1;
  }
  compressor_message = cct->_conf->compressor_zlib_winsize;
  // feed the stream one bufferlist segment at a time; only the last
  // segment gets Z_FINISH
  for (ceph::bufferlist::buffers_t::const_iterator i = in.buffers().begin();
      i != in.buffers().end();) {
    c_in = (unsigned char*) (*i).c_str();
    long unsigned int len = (*i).length();
    ++i;
    strm.avail_in = len;
    int flush = i != in.buffers().end() ? Z_NO_FLUSH : Z_FINISH;
    strm.next_in = c_in;
    do {
      bufferptr ptr = ceph::buffer::create_page_aligned(MAX_LEN);
      strm.next_out = (unsigned char*)ptr.c_str() + begin;
      strm.avail_out = MAX_LEN - begin;
      if (begin) {
        // put a compressor variation mark in front of compressed stream, not used at the moment
        ptr.c_str()[0] = 0;
        begin = 0;
      }
      ret = deflate(&strm, flush);    /* no bad return value */
      if (ret == Z_STREAM_ERROR) {
         dout(1) << "Compression error: compress return Z_STREAM_ERROR("
               << ret << ")" << dendl;
         deflateEnd(&strm);
         return -1;
      }
      have = MAX_LEN - strm.avail_out;
      out.append(ptr, 0, have);
    } while (strm.avail_out == 0);
    if (strm.avail_in != 0) {
      dout(10) << "Compression error: unused input" << dendl;
      deflateEnd(&strm);
      return -1;
    }
  }
  deflateEnd(&strm);
  return 0;
}
#if (__x86_64__ && defined(HAVE_NASM_X64_AVX2)) || defined(__aarch64__)
// ISA-L accelerated deflate backend (compiled only on capable arches).
// Output framing mirrors zlib_compress(): one marker byte (1 = ISA-L
// backend) followed by the raw deflate stream; ISA-L always uses the
// default window size, which is reported via compressor_message.
// Returns 0 on success, -1 on any ISA-L error.
int ZlibCompressor::isal_compress(const bufferlist &in, bufferlist &out, std::optional<int32_t> &compressor_message)
{
  int ret;
  unsigned have;
  isal_zstream strm;
  unsigned char* c_in;
  int begin = 1;
  /* allocate deflate state */
  isal_deflate_init(&strm);
  strm.end_of_stream = 0;
  compressor_message = ZLIB_DEFAULT_WIN_SIZE;
  // feed the stream one bufferlist segment at a time; end_of_stream is
  // raised on the final segment
  for (ceph::bufferlist::buffers_t::const_iterator i = in.buffers().begin();
      i != in.buffers().end();) {
    c_in = (unsigned char*) (*i).c_str();
    long unsigned int len = (*i).length();
    ++i;
    strm.avail_in = len;
    strm.end_of_stream = (i == in.buffers().end());
    strm.flush = FINISH_FLUSH;
    strm.next_in = c_in;
    do {
      bufferptr ptr = ceph::buffer::create_page_aligned(MAX_LEN);
      strm.next_out = (unsigned char*)ptr.c_str() + begin;
      strm.avail_out = MAX_LEN - begin;
      if (begin) {
        // put a compressor variation mark in front of compressed stream, not used at the moment
        ptr.c_str()[0] = 1;
        begin = 0;
      }
      ret = isal_deflate(&strm);
      if (ret != COMP_OK) {
         dout(1) << "Compression error: isal_deflate return error ("
               << ret << ")" << dendl;
         return -1;
      }
      have = MAX_LEN - strm.avail_out;
      out.append(ptr, 0, have);
    } while (strm.avail_out == 0);
    if (strm.avail_in != 0) {
      dout(10) << "Compression error: unused input" << dendl;
      return -1;
    }
  }
  return 0;
}
#endif
// Dispatch compression to the preferred available backend:
// QAT offload first (if built and enabled), then ISA-L (if built,
// enabled and supported on this arch), else plain software zlib.
int ZlibCompressor::compress(const bufferlist &in, bufferlist &out, std::optional<int32_t> &compressor_message)
{
#ifdef HAVE_QATZIP
  if (qat_enabled)
    return qat_accel.compress(in, out, compressor_message);
#endif
#if (__x86_64__ && defined(HAVE_NASM_X64_AVX2)) || defined(__aarch64__)
  if (isal_enabled)
    return isal_compress(in, out, compressor_message);
  else
    return zlib_compress(in, out, compressor_message);
#else
  return zlib_compress(in, out, compressor_message);
#endif
}
// Inflate a stream produced by zlib_compress()/isal_compress(). The
// leading marker byte written by the compressor is skipped via `begin`.
// compressor_message carries the window size negotiated at compress
// time; absent means the default. Returns 0 on success, -1 on error.
int ZlibCompressor::decompress(bufferlist::const_iterator &p, size_t compressed_size, bufferlist &out, std::optional<int32_t> compressor_message)
{
#ifdef HAVE_QATZIP
  // QAT can only decompress with the default window size
  if (qat_enabled && (!compressor_message || *compressor_message == ZLIB_DEFAULT_WIN_SIZE))
    return qat_accel.decompress(p, compressed_size, out, compressor_message);
#endif
  int ret;
  unsigned have;
  z_stream strm;
  const char* c_in;
  int begin = 1;
  /* allocate inflate state */
  strm.zalloc = Z_NULL;
  strm.zfree = Z_NULL;
  strm.opaque = Z_NULL;
  strm.avail_in = 0;
  strm.next_in = Z_NULL;
  // choose the variation of compressor
  if (!compressor_message)
    compressor_message = ZLIB_DEFAULT_WIN_SIZE;
  ret = inflateInit2(&strm, *compressor_message);
  if (ret != Z_OK) {
    dout(1) << "Decompression init error: init return "
         << ret << " instead of Z_OK" << dendl;
    return -1;
  }
  size_t remaining = std::min<size_t>(p.get_remaining(), compressed_size);
  while(remaining) {
    long unsigned int len = p.get_ptr_and_advance(remaining, &c_in);
    remaining -= len;
    // `begin` skips the 1-byte variation mark on the first segment only
    strm.avail_in = len - begin;
    strm.next_in = (unsigned char*)c_in + begin;
    begin = 0;
    do {
      strm.avail_out = MAX_LEN;
      bufferptr ptr = ceph::buffer::create_page_aligned(MAX_LEN);
      strm.next_out = (unsigned char*)ptr.c_str();
      ret = inflate(&strm, Z_NO_FLUSH);
      if (ret != Z_OK && ret != Z_STREAM_END && ret != Z_BUF_ERROR) {
       dout(1) << "Decompression error: decompress return "
            << ret << dendl;
       inflateEnd(&strm);
       return -1;
      }
      have = MAX_LEN - strm.avail_out;
      out.append(ptr, 0, have);
    } while (strm.avail_out == 0);
  }
  /* clean up and return */
  (void)inflateEnd(&strm);
  return 0;
}
int ZlibCompressor::decompress(const bufferlist &in, bufferlist &out, std::optional<int32_t> compressor_message)
{
auto i = std::cbegin(in);
return decompress(i, in.length(), out, compressor_message);
}
| 7,601 | 29.047431 | 151 | cc |
null | ceph-main/src/compressor/zlib/ZlibCompressor.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Mirantis, Inc.
*
* Author: Alyona Kiseleva <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_COMPRESSION_ZLIB_H
#define CEPH_COMPRESSION_ZLIB_H
#include "common/config.h"
#include "compressor/Compressor.h"
class ZlibCompressor : public Compressor {
bool isal_enabled;
CephContext *const cct;
public:
ZlibCompressor(CephContext *cct, bool isal)
: Compressor(COMP_ALG_ZLIB, "zlib"), isal_enabled(isal), cct(cct) {
#ifdef HAVE_QATZIP
if (cct->_conf->qat_compressor_enabled && qat_accel.init("zlib"))
qat_enabled = true;
else
qat_enabled = false;
#endif
}
int compress(const ceph::buffer::list &in, ceph::buffer::list &out, std::optional<int32_t> &compressor_message) override;
int decompress(const ceph::buffer::list &in, ceph::buffer::list &out, std::optional<int32_t> compressor_message) override;
int decompress(ceph::buffer::list::const_iterator &p, size_t compressed_len, ceph::buffer::list &out, std::optional<int32_t> compressor_message) override;
private:
int zlib_compress(const ceph::buffer::list &in, ceph::buffer::list &out, std::optional<int32_t> &compressor_message);
int isal_compress(const ceph::buffer::list &in, ceph::buffer::list &out, std::optional<int32_t> &compressor_message);
};
#endif
| 1,691 | 35 | 156 | h |
null | ceph-main/src/compressor/zstd/CompressionPluginZstd.cc | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Mirantis, Inc.
*
* Author: Alyona Kiseleva <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include "acconfig.h"
#include "ceph_ver.h"
#include "common/ceph_context.h"
#include "CompressionPluginZstd.h"
// -----------------------------------------------------------------------------
// Report the plugin's build version so the plugin registry can verify
// it matches the daemon's build.
const char *__ceph_plugin_version()
{
  return CEPH_GIT_NICE_VER;
}
// -----------------------------------------------------------------------------
// Plugin entry point: register the zstd compression plugin with the
// context's plugin registry under the given type/name.
int __ceph_plugin_init(CephContext *cct,
                       const std::string& type,
                       const std::string& name)
{
  return cct->get_plugin_registry()->add(type, name,
                                         new CompressionPluginZstd(cct));
}
| 1,020 | 26.594595 | 80 | cc |
null | ceph-main/src/compressor/zstd/CompressionPluginZstd.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Mirantis, Inc.
*
* Author: Alyona Kiseleva <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_COMPRESSION_PLUGIN_ZSTD_H
#define CEPH_COMPRESSION_PLUGIN_ZSTD_H
// -----------------------------------------------------------------------------
#include "ceph_ver.h"
#include "compressor/CompressionPlugin.h"
#include "ZstdCompressor.h"
// -----------------------------------------------------------------------------
// Plugin wrapper exposing the zstd compressor through the generic
// CompressionPlugin factory interface.
class CompressionPluginZstd : public ceph::CompressionPlugin {
public:
  explicit CompressionPluginZstd(CephContext* cct) : CompressionPlugin(cct)
  {}

  // Hand out a lazily-created, shared zstd compressor instance.
  int factory(CompressorRef *cs,
              std::ostream *ss) override
  {
    if (!compressor) {
      compressor = CompressorRef(new ZstdCompressor(cct));
    }
    *cs = compressor;
    return 0;
  }
};
#endif
| 1,181 | 25.863636 | 80 | h |
null | ceph-main/src/compressor/zstd/ZstdCompressor.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_ZSTDCOMPRESSOR_H
#define CEPH_ZSTDCOMPRESSOR_H
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd/lib/zstd.h"
#include "include/buffer.h"
#include "include/encoding.h"
#include "compressor/Compressor.h"
class ZstdCompressor : public Compressor {
public:
ZstdCompressor(CephContext *cct) : Compressor(COMP_ALG_ZSTD, "zstd"), cct(cct) {}
int compress(const ceph::buffer::list &src, ceph::buffer::list &dst, std::optional<int32_t> &compressor_message) override {
ZSTD_CStream *s = ZSTD_createCStream();
ZSTD_initCStream_srcSize(s, cct->_conf->compressor_zstd_level, src.length());
auto p = src.begin();
size_t left = src.length();
size_t const out_max = ZSTD_compressBound(left);
ceph::buffer::ptr outptr = ceph::buffer::create_small_page_aligned(out_max);
ZSTD_outBuffer_s outbuf;
outbuf.dst = outptr.c_str();
outbuf.size = outptr.length();
outbuf.pos = 0;
while (left) {
ceph_assert(!p.end());
struct ZSTD_inBuffer_s inbuf;
inbuf.pos = 0;
inbuf.size = p.get_ptr_and_advance(left, (const char**)&inbuf.src);
left -= inbuf.size;
ZSTD_EndDirective const zed = (left==0) ? ZSTD_e_end : ZSTD_e_continue;
size_t r = ZSTD_compressStream2(s, &outbuf, &inbuf, zed);
if (ZSTD_isError(r)) {
return -EINVAL;
}
}
ceph_assert(p.end());
ZSTD_freeCStream(s);
// prefix with decompressed length
ceph::encode((uint32_t)src.length(), dst);
dst.append(outptr, 0, outbuf.pos);
return 0;
}
int decompress(const ceph::buffer::list &src, ceph::buffer::list &dst, std::optional<int32_t> compressor_message) override {
auto i = std::cbegin(src);
return decompress(i, src.length(), dst, compressor_message);
}
int decompress(ceph::buffer::list::const_iterator &p,
size_t compressed_len,
ceph::buffer::list &dst,
std::optional<int32_t> compressor_message) override {
if (compressed_len < 4) {
return -1;
}
compressed_len -= 4;
uint32_t dst_len;
ceph::decode(dst_len, p);
ceph::buffer::ptr dstptr(dst_len);
ZSTD_outBuffer_s outbuf;
outbuf.dst = dstptr.c_str();
outbuf.size = dstptr.length();
outbuf.pos = 0;
ZSTD_DStream *s = ZSTD_createDStream();
ZSTD_initDStream(s);
while (compressed_len > 0) {
if (p.end()) {
return -1;
}
ZSTD_inBuffer_s inbuf;
inbuf.pos = 0;
inbuf.size = p.get_ptr_and_advance(compressed_len,
(const char**)&inbuf.src);
ZSTD_decompressStream(s, &outbuf, &inbuf);
compressed_len -= inbuf.size;
}
ZSTD_freeDStream(s);
dst.append(dstptr, 0, outbuf.pos);
return 0;
}
private:
CephContext *const cct;
};
#endif
| 3,132 | 28.009259 | 126 | h |
null | ceph-main/src/crimson/admin/admin_socket.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "crimson/admin/admin_socket.h"
#include <boost/algorithm/string/join.hpp>
#include <fmt/format.h>
#include <fmt/ranges.h>
#include <seastar/net/api.hh>
#include <seastar/net/inet_address.hh>
#include <seastar/core/future-util.hh>
#include <seastar/core/reactor.hh>
#include <seastar/core/sleep.hh>
#include <seastar/core/thread.hh>
#include <seastar/util/std-compat.hh>
#include "common/options.h"
#include "common/version.h"
#include "messages/MCommand.h"
#include "messages/MCommandReply.h"
#include "crimson/common/log.h"
#include "crimson/net/Socket.h"
#include "crimson/net/Connection.h"
using namespace crimson::common;
using namespace std::literals;
using ceph::common::cmdmap_from_json;
using ceph::common::cmd_getval;
using ceph::common::bad_cmd_get;
using ceph::common::validate_cmd;
using ceph::common::dump_cmd_and_help_to_json;
namespace {
seastar::logger& logger()
{
return crimson::get_logger(ceph_subsys_osd);
}
} // namespace
using std::string;
using std::string_view;
using std::stringstream;
using std::unique_ptr;
namespace crimson::admin {
// Build a result carrying just a return code and error text.
tell_result_t::tell_result_t(int ret, std::string&& err)
  : ret{ret}, err(std::move(err))
{}
// Build a result with a return code, error text and output payload.
tell_result_t::tell_result_t(int ret, std::string&& err, ceph::bufferlist&& out)
  : ret{ret}, err(std::move(err)), out(std::move(out))
{}
// Build a success result (ret defaults to 0) whose payload is the
// formatter's flushed output.
tell_result_t::tell_result_t(std::unique_ptr<Formatter> formatter)
{
  formatter->flush(out);
}
// Register an asok hook under its command prefix. A duplicate prefix is
// a programming error (asserted).
// Fix: the log format string had a stray trailing ')' ("{})"), which
// printed an unbalanced parenthesis after every registered command.
void AdminSocket::register_command(std::unique_ptr<AdminSocketHook>&& hook)
{
  // copy the prefix before the hook is moved into the map
  auto prefix = hook->prefix;
  auto [it, added] = hooks.emplace(prefix, std::move(hook));
  assert(added);
  logger().info("register_command(): {}", it->first);
}
// Parse an incoming command vector into either a ready-to-execute
// parsed_command_t (cmdmap, output format, matching hook) or, on any
// parse/lookup failure, a tell_result_t describing the error.
auto AdminSocket::parse_cmd(const std::vector<std::string>& cmd)
  -> std::variant<parsed_command_t, tell_result_t>
{
  // preliminaries:
  // - create the formatter specified by the cmd parameters
  // - locate the "op-code" string (the 'prefix' segment)
  // - prepare for command parameters extraction via cmdmap_t
  cmdmap_t cmdmap;
  ceph::bufferlist out;
  try {
    stringstream errss;
    // note that cmdmap_from_json() may throw on syntax issues
    if (!cmdmap_from_json(cmd, &cmdmap, errss)) {
      logger().error("{}: incoming command error: {}", __func__, errss.str());
      out.append("error:"s);
      out.append(errss.str());
      return tell_result_t{-EINVAL, "invalid json", std::move(out)};
    }
  } catch (const std::runtime_error& e) {
    logger().error("{}: incoming command syntax: {}", __func__, cmd);
    out.append(string{e.what()});
    return tell_result_t{-EINVAL, "invalid json", std::move(out)};
  }
  string format;
  string prefix;
  try {
    cmd_getval(cmdmap, "format", format);
    cmd_getval(cmdmap, "prefix", prefix);
    // tolerate old-style pg <pgid> command <args> style formatting
    if (prefix == "pg") {
      cmd_getval(cmdmap, "cmd", prefix);
    }
  } catch (const bad_cmd_get& e) {
    logger().error("{}: invalid syntax: {}", __func__, cmd);
    out.append(string{e.what()});
    return tell_result_t{-EINVAL, "invalid json", std::move(out)};
  }
  // match the incoming op-code to one of the registered APIs
  if (auto found = hooks.find(prefix); found != hooks.end()) {
    return parsed_command_t{ cmdmap, format, *found->second };
  } else {
    return tell_result_t{-EINVAL,
                         fmt::format("unknown command '{}'", prefix),
                         std::move(out)};
  }
}
// Frame an asok response on the wire: a 4-byte network-byte-order
// length header followed by the response body. An empty body is sent
// as the placeholder " {} " so clients always receive a payload.
seastar::future<> AdminSocket::finalize_response(
  seastar::output_stream<char>& out, ceph::bufferlist&& msgs)
{
  string outbuf_cont = msgs.to_str();
  if (outbuf_cont.empty()) {
    outbuf_cont = " {} ";
  }
  uint32_t response_length = htonl(outbuf_cont.length());
  logger().info("asok response length: {}", outbuf_cont.length());
  // NOTE(review): the body is written from the lambda's captured copy of
  // outbuf_cont via c_str(); confirm out.write() copies the data (or the
  // continuation state outlives the write) so the buffer cannot dangle.
  return out.write(reinterpret_cast<char*>(&response_length),
                   sizeof(response_length))
    .then([&out, outbuf_cont] { return out.write(outbuf_cont.c_str()); });
}
// Handle an MCommand received over a messenger connection: execute the
// command, then send back an MCommandReply carrying the same tid and
// the command's return code, error text and output payload.
seastar::future<> AdminSocket::handle_command(crimson::net::ConnectionRef conn,
                                              boost::intrusive_ptr<MCommand> m)
{
  // conn is captured by value to keep the connection alive until the
  // reply is sent
  return execute_command(m->cmd, std::move(m->get_data())).then(
    [conn, tid=m->get_tid()](auto result) {
      auto [ret, err, out] = std::move(result);
      auto reply = crimson::make_message<MCommandReply>(ret, err);
      reply->set_tid(tid);
      reply->set_data(out);
      return conn->send(std::move(reply));
    });
}
// Execute a single asok command line and write the framed response to
// the client stream. On failure the error code and message are folded
// into the response body rather than propagated.
seastar::future<> AdminSocket::execute_line(std::string cmdline,
                                            seastar::output_stream<char>& out)
{
  return execute_command({std::move(cmdline)}, {}).then([&out, this](auto result) {
    auto [ret, stderr, stdout] = std::move(result);
    if (ret < 0) {
      stdout.append(fmt::format("ERROR: {}\n", cpp_strerror(ret)));
      stdout.append(stderr);
    }
    return finalize_response(out, std::move(stdout));
  });
}
// Parse, validate and run a command: on success invokes the matching
// registered hook with the extracted parameters; any parse or
// validation failure is returned as an error tell_result_t.
auto AdminSocket::execute_command(const std::vector<std::string>& cmd,
                                  ceph::bufferlist&& buf)
  -> seastar::future<tell_result_t>
{
  auto maybe_parsed = parse_cmd(cmd);
  if (auto* parsed = std::get_if<parsed_command_t>(&maybe_parsed); parsed) {
    stringstream os;
    string desc{parsed->hook.desc};
    // check the supplied parameters against the hook's signature
    if (!validate_cmd(desc, parsed->params, os)) {
      logger().error("AdminSocket::execute_command: "
                     "failed to validate '{}': {}", cmd, os.str());
      ceph::bufferlist out;
      out.append(os);
      return seastar::make_ready_future<tell_result_t>(
        tell_result_t{-EINVAL, "invalid command json", std::move(out)});
    }
    return parsed->hook.call(parsed->params, parsed->format, std::move(buf));
  } else {
    // parse_cmd() already produced the error result; just forward it
    auto& result = std::get<tell_result_t>(maybe_parsed);
    return seastar::make_ready_future<tell_result_t>(std::move(result));
  }
}
// an input_stream consumer that reads buffer into a std::string up to the first
// '\0' which indicates the end of command
struct line_consumer {
  using tmp_buf = seastar::temporary_buffer<char>;
  using consumption_result_type =
    typename seastar::input_stream<char>::consumption_result_type;
  // Invoked by input_stream::consume() for each buffer: accumulate
  // bytes into `line` until a NUL is seen, then stop consuming and
  // return the unread remainder to the stream.
  seastar::future<consumption_result_type> operator()(tmp_buf&& buf) {
    size_t consumed = 0;
    for (auto c : buf) {
      consumed++;
      if (c == '\0') {
        // hand the bytes after the terminator back to the stream
        buf.trim_front(consumed);
        return seastar::make_ready_future<consumption_result_type>(
          consumption_result_type::stop_consuming_type(std::move(buf)));
      } else {
        line.push_back(c);
      }
    }
    // no terminator yet: ask the stream for more data
    return seastar::make_ready_future<consumption_result_type>(
      seastar::continue_consuming{});
  }
  std::string line;  // the accumulated command line (without the NUL)
};
/*
 * Serve one client connection: read a single '\0'-terminated command line,
 * execute it, flush the reply, then tear the streams down (output first,
 * then input).  Any exception is logged and swallowed so a misbehaving
 * client cannot take the accept loop down.
 */
seastar::future<> AdminSocket::handle_client(seastar::input_stream<char>& in,
                                             seastar::output_stream<char>& out)
{
  auto consumer = seastar::make_shared<line_consumer>();
  return in.consume(*consumer).then([consumer, &out, this] {
    logger().debug("AdminSocket::handle_client: incoming asok string: {}",
                   consumer->line);
    return execute_line(consumer->line, out);
  }).then([&out] {
    return out.flush();
  }).finally([&out] {
    return out.close();
  }).then([&in] {
    return in.close();
  }).handle_exception([](auto ep) {
    logger().debug("exception on {}: {}", __func__, ep);
  });
}
/*
 * Begin listening on a unix-domain socket at `path` and serve asok clients
 * in a background fiber (kept in `task`).  Clients are served one at a
 * time; the accept loop runs until stop() closes `stop_gate`.  The socket
 * file is removed when the loop terminates.  An empty path or a failure to
 * listen is logged and treated as a no-op.
 */
seastar::future<> AdminSocket::start(const std::string& path)
{
  if (path.empty()) {
    logger().error(
      "{}: Admin Socket socket path missing from the configuration", __func__);
    return seastar::now();
  }
  logger().debug("{}: asok socket path={}", __func__, path);
  auto sock_path = seastar::socket_address{ seastar::unix_domain_addr{ path } };
  try {
    server_sock = seastar::engine().listen(sock_path);
  } catch (const std::system_error& e) {
    // e.g. the path already exists or is not writable; asok is optional,
    // so just log and carry on without it
    logger().error("{}: unable to listen({}): {}", __func__, path, e.what());
    server_sock.reset();
    return seastar::make_ready_future<>();
  }
  // listen in background
  task = seastar::keep_doing([this] {
    return seastar::try_with_gate(stop_gate, [this] {
      assert(!connected_sock.has_value());
      return server_sock->accept().then([this](seastar::accept_result acc) {
        connected_sock = std::move(acc.connection);
        return seastar::do_with(connected_sock->input(),
                                connected_sock->output(),
          [this](auto& input, auto& output) mutable {
          return handle_client(input, output);
        }).finally([this] {
          assert(connected_sock.has_value());
          connected_sock.reset();
        });
      }).handle_exception([this](auto ep) {
        // during shutdown, abort_accept() makes accept() fail -- that is
        // expected, so only report errors while the gate is still open
        if (!stop_gate.is_closed()) {
          logger().error("AdminSocket: terminated: {}", ep);
        }
      });
    });
  }).handle_exception_type([](const seastar::gate_closed_exception&) {
  }).finally([path] {
    // clean up the socket file once the accept loop is done
    return seastar::remove_file(path);
  });
  return seastar::make_ready_future<>();
}
/*
 * Shut the asok server down: stop accepting new connections, break the
 * currently served connection (if any), then wait -- via the gate -- for
 * the background accept loop (`task`) to finish.  Safe to call when
 * start() never succeeded.
 */
seastar::future<> AdminSocket::stop()
{
  if (!server_sock) {
    return seastar::now();
  }
  server_sock->abort_accept();
  if (connected_sock) {
    connected_sock->shutdown_input();
    connected_sock->shutdown_output();
  }
  return stop_gate.close().then([this] {
    assert(task.has_value());
    return task->then([] {
      logger().info("AdminSocket: stopped");
      return seastar::now();
    });
  });
}
/////////////////////////////////////////
// the internal hooks
/////////////////////////////////////////
/**
 * Implements "version": reports the Ceph version, release name and
 * release type of this daemon inside a 'version' JSON section.
 */
class VersionHook final : public AdminSocketHook {
public:
  VersionHook()
    : AdminSocketHook{"version", "", "get ceph version"}
  {}
  seastar::future<tell_result_t> call(const cmdmap_t&,
                                      std::string_view format,
                                      ceph::bufferlist&&) const final
  {
    unique_ptr<Formatter> fmtr{
      Formatter::create(format, "json-pretty", "json-pretty")};
    fmtr->open_object_section("version");
    fmtr->dump_string("version", ceph_version_to_str());
    fmtr->dump_string("release", ceph_release_to_str());
    fmtr->dump_string("release_type", ceph_release_type());
    fmtr->close_section();
    return seastar::make_ready_future<tell_result_t>(std::move(fmtr));
  }
};
/**
 * Implements "git_version".  The reply deliberately nests the sha1 inside
 * a 'version' JSON section -- callers expect that layout.
 */
class GitVersionHook final : public AdminSocketHook {
public:
  GitVersionHook()
    : AdminSocketHook{"git_version", "", "get git sha1"}
  {}
  seastar::future<tell_result_t> call(const cmdmap_t&,
                                      std::string_view format,
                                      ceph::bufferlist&&) const final
  {
    unique_ptr<Formatter> fmtr{
      Formatter::create(format, "json-pretty", "json-pretty")};
    fmtr->open_object_section("version");
    fmtr->dump_string("git_version", git_version_to_str());
    fmtr->close_section();
    return seastar::make_ready_future<tell_result_t>(std::move(fmtr));
  }
};
/**
 * Implements "help": lists, for every registered hook that carries help
 * text, a prefix -> help-string mapping.  Hooks with empty help text are
 * intentionally hidden from the listing.
 */
class HelpHook final : public AdminSocketHook {
  const AdminSocket& m_as;
public:
  explicit HelpHook(const AdminSocket& as) :
    AdminSocketHook{"help", "", "list available commands"},
    m_as{as}
  {}
  seastar::future<tell_result_t> call(const cmdmap_t&,
                                      std::string_view format,
                                      ceph::bufferlist&&) const final
  {
    unique_ptr<Formatter> f{
      Formatter::create(format, "json-pretty", "json-pretty")};
    f->open_object_section("help");
    for (const auto& [prefix, hook] : m_as) {
      if (hook->help.empty()) {
        continue;  // hidden hook
      }
      f->dump_string(prefix.data(), hook->help);
    }
    f->close_section();
    return seastar::make_ready_future<tell_result_t>(std::move(f));
  }
};
/**
 * Implements "get_command_descriptions": emits, for every registered hook,
 * its full command signature and help text in the JSON schema that the
 * ceph CLI understands.
 */
class GetdescsHook final : public AdminSocketHook {
  const AdminSocket& m_as;
public:
  explicit GetdescsHook(const AdminSocket& as) :
    AdminSocketHook{"get_command_descriptions",
                    "",
                    "list available commands"},
    m_as{ as } {}
  seastar::future<tell_result_t> call(const cmdmap_t& cmdmap,
                                      std::string_view format,
                                      ceph::bufferlist&&) const final
  {
    unique_ptr<Formatter> f{
      Formatter::create(format, "json-pretty", "json-pretty")};
    f->open_object_section("command_descriptions");
    int seq = 0;
    for (const auto& [prefix, hook] : m_as) {
      // section names are "cmd 000", "cmd 001", ...
      const auto secname = fmt::format("cmd {:>03}", seq++);
      const auto cmd = fmt::format("{} {}", hook->prefix, hook->desc);
      dump_cmd_and_help_to_json(f.get(), CEPH_FEATURES_ALL, secname,
                                cmd, std::string{hook->help});
    }
    f->close_section();
    return seastar::make_ready_future<tell_result_t>(std::move(f));
  }
};
/**
 * Implements "injectargs": applies the given configuration arguments to
 * the running daemon through the local config proxy.  Invalid arguments
 * are reported back as -EINVAL.
 */
class InjectArgsHook final : public AdminSocketHook {
public:
  InjectArgsHook()
    : AdminSocketHook{"injectargs",
                      "name=injected_args,type=CephString,n=N",
                      "inject configuration arguments into running daemon"}
  {}
  seastar::future<tell_result_t> call(const cmdmap_t& cmdmap,
                                      std::string_view format,
                                      ceph::bufferlist&&) const final
  {
    std::vector<std::string> argv;
    if (!cmd_getval(cmdmap, "injected_args", argv)) {
      // nothing to inject: treat as a successful no-op
      return seastar::make_ready_future<tell_result_t>();
    }
    const std::string args = boost::algorithm::join(argv, " ");
    return local_conf().inject_args(args).then([] {
      return seastar::make_ready_future<tell_result_t>();
    }).handle_exception_type([] (const std::invalid_argument& e) {
      return seastar::make_ready_future<tell_result_t>(
        tell_result_t{-EINVAL, e.what()});
    });
  }
};
/**
 * Implements "config show": dumps all current configuration settings.
 */
class ConfigShowHook : public AdminSocketHook {
public:
  ConfigShowHook() :
    AdminSocketHook{"config show",
                    "",
                    "dump current config settings"}
  {}
  seastar::future<tell_result_t> call(const cmdmap_t&,
                                      std::string_view format,
                                      ceph::bufferlist&& input) const final
  {
    unique_ptr<Formatter> fmtr{
      Formatter::create(format, "json-pretty", "json-pretty")};
    fmtr->open_object_section("config_show");
    local_conf().show_config(fmtr.get());
    fmtr->close_section();
    return seastar::make_ready_future<tell_result_t>(std::move(fmtr));
  }
};
/**
 * Implements "config get <field>": reports the current value of a single
 * configuration item, or an error if the item is unknown.
 */
class ConfigGetHook : public AdminSocketHook {
public:
  ConfigGetHook() :
    AdminSocketHook("config get",
                    "name=var,type=CephString",
                    "config get <field>: get the config value")
  {}
  seastar::future<tell_result_t> call(const cmdmap_t& cmdmap,
                                      std::string_view format,
                                      ceph::bufferlist&& input) const final
  {
    std::string var;
    [[maybe_unused]] const bool found = cmd_getval(cmdmap, "var", var);
    // "var" is a required parameter, so validation guarantees its presence
    assert(found);
    std::string conf_val;
    const int r = local_conf().get_val(var, &conf_val);
    if (r < 0) {
      return seastar::make_ready_future<tell_result_t>(
        tell_result_t{r, fmt::format("error getting {}: {}",
                                     var, cpp_strerror(r))});
    }
    unique_ptr<Formatter> f{Formatter::create(format,
                                              "json-pretty",
                                              "json-pretty")};
    f->open_object_section("config_get");
    f->dump_string(var, conf_val);
    f->close_section();
    return seastar::make_ready_future<tell_result_t>(std::move(f));
  }
};
/**
 * Implements "config set <field> <val> [<val> ...]", e.g.:
 *   {"prefix": "config set", "var":"debug_osd", "val": ["30/20"]}
 * Invalid values are reported back as -EINVAL.
 */
class ConfigSetHook : public AdminSocketHook {
public:
  ConfigSetHook()
    : AdminSocketHook("config set",
                      "name=var,type=CephString "
                      "name=val,type=CephString,n=N",
                      "config set <field> <val> [<val> ...]: set a config variable")
  {}
  seastar::future<tell_result_t> call(const cmdmap_t& cmdmap,
                                      std::string_view format,
                                      ceph::bufferlist&&) const final
  {
    std::string var;
    std::vector<std::string> vals;
    cmd_getval(cmdmap, "var", var);
    cmd_getval(cmdmap, "val", vals);
    // the value may have been split into several words; glue it back together
    const std::string joined_values = boost::algorithm::join(vals, " ");
    return local_conf().set_val(var, joined_values).then([format] {
      unique_ptr<Formatter> f{
        Formatter::create(format, "json-pretty", "json-pretty")};
      f->open_object_section("config_set");
      f->dump_string("success", "");
      f->close_section();
      return seastar::make_ready_future<tell_result_t>(std::move(f));
    }).handle_exception_type([](std::invalid_argument& e) {
      return seastar::make_ready_future<tell_result_t>(
        tell_result_t{-EINVAL, e.what()});
    });
  }
};
/**
 * Implements "config help": dumps the schema and description of every
 * known configuration option.
 */
class ConfigHelpHook : public AdminSocketHook {
public:
  ConfigHelpHook() :
    AdminSocketHook{"config help",
                    "",
                    "get config setting schema and descriptions"}
  {}
  seastar::future<tell_result_t> call(const cmdmap_t&,
                                      std::string_view format,
                                      ceph::bufferlist&& input) const final
  {
    unique_ptr<Formatter> f{
      Formatter::create(format, "json-pretty", "json-pretty")};
    // dump every option, unfiltered
    f->open_array_section("options");
    for (const auto &option : ceph_options) {
      f->dump_object("option", option);
    }
    f->close_section();
    return seastar::make_ready_future<tell_result_t>(std::move(f));
  }
};
/// the hooks that are served directly by the admin_socket server
void AdminSocket::register_admin_commands()
{
  register_command(std::make_unique<VersionHook>());
  register_command(std::make_unique<GitVersionHook>());
  // HelpHook and GetdescsHook enumerate the registered hooks, so they get
  // a reference to this server
  register_command(std::make_unique<HelpHook>(*this));
  register_command(std::make_unique<GetdescsHook>(*this));
  register_command(std::make_unique<ConfigGetHook>());
  register_command(std::make_unique<ConfigSetHook>());
  register_command(std::make_unique<ConfigShowHook>());
  register_command(std::make_unique<ConfigHelpHook>());
  register_command(std::make_unique<InjectArgsHook>());
}
} // namespace crimson::admin
| 18,097 | 32.268382 | 87 | cc |
null | ceph-main/src/crimson/admin/admin_socket.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
/**
A Crimson-wise version of the src/common/admin_socket.h
Note: assumed to be running on a single core.
*/
#include <map>
#include <string>
#include <string_view>
#include <seastar/core/future.hh>
#include <seastar/core/gate.hh>
#include <seastar/core/iostream.hh>
#include <seastar/core/shared_mutex.hh>
#include <seastar/core/shared_ptr.hh>
#include <seastar/net/api.hh>
#include "common/cmdparse.h"
#include "include/buffer.h"
#include "crimson/net/Fwd.h"
class MCommand;
namespace crimson::admin {
class AdminSocket;
struct tell_result_t {
  int ret = 0;           // 0 on success, or a negative errno on failure
  std::string err;       // human-readable error text (empty on success)
  ceph::bufferlist out;  // the command's output payload
  tell_result_t() = default;
  tell_result_t(int ret, std::string&& err);
  tell_result_t(int ret, std::string&& err, ceph::bufferlist&& out);
  /**
   * create a \c tell_result_t indicating the successful completion
   * of command
   *
   * \param formatter the content of formatter will be flushed to the
   *        output buffer
   */
  tell_result_t(std::unique_ptr<Formatter> formatter);
};
/**
* An abstract class to be inherited by implementations of asock hooks
*/
class AdminSocketHook {
public:
  /**
   * \param prefix the command string this hook is registered under
   * \param desc   the parameter descriptor used to validate commands
   * \param help   help text; if empty, the hook is hidden from 'help'
   *
   * \note only views are stored -- the strings behind them must outlive
   *       the hook (string literals in practice)
   */
  AdminSocketHook(std::string_view prefix,
		  std::string_view desc,
		  std::string_view help) :
    prefix{prefix}, desc{desc}, help{help}
  {}
  /**
   * handle command defined by cmdmap
   *
   * \param cmdmap dictionary holding the named parameters
   * \param format the expected format of the output
   * \param input the binary input of the command
   * \pre \c cmdmap should be validated with \c desc
   * \retval an instance of \c tell_result_t
   * \note a negative \c ret should be set to indicate that the hook fails to
   *       fulfill the command either because of an invalid input or other
   *       failures. in that case, a brief reason of the failure should
   *       noted in \c err in the returned value
   */
  virtual seastar::future<tell_result_t> call(const cmdmap_t& cmdmap,
					      std::string_view format,
					      ceph::bufferlist&& input) const = 0;
  virtual ~AdminSocketHook() {}
  const std::string_view prefix;  // the command this hook answers to
  const std::string_view desc;    // parameter descriptor for validation
  const std::string_view help;    // help text shown by the 'help' command
};
/// the asok server: listens on a unix-domain socket and dispatches incoming
/// command lines (and MCommand messages) to the registered hooks
class AdminSocket : public seastar::enable_lw_shared_from_this<AdminSocket> {
public:
  AdminSocket() = default;
  ~AdminSocket() = default;
  AdminSocket(const AdminSocket&) = delete;
  AdminSocket& operator=(const AdminSocket&) = delete;
  AdminSocket(AdminSocket&&) = delete;
  AdminSocket& operator=(AdminSocket&&) = delete;
  /**
   * create the async Seastar thread that handles asok commands arriving
   * over the socket.
   */
  seastar::future<> start(const std::string& path);
  seastar::future<> stop();
  /**
   * register an admin socket hook
   *
   * Commands (APIs) are registered under a command string. Incoming
   * commands are split by spaces and matched against the longest
   * registered command. For example, if 'foo' and 'foo bar' are
   * registered, and an incoming command is 'foo bar baz', it is
   * matched with 'foo bar', while 'foo fud' will match 'foo'.
   *
   * \param hook a hook which includes its identifying command string, the
   *        expected call syntax, and some help text.
   *
   * A note regarding the help text: if empty, command will not be
   * included in 'help' output.
   */
  void register_command(std::unique_ptr<AdminSocketHook>&& hook);
  /**
   * Registering the APIs that are served directly by the admin_socket server.
   */
  void register_admin_commands();
  /**
   * handle a command message by replying an MCommandReply with the same tid
   *
   * \param conn connection over which the incoming command message is received
   * \param m message carrying the command vector and optional input buffer
   */
  seastar::future<> handle_command(crimson::net::ConnectionRef conn,
				   boost::intrusive_ptr<MCommand> m);
private:
  /**
   * the result of analyzing an incoming command, and locating it in
   * the registered APIs collection.
   */
  struct parsed_command_t {
    cmdmap_t params;
    std::string format;
    const AdminSocketHook& hook;
  };
  // and the shorthand:
  seastar::future<> handle_client(seastar::input_stream<char>& inp,
				  seastar::output_stream<char>& out);
  seastar::future<> execute_line(std::string cmdline,
				 seastar::output_stream<char>& out);
  seastar::future<> finalize_response(seastar::output_stream<char>& out,
				      ceph::bufferlist&& msgs);
  seastar::future<tell_result_t> execute_command(const std::vector<std::string>& cmd,
						 ceph::bufferlist&& buf);
  // the background accept loop started by start()
  std::optional<seastar::future<>> task;
  // listening unix-domain socket (set when start() succeeds)
  std::optional<seastar::server_socket> server_sock;
  // the currently served client, if any (one client at a time)
  std::optional<seastar::connected_socket> connected_sock;
  /**
   * stopping incoming ASOK requests at shutdown
   */
  seastar::gate stop_gate;
  /**
   * parse the incoming command vector, find a registered hook by looking up by
   * its prefix, perform sanity checks on the parsed parameters with the hook's
   * command description
   *
   * \param cmd a vector of string which presents a command
   * \retval on success, a \c parsed_command_t is returned, tell_result_t with
   *         detailed error messages is returned otherwise
   */
  std::variant<parsed_command_t, tell_result_t>
  parse_cmd(const std::vector<std::string>& cmd);
  // registered hooks, keyed by their command prefix
  using hooks_t = std::map<std::string_view, std::unique_ptr<AdminSocketHook>>;
  hooks_t hooks;
public:
  /**
   * iterator support
   */
  hooks_t::const_iterator begin() const {
    return hooks.cbegin();
  }
  hooks_t::const_iterator end() const {
    return hooks.cend();
  }
};
} // namespace crimson::admin
| 5,795 | 29.829787 | 85 | h |
null | ceph-main/src/crimson/admin/osd_admin.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "crimson/admin/osd_admin.h"
#include <string>
#include <string_view>
#include <fmt/format.h>
#include <seastar/core/do_with.hh>
#include <seastar/core/future.hh>
#include <seastar/core/thread.hh>
#include <seastar/core/scollectd_api.hh>
#include "common/config.h"
#include "crimson/admin/admin_socket.h"
#include "crimson/common/log.h"
#include "crimson/osd/exceptions.h"
#include "crimson/osd/osd.h"
#include "crimson/osd/pg.h"
#include "crimson/osd/shard_services.h"
namespace {
// file-local accessor for the osd logging channel
seastar::logger& logger()
{
  return crimson::get_logger(ceph_subsys_osd);
}
}  // namespace
using namespace std::literals;
using std::string_view;
using std::unique_ptr;
using crimson::osd::OSD;
using crimson::common::local_conf;
using namespace crimson::common;
using ceph::common::cmd_getval;
using ceph::common::cmd_getval_or;
namespace crimson::admin {
// factory helper: construct a concrete asok hook and hand it back through
// the AdminSocketHook interface pointer; explicitly instantiated below for
// each hook type so it can be declared in the header
template <class Hook, class... Args>
std::unique_ptr<AdminSocketHook> make_asok_hook(Args&&... args)
{
  return std::make_unique<Hook>(std::forward<Args>(args)...);
}
/**
 * An OSD admin hook: report a summary of the OSD's current status
 * inside a 'status' JSON section.
 */
class OsdStatusHook : public AdminSocketHook {
public:
  explicit OsdStatusHook(const crimson::osd::OSD& osd) :
    AdminSocketHook{"status", "", "OSD status"},
    osd(osd)
  {}
  seastar::future<tell_result_t> call(const cmdmap_t&,
                                      std::string_view format,
                                      ceph::bufferlist&& input) const final
  {
    unique_ptr<Formatter> fmtr{
      Formatter::create(format, "json-pretty", "json-pretty")};
    fmtr->open_object_section("status");
    osd.dump_status(fmtr.get());
    fmtr->close_section();
    return seastar::make_ready_future<tell_result_t>(std::move(fmtr));
  }
private:
  const crimson::osd::OSD& osd;
};
template std::unique_ptr<AdminSocketHook>
make_asok_hook<OsdStatusHook>(const crimson::osd::OSD& osd);
/**
 * An OSD admin hook: ask the OSD to send its beacon to the monitors
 * right away.
 */
class SendBeaconHook : public AdminSocketHook {
public:
  explicit SendBeaconHook(crimson::osd::OSD& osd) :
    AdminSocketHook{"send_beacon",
                    "",
                    "send OSD beacon to mon immediately"},
    osd(osd)
  {}
  seastar::future<tell_result_t> call(const cmdmap_t&,
                                      std::string_view format,
                                      ceph::bufferlist&& input) const final
  {
    // no payload on success; just report completion
    return osd.send_beacon().then([] {
      return tell_result_t{};
    });
  }
private:
  crimson::osd::OSD& osd;
};
template std::unique_ptr<AdminSocketHook>
make_asok_hook<SendBeaconHook>(crimson::osd::OSD& osd);
/**
 * An OSD admin hook: push the latest pg stats to the mgr and report the
 * sequence number of that publication as 'stat_seq'.
 */
class FlushPgStatsHook : public AdminSocketHook {
public:
  explicit FlushPgStatsHook(crimson::osd::OSD& osd) :
    AdminSocketHook("flush_pg_stats",
                    "",
                    "flush pg stats"),
    osd{osd}
  {}
  seastar::future<tell_result_t> call(const cmdmap_t&,
                                      std::string_view format,
                                      ceph::bufferlist&& input) const final
  {
    const uint64_t seq = osd.send_pg_stats();
    unique_ptr<Formatter> fmtr{
      Formatter::create(format, "json-pretty", "json-pretty")};
    fmtr->dump_unsigned("stat_seq", seq);
    return seastar::make_ready_future<tell_result_t>(std::move(fmtr));
  }
private:
  crimson::osd::OSD& osd;
};
template std::unique_ptr<AdminSocketHook> make_asok_hook<FlushPgStatsHook>(crimson::osd::OSD& osd);
/// dump the history of PGs' peering state
class DumpPGStateHistory final: public AdminSocketHook {
public:
  explicit DumpPGStateHistory(const crimson::osd::PGShardManager &pg_shard_manager) :
    AdminSocketHook{"dump_pgstate_history",
                    "",
                    "dump history of PGs' peering state"},
    pg_shard_manager{pg_shard_manager}
  {}
  seastar::future<tell_result_t> call(const cmdmap_t&,
                                      std::string_view format,
                                      ceph::bufferlist&& input) const final
  {
    std::unique_ptr<Formatter> fref{
      Formatter::create(format, "json-pretty", "json-pretty")};
    // the raw `f` stays valid while the iteration runs: ownership (fref)
    // is moved into the final continuation below
    Formatter *f = fref.get();
    f->open_object_section("pgstate_history");
    f->open_array_section("pgs");
    return pg_shard_manager.for_each_pg([f](auto &pgid, auto &pg) {
      f->open_object_section("pg");
      f->dump_stream("pg") << pgid;
      const auto& peering_state = pg->get_peering_state();
      f->dump_string("currently", peering_state.get_current_state());
      peering_state.dump_history(f);
      f->close_section();
    }).then([fref=std::move(fref)]() mutable {
      // close "pgs" and "pgstate_history"
      fref->close_section();
      fref->close_section();
      return seastar::make_ready_future<tell_result_t>(std::move(fref));
    });
  }
private:
  const crimson::osd::PGShardManager &pg_shard_manager;
};
template std::unique_ptr<AdminSocketHook> make_asok_hook<DumpPGStateHistory>(
  const crimson::osd::PGShardManager &);
// dump the contents of the perf counters collected in osd and store,
// optionally filtered by logger name and/or counter name
class DumpPerfCountersHook final: public AdminSocketHook {
public:
  explicit DumpPerfCountersHook() :
    AdminSocketHook{"perfcounters_dump",
                    "name=logger,type=CephString,req=false "
                    "name=counter,type=CephString,req=false",
                    "dump perfcounters in osd and store"}
  {}
  seastar::future<tell_result_t> call(const cmdmap_t& cmdmap,
                                      std::string_view format,
                                      ceph::bufferlist&& input) const final
  {
    std::unique_ptr<Formatter> f{Formatter::create(format,
                                                   "json-pretty",
                                                   "json-pretty")};
    // both filters are optional; an empty string matches everything
    std::string logger_filter;
    std::string counter_filter;
    cmd_getval(cmdmap, "logger", logger_filter);
    cmd_getval(cmdmap, "counter", counter_filter);
    crimson::common::local_perf_coll().dump_formatted(
      f.get(), false, false, logger_filter, counter_filter);
    return seastar::make_ready_future<tell_result_t>(std::move(f));
  }
};
template std::unique_ptr<AdminSocketHook> make_asok_hook<DumpPerfCountersHook>();
/**
 * A CephContext admin hook: triggers an assertion failure on demand, but
 * only when permitted by 'debug_asok_assert_abort'.
 */
class AssertAlwaysHook : public AdminSocketHook {
public:
  AssertAlwaysHook() :
    AdminSocketHook{"assert",
                    "",
                    "asserts"}
  {}
  seastar::future<tell_result_t> call(const cmdmap_t&,
                                      std::string_view format,
                                      ceph::bufferlist&& input) const final
  {
    const bool allowed =
      local_conf().get_val<bool>("debug_asok_assert_abort");
    if (!allowed) {
      return seastar::make_ready_future<tell_result_t>(
        tell_result_t{-EPERM, "configuration set to disallow asok assert"});
    }
    ceph_assert_always(0);  // deliberately abort the daemon
    return seastar::make_ready_future<tell_result_t>();
  }
};
template std::unique_ptr<AdminSocketHook> make_asok_hook<AssertAlwaysHook>();
/**
 * A Seastar admin hook: fetching the values of configured metrics
 *
 * Accepts an optional "group" parameter which, when given, restricts the
 * output to metrics whose full name starts with that prefix.
 */
class DumpMetricsHook : public AdminSocketHook {
public:
  DumpMetricsHook() :
    AdminSocketHook("dump_metrics",
                    "name=group,type=CephString,req=false",
                    "dump current configured seastar metrics and their values")
  {}
  seastar::future<tell_result_t> call(const cmdmap_t& cmdmap,
                                      std::string_view format,
                                      ceph::bufferlist&& input) const final
  {
    std::unique_ptr<Formatter> fref{Formatter::create(format, "json-pretty", "json-pretty")};
    // the raw `f` remains valid: ownership (fref) is moved into the final
    // continuation below
    auto *f = fref.get();
    std::string prefix;
    cmd_getval(cmdmap, "group", prefix);
    f->open_object_section("metrics");
    f->open_array_section("metrics");
    return seastar::do_with(std::move(prefix), [f](auto &prefix) {
      // reactor_map_seq: run the enumeration on the reactors in sequence
      // (presumably one invocation per shard -- confirm against its impl)
      return crimson::reactor_map_seq([f, &prefix] {
        for (const auto& [full_name, metric_family]: seastar::scollectd::get_value_map()) {
          // apply the optional "group" prefix filter
          if (!prefix.empty() && full_name.compare(0, prefix.size(), prefix) != 0) {
            continue;
          }
          for (const auto& [labels, metric] : metric_family) {
            if (metric && metric->is_enabled()) {
              f->open_object_section(""); // enclosed by array
              DumpMetricsHook::dump_metric_value(f, full_name, *metric, labels);
              f->close_section();
            }
          }
        }
      });
    }).then([fref = std::move(fref)]() mutable {
      // close "metrics" array and "metrics" object
      fref->close_section();
      fref->close_section();
      return seastar::make_ready_future<tell_result_t>(std::move(fref));
    });
  }
private:
  using registered_metric = seastar::metrics::impl::registered_metric;
  using data_type = seastar::metrics::impl::data_type;
  // emit one metric sample: its labels, then a "value" entry whose shape
  // depends on the metric's data type (scalar or histogram)
  static void dump_metric_value(Formatter* f,
                                string_view full_name,
                                const registered_metric& metric,
                                const seastar::metrics::impl::labels_type& labels)
  {
    f->open_object_section(full_name);
    for (const auto& [key, value] : labels) {
      f->dump_string(key, value);
    }
    auto value_name = "value";
    switch (auto v = metric(); v.type()) {
    case data_type::GAUGE:
      f->dump_float(value_name, v.d());
      break;
    case data_type::REAL_COUNTER:
      f->dump_float(value_name, v.d());
      break;
    case data_type::COUNTER:
      double val;
      try {
        val = v.ui();
      } catch (std::range_error&) {
        // seastar's cpu steal time may be negative
        val = 0;
      }
      f->dump_unsigned(value_name, val);
      break;
    case data_type::HISTOGRAM: {
      f->open_object_section(value_name);
      auto&& h = v.get_histogram();
      f->dump_float("sum", h.sample_sum);
      f->dump_unsigned("count", h.sample_count);
      f->open_array_section("buckets");
      for (auto i : h.buckets) {
        f->open_object_section("bucket");
        f->dump_float("le", i.upper_bound);
        f->dump_unsigned("count", i.count);
        f->close_section(); // "bucket"
      }
      {
        // the implicit final bucket covering everything up to +Inf
        f->open_object_section("bucket");
        f->dump_string("le", "+Inf");
        f->dump_unsigned("count", h.sample_count);
        f->close_section();
      }
      f->close_section(); // "buckets"
      f->close_section(); // value_name
    }
      break;
    default:
      std::abort();
      break;
    }
    f->close_section(); // full_name
  }
};
template std::unique_ptr<AdminSocketHook> make_asok_hook<DumpMetricsHook>();
// Resolve the target object of a test-ops command ("pool", "objname",
// optional "shardid") into a ghobject_t, using the given osdmap to map
// pool name -> pool id and object -> pg.
// Throws std::invalid_argument on any missing or unresolvable parameter.
static ghobject_t test_ops_get_object_name(
  const OSDMap& osdmap,
  const cmdmap_t& cmdmap)
{
  // resolve the pool: by name first, falling back to a numeric pool id
  auto pool = [&] {
    auto pool_arg = cmd_getval<std::string>(cmdmap, "pool");
    if (!pool_arg) {
      throw std::invalid_argument{"No 'pool' specified"};
    }
    int64_t pool = osdmap.lookup_pg_pool_name(*pool_arg);
    if (pool < 0 && std::isdigit((*pool_arg)[0])) {
      pool = std::atoll(pool_arg->c_str());
    }
    if (pool < 0) {
      // the return type of `fmt::format` is `std::string`
      throw std::invalid_argument{
        fmt::format("Invalid pool '{}'", *pool_arg)
      };
    }
    return pool;
  }();
  // split "namespace/objname" (the namespace part is optional) and map the
  // object to its pg
  auto [ objname, nspace, raw_pg ] = [&] {
    auto obj_arg = cmd_getval<std::string>(cmdmap, "objname");
    if (!obj_arg) {
      throw std::invalid_argument{"No 'objname' specified"};
    }
    std::string objname, nspace;
    if (std::size_t sep_pos = obj_arg->find_first_of('/');
        sep_pos != obj_arg->npos) {
      nspace = obj_arg->substr(0, sep_pos);
      objname = obj_arg->substr(sep_pos+1);
    } else {
      objname = *obj_arg;
    }
    pg_t raw_pg;
    if (object_locator_t oloc(pool, nspace);
        osdmap.object_locator_to_pg(object_t(objname), oloc, raw_pg) < 0) {
      throw std::invalid_argument{"Invalid namespace/objname"};
    }
    return std::make_tuple(std::move(objname),
                           std::move(nspace),
                           std::move(raw_pg));
  }();
  // "shardid" is optional; defaults to shard_id_t::NO_SHARD
  auto shard_id = cmd_getval_or<int64_t>(cmdmap,
                                         "shardid",
                                         shard_id_t::NO_SHARD);
  return ghobject_t{
    hobject_t{
      object_t{objname}, std::string{}, CEPH_NOSNAP, raw_pg.ps(), pool, nspace
    },
    ghobject_t::NO_GEN,
    shard_id_t{static_cast<int8_t>(shard_id)}
  };
}
// Usage:
//   injectdataerr <pool> [namespace/]<obj-name> [shardid]
//
// Asks the object store to inject a data error on the given object.
class InjectDataErrorHook : public AdminSocketHook {
public:
  InjectDataErrorHook(crimson::osd::ShardServices& shard_services) :
    AdminSocketHook("injectdataerr",
      "name=pool,type=CephString " \
      "name=objname,type=CephObjectname " \
      "name=shardid,type=CephInt,req=false,range=0|255",
      "inject data error to an object"),
    shard_services(shard_services) {
  }
  seastar::future<tell_result_t> call(const cmdmap_t& cmdmap,
                                      std::string_view format,
                                      ceph::bufferlist&& input) const final
  {
    ghobject_t obj;
    try {
      obj = test_ops_get_object_name(*shard_services.get_map(), cmdmap);
    } catch (const std::invalid_argument& e) {
      // bad/missing pool, objname or shardid parameter
      logger().info("error during data error injection: {}", e.what());
      return seastar::make_ready_future<tell_result_t>(-EINVAL,
                                                       e.what());
    }
    return shard_services.get_store().inject_data_error(obj).then([=] {
      logger().info("successfully injected data error for obj={}", obj);
      ceph::bufferlist bl;
      bl.append("ok"sv);
      return seastar::make_ready_future<tell_result_t>(0,
                                                       std::string{}, // no err
                                                       std::move(bl));
    });
  }
private:
  crimson::osd::ShardServices& shard_services;
};
template std::unique_ptr<AdminSocketHook> make_asok_hook<InjectDataErrorHook>(
  crimson::osd::ShardServices&);
// Usage:
//   injectmdataerr <pool> [namespace/]<obj-name> [shardid]
//
// Asks the object store to inject a *metadata* error on the given object.
class InjectMDataErrorHook : public AdminSocketHook {
public:
  InjectMDataErrorHook(crimson::osd::ShardServices& shard_services) :
    AdminSocketHook("injectmdataerr",
      "name=pool,type=CephString " \
      "name=objname,type=CephObjectname " \
      "name=shardid,type=CephInt,req=false,range=0|255",
      // fixed copy-paste from the data-error hook: this one injects a
      // metadata error, and the help text should say so
      "inject metadata error to an object"),
    shard_services(shard_services) {
  }
  seastar::future<tell_result_t> call(const cmdmap_t& cmdmap,
                                      std::string_view format,
                                      ceph::bufferlist&& input) const final
  {
    ghobject_t obj;
    try {
      obj = test_ops_get_object_name(*shard_services.get_map(), cmdmap);
    } catch (const std::invalid_argument& e) {
      // bad/missing pool, objname or shardid parameter
      logger().info("error during metadata error injection: {}", e.what());
      return seastar::make_ready_future<tell_result_t>(-EINVAL,
                                                       e.what());
    }
    return shard_services.get_store().inject_mdata_error(obj).then([=] {
      logger().info("successfully injected metadata error for obj={}", obj);
      ceph::bufferlist bl;
      bl.append("ok"sv);
      return seastar::make_ready_future<tell_result_t>(0,
                                                       std::string{}, // no err
                                                       std::move(bl));
    });
  }
private:
  crimson::osd::ShardServices& shard_services;
};
template std::unique_ptr<AdminSocketHook> make_asok_hook<InjectMDataErrorHook>(
  crimson::osd::ShardServices&);
/**
 * An InFlightOps admin hook: dump current in-flight operations
 */
class DumpInFlightOpsHook : public AdminSocketHook {
public:
  explicit DumpInFlightOpsHook(const crimson::osd::PGShardManager &pg_shard_manager) :
    AdminSocketHook{"dump_ops_in_flight", "", "show the ops currently in flight"},
    pg_shard_manager(pg_shard_manager)
  {}
  seastar::future<tell_result_t> call(const cmdmap_t&,
                                      std::string_view format,
                                      ceph::bufferlist&& input) const final
  {
    unique_ptr<Formatter> fref{
      Formatter::create(format, "json-pretty", "json-pretty")};
    // the raw `f` stays valid while the shards are visited: ownership
    // (fref) is moved into the final continuation below
    auto *f = fref.get();
    f->open_object_section("ops_in_flight");
    f->open_array_section("ops_in_flight");
    return pg_shard_manager.invoke_on_each_shard_seq([f](const auto &shard_services) {
      return shard_services.dump_ops_in_flight(f);
    }).then([fref=std::move(fref)]() mutable {
      // close the array and the enclosing object
      fref->close_section();
      fref->close_section();
      return seastar::make_ready_future<tell_result_t>(std::move(fref));
    });
  }
private:
  const crimson::osd::PGShardManager &pg_shard_manager;
};
template std::unique_ptr<AdminSocketHook>
make_asok_hook<DumpInFlightOpsHook>(const crimson::osd::PGShardManager &);
// show recently completed client operations from the operation registry
class DumpHistoricOpsHook : public AdminSocketHook {
public:
  explicit DumpHistoricOpsHook(const crimson::osd::OSDOperationRegistry& op_registry) :
    AdminSocketHook{"dump_historic_ops", "", "show recent ops"},
    op_registry(op_registry)
  {}
  seastar::future<tell_result_t> call(const cmdmap_t&,
                                      std::string_view format,
                                      ceph::bufferlist&& input) const final
  {
    unique_ptr<Formatter> fmtr{
      Formatter::create(format, "json-pretty", "json-pretty")};
    fmtr->open_object_section("historic_ops");
    op_registry.dump_historic_client_requests(fmtr.get());
    fmtr->close_section();
    // num_ops is hard-coded to 0 here -- presumably a placeholder; confirm
    fmtr->dump_int("num_ops", 0);
    return seastar::make_ready_future<tell_result_t>(std::move(fmtr));
  }
private:
  const crimson::osd::OSDOperationRegistry& op_registry;
};
template std::unique_ptr<AdminSocketHook>
make_asok_hook<DumpHistoricOpsHook>(const crimson::osd::OSDOperationRegistry& op_registry);
// show the slowest of the recently completed client operations
class DumpSlowestHistoricOpsHook : public AdminSocketHook {
public:
  explicit DumpSlowestHistoricOpsHook(const crimson::osd::OSDOperationRegistry& op_registry) :
    AdminSocketHook{"dump_historic_slow_ops", "", "show slowest recent ops"},
    op_registry(op_registry)
  {}
  seastar::future<tell_result_t> call(const cmdmap_t&,
                                      std::string_view format,
                                      ceph::bufferlist&& input) const final
  {
    // routine diagnostic query: log at debug (was warn), consistent with
    // the other dump hooks
    logger().debug("{}", __func__);
    unique_ptr<Formatter> f{Formatter::create(format, "json-pretty", "json-pretty")};
    f->open_object_section("historic_slow_ops");
    op_registry.dump_slowest_historic_client_requests(f.get());
    f->close_section();
    // num_ops is hard-coded to 0 here -- presumably a placeholder; confirm
    f->dump_int("num_ops", 0);
    return seastar::make_ready_future<tell_result_t>(std::move(f));
  }
private:
  const crimson::osd::OSDOperationRegistry& op_registry;
};
template std::unique_ptr<AdminSocketHook>
make_asok_hook<DumpSlowestHistoricOpsHook>(const crimson::osd::OSDOperationRegistry& op_registry);
// show the current local and remote recovery reservations
class DumpRecoveryReservationsHook : public AdminSocketHook {
public:
  explicit DumpRecoveryReservationsHook(crimson::osd::ShardServices& shard_services) :
    AdminSocketHook{"dump_recovery_reservations", "", "show recovery reservations"},
    shard_services(shard_services)
  {}
  seastar::future<tell_result_t> call(const cmdmap_t&,
                                      std::string_view format,
                                      ceph::bufferlist&& input) const final
  {
    logger().debug("{}", __func__);
    unique_ptr<Formatter> f{Formatter::create(format, "json-pretty", "json-pretty")};
    // keep the formatter alive across the two asynchronous dumps via do_with
    return seastar::do_with(std::move(f), [this](auto&& f) {
      f->open_object_section("reservations");
      f->open_object_section("local_reservations");
      return shard_services.local_dump_reservations(f.get()).then([&f, this] {
        f->close_section();
        f->open_object_section("remote_reservations");
        return shard_services.remote_dump_reservations(f.get()).then([&f] {
          // close "remote_reservations" and "reservations"
          f->close_section();
          f->close_section();
          return seastar::make_ready_future<tell_result_t>(std::move(f));
        });
      });
    });
  }
private:
  crimson::osd::ShardServices& shard_services;
};
template std::unique_ptr<AdminSocketHook>
make_asok_hook<DumpRecoveryReservationsHook>(crimson::osd::ShardServices& shard_services);
} // namespace crimson::admin
| 19,362 | 32.674783 | 99 | cc |
null | ceph-main/src/crimson/admin/osd_admin.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <memory>
namespace crimson::admin {
class AdminSocketHook;
class AssertAlwaysHook;
class DumpMetricsHook;
class DumpPGStateHistory;
class DumpPerfCountersHook;
class FlushPgStatsHook;
class InjectDataErrorHook;
class InjectMDataErrorHook;
class OsdStatusHook;
class SendBeaconHook;
class DumpInFlightOpsHook;
class DumpHistoricOpsHook;
class DumpSlowestHistoricOpsHook;
class DumpRecoveryReservationsHook;
template<class Hook, class... Args>
std::unique_ptr<AdminSocketHook> make_asok_hook(Args&&... args);
} // namespace crimson::admin
| 664 | 21.931034 | 70 | h |
null | ceph-main/src/crimson/admin/pg_commands.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "crimson/admin/pg_commands.h"
#include <memory>
#include <string>
#include <string_view>
#include <fmt/format.h>
#include <seastar/core/future.hh>
#include "crimson/admin/admin_socket.h"
#include "crimson/osd/osd.h"
#include "crimson/osd/pg.h"
using crimson::osd::OSD;
using crimson::osd::PG;
using namespace crimson::common;
using ceph::common::cmd_getval;
namespace crimson::admin::pg {
class PGCommand : public AdminSocketHook {
public:
// TODO: const correctness of osd
PGCommand(crimson::osd::OSD& osd,
std::string_view prefix,
std::string_view desc,
std::string_view help)
: AdminSocketHook{prefix, desc, help}, osd {osd}
{}
seastar::future<tell_result_t> call(const cmdmap_t& cmdmap,
std::string_view format,
ceph::bufferlist&& input) const final
{
// we have "ceph tell <pgid> <cmd>". and it is the ceph cli's responsibility
// to add "pgid" to the cmd dict. as rados_pg_command() does not set it for
// us. moreover, and "pgid" is not listed in the command description, as user
// command format does not follow the convention of "<prefix> [<args>,...]"
// so we have to verify it on the server side.
std::string pgid_str;
pg_t pgid;
if (!cmd_getval(cmdmap, "pgid", pgid_str)) {
return seastar::make_ready_future<tell_result_t>(
tell_result_t{-EINVAL, "no pgid specified"});
} else if (!pgid.parse(pgid_str.c_str())) {
return seastar::make_ready_future<tell_result_t>(
tell_result_t{-EINVAL, fmt::format("couldn't parse pgid '{}'", pgid_str)});
}
// am i the primary for this pg?
const auto osdmap = osd.get_shard_services().get_map();
spg_t spg_id;
if (!osdmap->get_primary_shard(pgid, &spg_id)) {
return seastar::make_ready_future<tell_result_t>(tell_result_t{
-ENOENT, fmt::format("pgid '{}' does not exist", pgid_str)});
}
return osd.get_pg_shard_manager().with_pg(
spg_id,
[this, spg_id,
cmdmap=std::move(cmdmap),
format=std::move(format),
input=std::move(input)
](auto &&pg) mutable {
if (!pg) {
return seastar::make_ready_future<tell_result_t>(tell_result_t{
-ENOENT, fmt::format("i don't have pgid '{}'", spg_id)});
}
if (!pg->is_primary()) {
return seastar::make_ready_future<tell_result_t>(tell_result_t{
-EAGAIN, fmt::format("not primary for pgid '{}'", spg_id)});
}
return this->do_command(pg, cmdmap, format, std::move(input));
});
}
private:
virtual seastar::future<tell_result_t>
do_command(Ref<PG> pg,
const cmdmap_t& cmdmap,
std::string_view format,
ceph::bufferlist&& input) const = 0;
OSD& osd;
};
/// "ceph tell <pgid> query" — dump the primary's detailed view of the
/// PG in the requested format.
class QueryCommand final : public PGCommand {
public:
  // TODO: const correctness of osd
  explicit QueryCommand(crimson::osd::OSD& osd) :
    PGCommand{osd, "query", "", "show details of a specific pg"}
  {}
private:
  seastar::future<tell_result_t>
  do_command(Ref<PG> pg,
             const cmdmap_t&,
             std::string_view format,
             ceph::bufferlist&& input) const final
  {
    // render the PG state into a formatter (json-pretty by default) and
    // hand the formatter back as the command result
    auto formatter = std::unique_ptr<Formatter>{
      Formatter::create(format, "json-pretty", "json-pretty")};
    formatter->open_object_section("pg");
    pg->dump_primary(formatter.get());
    formatter->close_section();
    return seastar::make_ready_future<tell_result_t>(std::move(formatter));
  }
};
/// "ceph tell <pgid> mark_unfound_lost revert|delete" — mark every
/// unfound object in the PG as lost, either deleting it or reverting
/// to a prior version.
class MarkUnfoundLostCommand final : public PGCommand {
public:
  explicit MarkUnfoundLostCommand(crimson::osd::OSD& osd) :
    PGCommand{osd,
              "mark_unfound_lost",
              "name=pgid,type=CephPgid,req=false"
              " name=mulcmd,type=CephChoices,strings=revert|delete",
              "mark all unfound objects in this pg as lost, either"
              " removing or reverting to a prior version if one is"
              " available"}
  {}
  seastar::future<tell_result_t>
  do_command(Ref<PG> pg,
             const cmdmap_t& cmdmap,
             std::string_view,
             ceph::bufferlist&&) const final
  {
    // translate the user-supplied sub-command into the matching
    // pg_log_entry_t operation; anything else is rejected up front
    std::string mode;
    cmd_getval(cmdmap, "mulcmd", mode);
    int op = -1;
    if (mode == "revert") {
      op = pg_log_entry_t::LOST_REVERT;
    } else if (mode == "delete") {
      op = pg_log_entry_t::LOST_DELETE;
    }
    if (op < 0) {
      return seastar::make_ready_future<tell_result_t>(tell_result_t{
        -EINVAL, "mode must be 'revert' or 'delete'; mark not yet implemented"});
    }
    return pg->mark_unfound_lost(op).then([] {
      // TODO
      return seastar::make_ready_future<tell_result_t>();
    });
  }
};
} // namespace crimson::admin::pg
namespace crimson::admin {
/// Create an admin-socket hook of the given type, forwarding the
/// constructor arguments.
template <class Hook, class... Args>
std::unique_ptr<AdminSocketHook> make_asok_hook(Args&&... args)
{
  return std::make_unique<Hook>(std::forward<Args>(args)...);
}
// explicit instantiations for the pg commands defined above
template std::unique_ptr<AdminSocketHook>
make_asok_hook<crimson::admin::pg::QueryCommand>(crimson::osd::OSD& osd);
template std::unique_ptr<AdminSocketHook>
make_asok_hook<crimson::admin::pg::MarkUnfoundLostCommand>(crimson::osd::OSD& osd);
} // namespace crimson::admin
| 5,424 | 31.291667 | 83 | cc |
null | ceph-main/src/crimson/admin/pg_commands.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
namespace crimson::admin::pg {
class QueryCommand;
class MarkUnfoundLostCommand;
} // namespace crimson::admin::pg
| 230 | 20 | 70 | h |
null | ceph-main/src/crimson/auth/AuthClient.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <cstdint>
#include <string>
#include <tuple>
#include <vector>
#include "include/buffer_fwd.h"
#include "crimson/net/Fwd.h"
class CryptoKey;
namespace crimson::auth {
/// Exception type thrown by auth implementations when a handshake
/// message cannot be produced.
class error : public std::logic_error {
public:
  using std::logic_error::logic_error;
};
/// numeric id of an authentication method (a CEPH_AUTH_* value)
using method_t = uint32_t;
// TODO: revisit interfaces for non-dummy implementations
/// Client side of the authentication handshake.
class AuthClient {
public:
  virtual ~AuthClient() {}
  /// First message of the client-side handshake.
  struct auth_request_t {
    method_t auth_method;                  ///< chosen CEPH_AUTH_* method
    std::vector<uint32_t> preferred_modes; ///< connection modes, most preferred first
    ceph::bufferlist auth_bl;              ///< method-specific payload
  };
  /// Build an authentication request to begin the handshake
  ///
  /// @throw auth::error if unable to build the request
  virtual auth_request_t get_auth_request(crimson::net::Connection &conn,
					  AuthConnectionMeta &auth_meta) = 0;
  /// Handle server's request to continue the handshake
  ///
  /// @throw auth::error if unable to build the request
  virtual ceph::bufferlist handle_auth_reply_more(
    crimson::net::Connection &conn,
    AuthConnectionMeta &auth_meta,
    const ceph::bufferlist& bl) = 0;
  /// Handle server's indication that authentication succeeded
  ///
  /// @return 0 if authenticated, a negative number otherwise
  virtual int handle_auth_done(
    crimson::net::Connection &conn,
    AuthConnectionMeta &auth_meta,
    uint64_t global_id,
    uint32_t con_mode,
    const bufferlist& bl) = 0;
  /// Handle server's indication that the previous auth attempt failed
  ///
  /// @return 0 if will try next auth method, a negative number if we have no
  /// more options
  virtual int handle_auth_bad_method(
    crimson::net::Connection &conn,
    AuthConnectionMeta &auth_meta,
    uint32_t old_auth_method,
    int result,
    const std::vector<uint32_t>& allowed_methods,
    const std::vector<uint32_t>& allowed_modes) = 0;
};
} // namespace crimson::auth
| 1,959 | 26.222222 | 77 | h |
null | ceph-main/src/crimson/auth/AuthServer.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <cstdint>
#include <utility>
#include <vector>
#include "crimson/net/Fwd.h"
struct AuthAuthorizeHandler;
namespace crimson::auth {
/// Server side of the authentication handshake: answers the peer's
/// authentication requests (counterpart of AuthClient).
class AuthServer {
public:
  virtual ~AuthServer() {}
  // Get authentication methods and connection modes for the given peer type
  virtual std::pair<std::vector<uint32_t>, std::vector<uint32_t>>
  get_supported_auth_methods(int peer_type) = 0;
  // Get support connection modes for the given peer type and auth method
  virtual uint32_t pick_con_mode(
    int peer_type,
    uint32_t auth_method,
    const std::vector<uint32_t>& preferred_modes) = 0;
  // return an AuthAuthorizeHandler for the given peer type and auth method
  virtual AuthAuthorizeHandler* get_auth_authorize_handler(
    int peer_type,
    int auth_method) = 0;
  // Handle an authentication request on an incoming connection
  virtual int handle_auth_request(
    crimson::net::Connection &conn,
    AuthConnectionMeta &auth_meta,
    bool more, //< true if this is not the first part of the handshake
    uint32_t auth_method,
    const bufferlist& bl,
    uint64_t *p_peer_global_id,
    bufferlist *reply) = 0;
};
} // namespace crimson::auth
| 1,288 | 28.976744 | 80 | h |
null | ceph-main/src/crimson/auth/DummyAuth.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "AuthClient.h"
#include "AuthServer.h"
namespace crimson::auth {
/// A stub client+server auth implementation that speaks only
/// CEPH_AUTH_NONE with the CRC connection mode.  Handshake callbacks
/// that should never be reached under that protocol flow ceph_abort().
class DummyAuthClientServer : public AuthClient,
                              public AuthServer {
public:
  DummyAuthClientServer() {}
  // client
  std::pair<std::vector<uint32_t>, std::vector<uint32_t>>
  get_supported_auth_methods(int peer_type) final {
    // only the "none" method and the "none" mode are supported
    return {{CEPH_AUTH_NONE}, {CEPH_AUTH_NONE}};
  }
  uint32_t pick_con_mode(int peer_type,
                         uint32_t auth_method,
                         const std::vector<uint32_t>& preferred_modes) final {
    // the peer is required to have asked for none/crc
    ceph_assert(auth_method == CEPH_AUTH_NONE);
    ceph_assert(preferred_modes.size() &&
                preferred_modes[0] == CEPH_CON_MODE_CRC);
    return CEPH_CON_MODE_CRC;
  }
  AuthAuthorizeHandler* get_auth_authorize_handler(int peer_type,
                                                   int auth_method) final {
    return nullptr;
  }
  AuthClient::auth_request_t get_auth_request(
    crimson::net::Connection &conn,
    AuthConnectionMeta &auth_meta) override {
    return {CEPH_AUTH_NONE, {CEPH_CON_MODE_CRC}, {}};
  }
  ceph::bufferlist handle_auth_reply_more(
    crimson::net::Connection &conn,
    AuthConnectionMeta &auth_meta,
    const bufferlist& bl) override {
    // CEPH_AUTH_NONE never continues the handshake
    ceph_abort();
  }
  int handle_auth_done(
    crimson::net::Connection &conn,
    AuthConnectionMeta &auth_meta,
    uint64_t global_id,
    uint32_t con_mode,
    const bufferlist& bl) override {
    return 0;
  }
  int handle_auth_bad_method(
    crimson::net::Connection &conn,
    AuthConnectionMeta &auth_meta,
    uint32_t old_auth_method,
    int result,
    const std::vector<uint32_t>& allowed_methods,
    const std::vector<uint32_t>& allowed_modes) override {
    // with a single method there is no fallback to negotiate
    ceph_abort();
  }
  // server
  int handle_auth_request(
    crimson::net::Connection &conn,
    AuthConnectionMeta &auth_meta,
    bool more,
    uint32_t auth_method,
    const bufferlist& bl,
    uint64_t *p_peer_global_id,
    bufferlist *reply) override {
    return 1;
  }
};
} // namespace crimson::auth
| 2,053 | 24.675 | 70 | h |
null | ceph-main/src/crimson/auth/KeyRing.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "KeyRing.h"
#include <boost/algorithm/string.hpp>
#include <seastar/core/do_with.hh>
#include <seastar/core/fstream.hh>
#include <seastar/core/future-util.hh>
#include <seastar/core/reactor.hh>
#include "common/buffer_seastar.h"
#include "auth/KeyRing.h"
#include "include/denc.h"
#include "crimson/common/buffer_io.h"
#include "crimson/common/config_proxy.h"
namespace crimson::auth {
// Populate `keyring` from the first existing file among the paths
// listed in the "keyring" config option (a ','/';'-separated list).
// All candidates are stat'ed concurrently; if none exists the keyring
// is returned unmodified.
seastar::future<KeyRing*> load_from_keyring(KeyRing* keyring)
{
  std::vector<std::string> paths;
  boost::split(paths, crimson::common::local_conf()->keyring,
               boost::is_any_of(",;"));
  // (exists, path) accumulator; default-constructed pair starts as
  // (false, "")
  std::pair<bool, std::string> found;
  return seastar::map_reduce(paths, [](auto path) {
    return seastar::engine().file_exists(path).then([path](bool file_exists) {
      return std::make_pair(file_exists, path);
    });
  }, std::move(found), [](auto found, auto file_exists_and_path) {
    // keep the first existing path encountered in reduce order
    if (!found.first && file_exists_and_path.first) {
      found = std::move(file_exists_and_path);
    }
    return found;
  }).then([keyring] (auto file_exists_and_path) {
    const auto& [exists, path] = file_exists_and_path;
    if (exists) {
      return read_file(path).then([keyring](auto buf) {
        // wrap the raw buffer in a bufferlist and decode it in place
        bufferlist bl;
        bl.append(buffer::create(std::move(buf)));
        auto i = bl.cbegin();
        keyring->decode(i);
        return seastar::make_ready_future<KeyRing*>(keyring);
      });
    } else {
      return seastar::make_ready_future<KeyRing*>(keyring);
    }
  });
}
// Populate `keyring` from the file named by the "keyfile" config
// option, if set: the file holds a single base64-encoded secret which
// is registered under this daemon's entity name.
seastar::future<KeyRing*> load_from_keyfile(KeyRing* keyring)
{
  const auto& path = crimson::common::local_conf()->keyfile;
  if (path.empty()) {
    // no keyfile configured: nothing to load
    return seastar::make_ready_future<KeyRing*>(keyring);
  }
  return read_file(path).then([keyring](auto buf) {
    EntityAuth auth;
    auth.key.decode_base64(std::string(buf.begin(), buf.end()));
    keyring->add(crimson::common::local_conf()->name, auth);
    return seastar::make_ready_future<KeyRing*>(keyring);
  });
}
// Populate `keyring` from the base64 secret given directly in the
// "key" config option (if any), registered under this daemon's entity
// name.
seastar::future<KeyRing*> load_from_key(KeyRing* keyring)
{
  const auto& encoded = crimson::common::local_conf()->key;
  if (!encoded.empty()) {
    EntityAuth auth;
    auth.key.decode_base64(encoded);
    keyring->add(crimson::common::local_conf()->name, auth);
  }
  return seastar::make_ready_future<KeyRing*>(keyring);
}
} // namespace crimson::auth
| 2,447 | 29.6 | 78 | cc |
null | ceph-main/src/crimson/auth/KeyRing.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/future.hh>
class KeyRing;
namespace crimson::auth {
// see KeyRing::from_ceph_context
seastar::future<KeyRing*> load_from_keyring(KeyRing* keyring);
seastar::future<KeyRing*> load_from_keyfile(KeyRing* keyring);
seastar::future<KeyRing*> load_from_key(KeyRing* keyring);
}
| 420 | 25.3125 | 71 | h |
null | ceph-main/src/crimson/common/assert.cc | #include <cstdarg>
#include <iostream>
#include <seastar/util/backtrace.hh>
#include <seastar/core/reactor.hh>
#include "include/ceph_assert.h"
#include "crimson/common/log.h"
namespace ceph {
// Overload taking the pre-built assert_data context used by the
// ceph_assert() macro; forwards to the plain overload below.
[[gnu::cold]] void __ceph_assert_fail(const ceph::assert_data &ctx)
{
  __ceph_assert_fail(ctx.assertion, ctx.file, ctx.line, ctx.function);
}
// Log the failed assertion plus a backtrace, then abort the process.
[[gnu::cold]] void __ceph_assert_fail(const char* assertion,
                                      const char* file, int line,
                                      const char* func)
{
  seastar::logger& logger = crimson::get_logger(0);
  // seastar::logger formats with fmt-style "{}" placeholders; the old
  // format contained a literal printf-style "%s", which was emitted
  // verbatim while the final backtrace argument was silently dropped.
  logger.error("{}:{} : In function '{}', ceph_assert({})\n"
	       "{}",
	       file, line, func, assertion,
	       seastar::current_backtrace());
  std::cout << std::flush;
  abort();
}
// Like __ceph_assert_fail() but with an additional printf-style user
// message that is rendered before logging.
[[gnu::cold]] void __ceph_assertf_fail(const char *assertion,
                                       const char *file, int line,
                                       const char *func, const char* msg,
                                       ...)
{
  // render the printf-style user message into a fixed-size buffer
  char buf[8096];
  va_list args;
  va_start(args, msg);
  std::vsnprintf(buf, sizeof(buf), msg, args);
  va_end(args);
  seastar::logger& logger = crimson::get_logger(0);
  // fmt-style placeholders: the old format printed a literal "%s" and
  // silently dropped the backtrace argument.
  logger.error("{}:{} : In function '{}', ceph_assert({})\n"
	       "{}\n{}\n",
	       file, line, func, assertion,
	       buf,
	       seastar::current_backtrace());
  std::cout << std::flush;
  abort();
}
// Log an explicit abort (ceph_abort_msg) with its message and a
// backtrace, then abort the process.
[[gnu::cold]] void __ceph_abort(const char* file, int line,
                                const char* func, const std::string& msg)
{
  seastar::logger& logger = crimson::get_logger(0);
  // fmt-style placeholders: the old format printed a literal "%s" and
  // silently dropped the backtrace argument.
  logger.error("{}:{} : In function '{}', abort({})\n"
	       "{}",
	       file, line, func, msg,
	       seastar::current_backtrace());
  std::cout << std::flush;
  abort();
}
// Log an explicit abort with a printf-style formatted message and a
// backtrace, then abort the process.
[[gnu::cold]] void __ceph_abortf(const char* file, int line,
                                 const char* func, const char* fmt,
                                 ...)
{
  // render the printf-style user message into a fixed-size buffer
  char buf[8096];
  va_list args;
  va_start(args, fmt);
  std::vsnprintf(buf, sizeof(buf), fmt, args);
  va_end(args);
  seastar::logger& logger = crimson::get_logger(0);
  logger.error("{}:{} : In function '{}', abort()\n"
	       "{}\n{}\n",
	       file, line, func,
	       buf,
	       seastar::current_backtrace());
  std::cout << std::flush;
  abort();
}
}
| 2,531 | 29.878049 | 75 | cc |
null | ceph-main/src/crimson/common/auth_handler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
class EntityName;
class AuthCapsInfo;
namespace crimson::common {
// Callback interface notified when a peer completes authentication.
class AuthHandler {
public:
  // the peer just got authorized
  virtual void handle_authentication(const EntityName& name,
				     const AuthCapsInfo& caps) = 0;
  virtual ~AuthHandler() = default;
};
}
| 384 | 20.388889 | 70 | h |
null | ceph-main/src/crimson/common/buffer_io.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "buffer_io.h"
#include <seastar/core/reactor.hh>
#include <seastar/core/fstream.hh>
#include <seastar/core/do_with.hh>
#include "include/buffer.h"
namespace crimson {
// Write the contents of `bl` to a freshly created (or truncated) file
// at `fn` with the given permissions, using seastar's DMA file API.
seastar::future<> write_file(ceph::buffer::list&& bl,
                             seastar::sstring fn,
                             seastar::file_permissions permissions)
{
  const auto flags = (seastar::open_flags::wo |
                      seastar::open_flags::create |
                      seastar::open_flags::truncate);
  seastar::file_open_options foo;
  foo.create_permissions = permissions;
  return seastar::open_file_dma(fn, flags, foo).then(
    [bl=std::move(bl)](seastar::file f) {
      return seastar::make_file_output_stream(f).then(
        [bl=std::move(bl), f=std::move(f)](seastar::output_stream<char> out) {
          // do_with keeps the stream, the file handle and the buffer
          // list alive across the asynchronous writes below
          return seastar::do_with(std::move(out),
                                  std::move(f),
                                  std::move(bl),
                                  [](seastar::output_stream<char>& out,
                                     seastar::file& f,
                                     ceph::buffer::list& bl) {
            // write each contiguous buffer of the list, in order, then
            // close (which also flushes) the stream
            return seastar::do_for_each(bl.buffers(), [&out](auto& buf) {
              return out.write(buf.c_str(), buf.length());
            }).then([&out] {
              return out.close();
            });
          });
        });
    });
}
// Read the entire file at `fn` into a single temporary buffer.
seastar::future<seastar::temporary_buffer<char>>
read_file(const seastar::sstring fn)
{
  return seastar::open_file_dma(fn, seastar::open_flags::ro).then(
    [] (seastar::file f) {
      // query the size first, then read exactly that many bytes
      return f.size().then([f = std::move(f)](size_t s) {
        return seastar::do_with(seastar::make_file_input_stream(f),
                                [s](seastar::input_stream<char>& in) {
          return in.read_exactly(s);
        });
      });
    });
}
}
| 1,855 | 31 | 76 | cc |
null | ceph-main/src/crimson/common/buffer_io.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/future.hh>
#include <seastar/core/file-types.hh>
#include "include/buffer_fwd.h"
namespace crimson {
seastar::future<> write_file(ceph::buffer::list&& bl,
seastar::sstring fn,
seastar::file_permissions= // 0644
(seastar::file_permissions::user_read |
seastar::file_permissions::user_write |
seastar::file_permissions::group_read |
seastar::file_permissions::others_read));
seastar::future<seastar::temporary_buffer<char>>
read_file(const seastar::sstring fn);
}
| 803 | 35.545455 | 75 | h |
null | ceph-main/src/crimson/common/condition_variable.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/future.hh>
#include <seastar/core/condition-variable.hh>
#include <seastar/core/loop.hh>
#include "crimson/common/interruptible_future.h"
namespace crimson {
// seastar::condition_variable adapted for interruptible contexts:
// wait() keeps re-checking a predicate and, once it holds, runs an
// asynchronous action whose errorator and interrupt-condition types
// are deduced from the action's return type.
class condition_variable : public seastar::condition_variable {
public:
  // Wait (interruptibly) until pred() returns true, then invoke
  // action().  The predicate is re-evaluated after every signal, so
  // spurious wakeups are handled by simply looping.
  template <typename Pred, typename Func>
  auto wait(
    Pred&& pred,
    Func&& action) noexcept {
    using func_result_t = std::invoke_result_t<Func>;
    // deduce the interruptible-future traits from the action
    using intr_errorator_t = typename func_result_t::interrupt_errorator_type;
    using intr_cond_t = typename func_result_t::interrupt_cond_type;
    using interruptor = crimson::interruptible::interruptor<intr_cond_t>;
    return interruptor::repeat(
      [this, pred=std::forward<Pred>(pred),
       action=std::forward<Func>(action)]()
      -> typename intr_errorator_t::template future<seastar::stop_iteration> {
      if (!pred()) {
        // condition not met yet: block until signalled, then loop
        return seastar::condition_variable::wait().then([] {
          return seastar::make_ready_future<
	    seastar::stop_iteration>(seastar::stop_iteration::no);
        });
      } else {
        // condition met: run the action and terminate the loop
        return action().si_then([] {
          return seastar::make_ready_future<
	    seastar::stop_iteration>(seastar::stop_iteration::yes);
        });
      }
    });
  }
};
} // namespace crimson
| 1,321 | 29.045455 | 78 | h |
null | ceph-main/src/crimson/common/config_proxy.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "config_proxy.h"
#include <filesystem>
#include "crimson/common/buffer_io.h"
namespace crimson::common {
// Construct the per-shard proxy.  Only shard 0 allocates the actual
// ConfigValues and the single md_config_t instance; the other shards
// are populated later by start().
ConfigProxy::ConfigProxy(const EntityName& name, std::string_view cluster)
{
  if (seastar::this_shard_id() != 0) {
    return;
  }
  // set the initial value on CPU#0
  values.reset(seastar::make_lw_shared<ConfigValues>());
  values.get()->name = name;
  values.get()->cluster = cluster;
  // and the only copy of md_config_impl<> is allocated on CPU#0
  local_config.reset(new md_config_t{*values, obs_mgr, true});
  // daemon-type specific default keyring locations
  if (name.is_mds()) {
    local_config->set_val_default(*values, obs_mgr,
				  "keyring", "$mds_data/keyring");
  } else if (name.is_osd()) {
    local_config->set_val_default(*values, obs_mgr,
				  "keyring", "$osd_data/keyring");
  }
}
// Distribute the shard-0 state to all other shards: each one receives
// a foreign copy of the ConfigValues and a raw pointer to the single
// md_config_t owned by shard 0.
seastar::future<> ConfigProxy::start()
{
  // populate values and config to all other shards
  if (!values) {
    return seastar::make_ready_future<>();
  }
  return container().invoke_on_others([this](auto& proxy) {
    return values.copy().then([config=local_config.get(),
                               &proxy](auto foreign_values) {
      proxy.values.reset();
      proxy.values = std::move(foreign_values);
      proxy.remote_config = config;
      return seastar::make_ready_future<>();
    });
  });
}
// Dump this shard's current configuration to the given formatter.
void ConfigProxy::show_config(ceph::Formatter* f) const {
  get_config().show_config(*values, f);
}
// Try each candidate conf-file path in order until one can be read and
// parsed successfully.  A missing/unreadable file or a parse error
// simply advances to the next candidate; running out of candidates is
// not an error here.
seastar::future<> ConfigProxy::parse_config_files(const std::string& conf_files)
{
  auto conffile_paths =
    get_config().get_conffile_paths(*values,
                                    conf_files.empty() ? nullptr : conf_files.c_str(),
                                    &std::cerr,
                                    CODE_ENVIRONMENT_DAEMON);
  return seastar::do_with(std::move(conffile_paths), [this] (auto& paths) {
    return seastar::repeat([path=paths.begin(), e=paths.end(), this]() mutable {
      if (path == e) {
        // tried all conffile, none of them works
        return seastar::make_ready_future<seastar::stop_iteration>(
          seastar::stop_iteration::yes);
      }
      return crimson::read_file(*path++).then([this](auto&& buf) {
        return do_change([buf=std::move(buf), this](ConfigValues& values) {
          if (get_config().parse_buffer(values, obs_mgr,
                                        buf.get(), buf.size(),
                                        &std::cerr) == 0) {
            get_config().update_legacy_vals(values);
          } else {
            throw std::invalid_argument("parse error");
          }
        }).then([] {
          // this one works!
          return seastar::make_ready_future<seastar::stop_iteration>(
            seastar::stop_iteration::yes);
        });
      }).handle_exception_type([] (const std::filesystem::filesystem_error&) {
        // file missing or unreadable: try the next candidate
        return seastar::make_ready_future<seastar::stop_iteration>(
          seastar::stop_iteration::no);
      }).handle_exception_type([] (const std::invalid_argument&) {
        // parse failure: try the next candidate
        return seastar::make_ready_future<seastar::stop_iteration>(
          seastar::stop_iteration::no);
      });
    });
  });
}
ConfigProxy::ShardedConfig ConfigProxy::sharded_conf;
}
| 3,238 | 33.457447 | 86 | cc |
null | ceph-main/src/crimson/common/config_proxy.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/reactor.hh>
#include <seastar/core/sharded.hh>
#include "common/config.h"
#include "common/config_obs.h"
#include "common/config_obs_mgr.h"
#include "common/errno.h"
namespace ceph {
class Formatter;
}
namespace crimson::common {
// a facade for managing config. each shard has its own copy of ConfigProxy.
//
// In seastar-osd, there could be multiple instances of @c ConfigValues in a
// single process, as we are using a variant of read-copy-update mechinary to
// update the settings at runtime.
class ConfigProxy : public seastar::peering_sharded_service<ConfigProxy>
{
  using LocalConfigValues = seastar::lw_shared_ptr<ConfigValues>;
  // this shard's (possibly foreign) snapshot of the current values
  seastar::foreign_ptr<LocalConfigValues> values;
  // non-owner shards point at the shard-0 md_config_t...
  md_config_t* remote_config = nullptr;
  // ...which shard 0 itself owns here
  std::unique_ptr<md_config_t> local_config;
  using ConfigObserver = ceph::md_config_obs_impl<ConfigProxy>;
  ObserverMgr<ConfigObserver> obs_mgr;
  const md_config_t& get_config() const {
    return remote_config ? *remote_config : * local_config;
  }
  md_config_t& get_config() {
    return remote_config ? *remote_config : * local_config;
  }
  // apply changes to all shards
  // @param func a functor which accepts @c "ConfigValues&"
  template<typename Func>
  seastar::future<> do_change(Func&& func) {
    return container().invoke_on(values.get_owner_shard(),
      [func = std::move(func)](ConfigProxy& owner) {
        // apply the changes to a copy of the values
        auto new_values = seastar::make_lw_shared(*owner.values);
        new_values->changed.clear();
        func(*new_values);
        // always apply the new settings synchronously on the owner shard, to
        // avoid racings with other do_change() calls in parallel.
        ObserverMgr<ConfigObserver>::rev_obs_map rev_obs;
        owner.values.reset(new_values);
        // collect the observers interested in the changed keys...
        owner.obs_mgr.for_each_change(owner.values->changed, owner,
                                      [&rev_obs](ConfigObserver *obs,
                                                 const std::string &key) {
                                        rev_obs[obs].insert(key);
                                      }, nullptr);
        // ...and notify each of them once with its full key set
        for (auto& [obs, keys] : rev_obs) {
          obs->handle_conf_change(owner, keys);
        }
        // then publish the new values to every other shard and notify
        // the observers registered there as well
        return seastar::parallel_for_each(boost::irange(1u, seastar::smp::count),
                                          [&owner, new_values] (auto cpu) {
          return owner.container().invoke_on(cpu,
            [foreign_values = seastar::make_foreign(new_values)](ConfigProxy& proxy) mutable {
              proxy.values.reset();
              proxy.values = std::move(foreign_values);
              ObserverMgr<ConfigObserver>::rev_obs_map rev_obs;
              proxy.obs_mgr.for_each_change(proxy.values->changed, proxy,
                                            [&rev_obs](ConfigObserver *obs, const std::string& key) {
                                              rev_obs[obs].insert(key);
                                            }, nullptr);
              for (auto& obs_keys : rev_obs) {
                obs_keys.first->handle_conf_change(proxy, obs_keys.second);
              }
            });
        }).finally([new_values] {
          new_values->changed.clear();
        });
      });
  }
public:
  ConfigProxy(const EntityName& name, std::string_view cluster);
  const ConfigValues* operator->() const noexcept {
    return values.get();
  }
  const ConfigValues get_config_values() {
     return *values.get();
  }
  ConfigValues* operator->() noexcept {
    return values.get();
  }
  // required by sharded<>
  seastar::future<> start();
  seastar::future<> stop() {
    return seastar::make_ready_future<>();
  }
  void add_observer(ConfigObserver* obs) {
    obs_mgr.add_observer(obs);
  }
  void remove_observer(ConfigObserver* obs) {
    obs_mgr.remove_observer(obs);
  }
  // remove an override value; throws std::invalid_argument on failure
  seastar::future<> rm_val(const std::string& key) {
    return do_change([key, this](ConfigValues& values) {
      auto ret = get_config().rm_val(values, key);
      if (ret < 0) {
        throw std::invalid_argument(cpp_strerror(ret));
      }
    });
  }
  // set a config value on all shards; throws std::invalid_argument on
  // failure
  seastar::future<> set_val(const std::string& key,
                            const std::string& val) {
    return do_change([key, val, this](ConfigValues& values) {
      std::stringstream err;
      auto ret = get_config().set_val(values, obs_mgr, key, val, &err);
      if (ret < 0) {
        throw std::invalid_argument(err.str());
      }
    });
  }
  int get_val(std::string_view key, std::string *val) const {
    return get_config().get_val(*values, key, val);
  }
  template<typename T>
  const T get_val(std::string_view key) const {
    return get_config().template get_val<T>(*values, key);
  }
  int get_all_sections(std::vector<std::string>& sections) const {
    return get_config().get_all_sections(sections);
  }
  int get_val_from_conf_file(const std::vector<std::string>& sections,
			     const std::string& key, std::string& out,
			     bool expand_meta) const {
    return get_config().get_val_from_conf_file(*values, sections, key,
					       out, expand_meta);
  }
  unsigned get_osd_pool_default_min_size(uint8_t size) const {
    return get_config().get_osd_pool_default_min_size(*values, size);
  }
  // apply key/value overrides received from the monitors
  seastar::future<>
  set_mon_vals(const std::map<std::string,std::string,std::less<>>& kv) {
    return do_change([kv, this](ConfigValues& values) {
      get_config().set_mon_vals(nullptr, values, obs_mgr, kv, nullptr);
    });
  }
  // parse an "injectargs"-style string of overrides
  seastar::future<> inject_args(const std::string& s) {
    return do_change([s, this](ConfigValues& values) {
      std::stringstream err;
      if (get_config().injectargs(values, obs_mgr, s, &err)) {
        throw std::invalid_argument(err.str());
      }
    });
  }
  void show_config(ceph::Formatter* f) const;
  seastar::future<> parse_argv(std::vector<const char*>& argv) {
    // we could pass whatever is unparsed to seastar, but seastar::app_template
    // is used for driving the seastar application, and
    // crimson::common::ConfigProxy is not available until seastar engine is up
    // and running, so we have to feed the command line args to app_template
    // first, then pass them to ConfigProxy.
    return do_change([&argv, this](ConfigValues& values) {
      get_config().parse_argv(values,
                              obs_mgr,
                              argv,
                              CONF_CMDLINE);
    });
  }
  seastar::future<> parse_env() {
    return do_change([this](ConfigValues& values) {
      get_config().parse_env(CEPH_ENTITY_TYPE_OSD,
                             values,
                             obs_mgr);
    });
  }
  seastar::future<> parse_config_files(const std::string& conf_files);
  using ShardedConfig = seastar::sharded<ConfigProxy>;
private:
  static ShardedConfig sharded_conf;
  friend ConfigProxy& local_conf();
  friend ShardedConfig& sharded_conf();
};
// accessor for this shard's instance of the sharded ConfigProxy
inline ConfigProxy& local_conf() {
  return ConfigProxy::sharded_conf.local();
}
// access to the whole sharded service (e.g. for start()/stop())
inline ConfigProxy::ShardedConfig& sharded_conf() {
  return ConfigProxy::sharded_conf;
}
// convenience typed getter on the local shard's config
template<typename T>
const T get_conf(const std::string& key) {
  return local_conf().template get_val<T>(key);
}
}
| 7,066 | 32.023364 | 92 | h |
null | ceph-main/src/crimson/common/errorator-loop.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <seastar/core/future.hh>
#include "crimson/common/errorator.h"
namespace crimson {
// Bookkeeping for crimson::parallel_for_each() below: owns the
// not-yet-resolved errorated futures, chains itself as the
// continuation of one pending future at a time, and deletes itself
// after completing the result promise.  Mirrors seastar's
// parallel_for_each_state.
template <class... AllowedErrors>
class parallel_for_each_state final : private seastar::continuation_base<> {
  using future_t = typename errorator<AllowedErrors...>::template future<>;
  std::vector<future_t> _incomplete;
  seastar::promise<> _result;
  std::exception_ptr _ex;
private:
  // Pop already-resolved futures (remembering the last failure seen);
  // if one is still pending, re-register ourselves as its
  // continuation, otherwise complete _result and self-destruct.
  void wait_for_one() noexcept {
    while (!_incomplete.empty() && _incomplete.back().available()) {
      if (_incomplete.back().failed()) {
        _ex = _incomplete.back().get_exception();
      }
      _incomplete.pop_back();
    }
    if (!_incomplete.empty()) {
      seastar::internal::set_callback(std::move(_incomplete.back()),
                                      static_cast<continuation_base<>*>(this));
      _incomplete.pop_back();
      return;
    }
    if (__builtin_expect(bool(_ex), false)) {
      _result.set_exception(std::move(_ex));
    } else {
      _result.set_value();
    }
    delete this;
  }
  // invoked when the future we are chained to resolves
  virtual void run_and_dispose() noexcept override {
    if (_state.failed()) {
      _ex = std::move(_state).get_exception();
    }
    _state = {};
    wait_for_one();
  }
  task* waiting_task() noexcept override { return _result.waiting_task(); }
public:
  parallel_for_each_state(size_t n) {
    _incomplete.reserve(n);
  }
  void add_future(future_t&& f) {
    _incomplete.push_back(std::move(f));
  }
  // kicks off the draining loop; ownership of *this passes to the
  // future chain (see wait_for_one)
  future_t get_future() {
    auto ret = _result.get_future();
    wait_for_one();
    return ret;
  }
};
// Errorated counterpart of seastar::parallel_for_each(): invoke `func`
// eagerly on every element of [first, last) and resolve once all the
// resulting futures have, propagating at most one captured exception.
template <typename Iterator, typename Func, typename... AllowedErrors>
static inline typename errorator<AllowedErrors...>::template future<>
parallel_for_each(Iterator first, Iterator last, Func&& func) noexcept {
  parallel_for_each_state<AllowedErrors...>* s = nullptr;
  // Process all elements, giving each future the following treatment:
  // - available, not failed: do nothing
  // - available, failed: collect exception in ex
  // - not available: collect in s (allocating it if needed)
  for (;first != last; ++first) {
    auto f = seastar::futurize_invoke(std::forward<Func>(func), *first);
    if (!f.available() || f.failed()) {
      if (!s) {
        using itraits = std::iterator_traits<Iterator>;
        // estimate the number of remaining elements (plus this one) so
        // the state can reserve its vector up front
        auto n = (seastar::internal::iterator_range_estimate_vector_capacity(
              first, last, typename itraits::iterator_category()) + 1);
        s = new parallel_for_each_state<AllowedErrors...>(n);
      }
      s->add_future(std::move(f));
    }
  }
  // If any futures were not available, hand off to parallel_for_each_state::start().
  // Otherwise we can return a result immediately.
  if (s) {
    // s->get_future() takes ownership of s (and chains it to one of the futures it contains)
    // so this isn't a leak
    return s->get_future();
  }
  return seastar::make_ready_future<>();
}
} // namespace crimson
| 3,003 | 31.652174 | 93 | h |
null | ceph-main/src/crimson/common/errorator.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <exception>
#include <system_error>
#include <seastar/core/future-util.hh>
#include "crimson/common/utility.h"
#include "include/ceph_assert.h"
namespace crimson::interruptible {
template <typename, typename>
class parallel_for_each_state;
template <typename, typename>
class interruptible_future_detail;
}
namespace crimson {
// crimson::do_for_each_state is the mirror of seastar::do_for_each_state with FutureT
//
// Heap-allocated continuation driving the remainder of a sequential
// do_for_each() once one of the per-element futures turned out not to be
// immediately available.  It frees itself from run_and_dispose().
template <typename Iterator, typename AsyncAction, typename FutureT>
class do_for_each_state final : public seastar::continuation_base<> {
  Iterator _begin;
  Iterator _end;
  AsyncAction _action;
  seastar::promise<> _pr; // resolved when the whole iteration finishes
public:
  do_for_each_state(Iterator begin, Iterator end, AsyncAction action,
                    FutureT&& first_unavailable)
    : _begin(std::move(begin)), _end(std::move(end)), _action(std::move(action)) {
    // re-arm this continuation on the future that stalled the caller
    seastar::internal::set_callback(std::move(first_unavailable), this);
  }
  // Called by the reactor when the awaited future resolves.  Exactly one
  // of three things happens: the failure is forwarded to _pr, we re-arm
  // ourselves on the next unavailable future (releasing `zis`), or the
  // iteration completes and `zis` deletes this object.
  virtual void run_and_dispose() noexcept override {
    std::unique_ptr<do_for_each_state> zis(this);
    if (_state.failed()) {
      _pr.set_urgent_state(std::move(_state));
      return;
    }
    while (_begin != _end) {
      auto f = seastar::futurize_invoke(_action, *_begin);
      ++_begin;
      if (f.failed()) {
        f._forward_to(std::move(_pr));
        return;
      }
      if (!f.available() || seastar::need_preempt()) {
        _state = {};
        seastar::internal::set_callback(std::move(f), this);
        zis.release(); // ownership passes back to the reactor callback
        return;
      }
    }
    _pr.set_value();
  }
  task* waiting_task() noexcept override {
    return _pr.waiting_task();
  }
  FutureT get_future() {
    return _pr.get_future();
  }
};
// Sequentially applies `action` to [begin, end), short-circuiting on the
// first failed future.  Stays allocation-free while every intermediate
// future is immediately ready; otherwise the rest of the iteration is
// handed to a heap-allocated do_for_each_state.
template<typename Iterator, typename AsyncAction,
         typename FutureT = std::invoke_result_t<AsyncAction, typename Iterator::reference>>
inline FutureT do_for_each_impl(Iterator begin, Iterator end, AsyncAction action) {
  while (begin != end) {
    auto f = seastar::futurize_invoke(action, *begin);
    ++begin;
    if (f.failed()) {
      return f;
    }
    if (!f.available() || seastar::need_preempt()) {
      // s will be freed by run_and_dispose()
      auto* s = new crimson::do_for_each_state<Iterator, AsyncAction, FutureT>{
        std::move(begin), std::move(end), std::move(action), std::move(f)};
      return s->get_future();
    }
  }
  return seastar::make_ready_future<>();
}
// Errorator-aware counterpart of seastar::do_for_each() over an iterator
// range; simply delegates to the fast-path implementation above.
template<typename Iterator, typename AsyncAction>
inline auto do_for_each(Iterator begin, Iterator end, AsyncAction action) {
  return ::crimson::do_for_each_impl(
    std::move(begin), std::move(end), std::move(action));
}
// Range overload: iterates a whole container between std::begin() and
// std::end(), forwarding to the iterator-pair overload.
template<typename Container, typename AsyncAction>
inline auto do_for_each(Container& c, AsyncAction action) {
  auto first = std::begin(c);
  auto last = std::end(c);
  return ::crimson::do_for_each(std::move(first), std::move(last),
                                std::move(action));
}
// Errorator-aware seastar::repeat(): keeps invoking `action` (which must
// return an errorated future of seastar::stop_iteration) until it yields
// stop_iteration::yes or fails.  Ready results are consumed inline so no
// continuation is allocated per round on the fast path.
template<typename AsyncAction>
inline auto repeat(AsyncAction action) {
  using errorator_t =
    typename ::seastar::futurize_t<std::invoke_result_t<AsyncAction>>::errorator_type;
  while (true) {
    auto f = ::seastar::futurize_invoke(action);
    if (f.failed()) {
      return errorator_t::template make_exception_future2<>(
        f.get_exception()
      );
    } else if (f.available()) {
      if (auto done = f.get0()) {
        return errorator_t::template make_ready_future<>();
      }
    } else {
      // not ready yet -- chain the rest of the loop onto the future
      return std::move(f)._then(
        [action = std::move(action)] (auto stop) mutable {
          if (stop == seastar::stop_iteration::yes) {
            return errorator_t::template make_ready_future<>();
          }
          return ::crimson::repeat(
            std::move(action));
        });
    }
  }
}
// define the interface between error types and errorator
//
// CRTP base every errorator-pluggable error type derives from.  It routes
// the hooks errorator and maybe_handle_error_t rely on to the concrete
// type: exception_ptr <-> error-object conversion and the type_info used
// for throw-free exception matching.
template <class ConcreteErrorT>
class error_t {
  static constexpr const std::type_info& get_exception_ptr_type_info() {
    return ConcreteErrorT::exception_ptr_type_info();
  }
  decltype(auto) static from_exception_ptr(std::exception_ptr ep) {
    return ConcreteErrorT::from_exception_ptr(std::move(ep));
  }
  template <class... AllowedErrorsT>
  friend struct errorator;
  template <class ErrorVisitorT, class FuturatorT>
  friend class maybe_handle_error_t;
protected:
  std::exception_ptr to_exception_ptr() const {
    const auto* concrete_error = static_cast<const ConcreteErrorT*>(this);
    return concrete_error->to_exception_ptr();
  }
public:
  // wrap a user handler in the concrete error type's dispatch protocol
  template <class Func>
  static decltype(auto) handle(Func&& func) {
    return ConcreteErrorT::handle(std::forward<Func>(func));
  }
};
// unthrowable_wrapper ensures compilation failure when somebody
// would like to `throw make_error<...>)()` instead of returning.
// returning allows for the compile-time verification of future's
// AllowedErrorsV and also avoid the burden of throwing.
//
// Stateless error type: the error *value* (ErrorV) is part of the type,
// so a single static instance and a single exception_ptr per type serve
// the whole program.
template <class ErrorT, ErrorT ErrorV>
struct unthrowable_wrapper : error_t<unthrowable_wrapper<ErrorT, ErrorV>> {
  unthrowable_wrapper(const unthrowable_wrapper&) = delete;
  [[nodiscard]] static const auto& make() {
    static constexpr unthrowable_wrapper instance{};
    return instance;
  }
  static auto exception_ptr() {
    return make().to_exception_ptr();
  }
  // Adapt a user handler into one callable with the raw error object.
  // The handler may take (ErrorT, raw_error) when it wants to decide
  // between handling and passing further, just ErrorT, or nothing.
  template<class Func>
  static auto handle(Func&& func) {
    return [
      func = std::forward<Func>(func)
    ] (const unthrowable_wrapper& raw_error) mutable -> decltype(auto) {
      if constexpr (std::is_invocable_v<Func, ErrorT, decltype(raw_error)>) {
        // check whether the handler wants to take the raw error object which
        // would be the case if it wants conditionally handle-or-pass-further.
        return std::invoke(std::forward<Func>(func),
                           ErrorV,
                           std::move(raw_error));
      } else if constexpr (std::is_invocable_v<Func, ErrorT>) {
        return std::invoke(std::forward<Func>(func), ErrorV);
      } else {
        return std::invoke(std::forward<Func>(func));
      }
    };
  }
  // per-error counterpart of errorator::pass_further: re-emit the error
  struct pass_further {
    decltype(auto) operator()(const unthrowable_wrapper& e) {
      return e;
    }
  };
  // per-error counterpart of errorator::discard_all: swallow the error
  struct discard {
    decltype(auto) operator()(const unthrowable_wrapper&) {
    }
  };
private:
  // can be used only to initialize the `instance` member
  explicit unthrowable_wrapper() = default;
  // implement the errorable interface
  struct throwable_carrier{};
  static std::exception_ptr carrier_instance;
  static constexpr const std::type_info& exception_ptr_type_info() {
    return typeid(throwable_carrier);
  }
  auto to_exception_ptr() const {
    // error codes don't need to instantiate `std::exception_ptr` each
    // time as the code is actually a part of the type itself.
    // `std::make_exception_ptr()` on modern enough GCCs is quite cheap
    // (see the Gleb Natapov's patch eradicating throw/catch there),
    // but using one instance per type boils down the overhead to just
    // ref-counting.
    return carrier_instance;
  }
  static const auto& from_exception_ptr(std::exception_ptr) {
    return make();
  }
  friend class error_t<unthrowable_wrapper<ErrorT, ErrorV>>;
};
// Out-of-line definition of the per-type exception_ptr singleton; created
// once during static initialization and only ref-counted afterwards.
template <class ErrorT, ErrorT ErrorV>
std::exception_ptr unthrowable_wrapper<ErrorT, ErrorV>::carrier_instance = \
  std::make_exception_ptr<
    unthrowable_wrapper<ErrorT, ErrorV>::throwable_carrier>({});
// Error type that carries state: the payload lives as an exception inside
// a std::exception_ptr, so handlers that want the ErrorT object must go
// through rethrow-and-catch (unlike the stateless unthrowable_wrapper).
template <class ErrorT>
struct stateful_error_t : error_t<stateful_error_t<ErrorT>> {
  template <class... Args>
  explicit stateful_error_t(Args&&... args)
    : ep(std::make_exception_ptr<ErrorT>(std::forward<Args>(args)...)) {
  }
  // Adapt a handler taking (), (ErrorT) or (ErrorT, stateful_error_t)
  // into one callable with the wrapped error.
  template<class Func>
  static auto handle(Func&& func) {
    return [
      func = std::forward<Func>(func)
    ] (stateful_error_t<ErrorT>&& e) mutable -> decltype(auto) {
      if constexpr (std::is_invocable_v<Func>) {
        return std::invoke(std::forward<Func>(func));
      }
      try {
        std::rethrow_exception(e.ep);
      } catch (const ErrorT& obj) {
        if constexpr (std::is_invocable_v<Func, decltype(obj), decltype(e)>) {
          return std::invoke(std::forward<Func>(func), obj, e);
        } else if constexpr (std::is_invocable_v<Func, decltype(obj)>) {
          return std::invoke(std::forward<Func>(func), obj);
        }
      }
      // ep always holds an ErrorT (see the constructors), so the catch
      // above must have matched
      ceph_abort_msg("exception type mismatch -- impossible!");
    };
  }
private:
  std::exception_ptr ep;
  explicit stateful_error_t(std::exception_ptr ep) : ep(std::move(ep)) {}
  static constexpr const std::type_info& exception_ptr_type_info() {
    return typeid(ErrorT);
  }
  auto to_exception_ptr() const {
    return ep;
  }
  static stateful_error_t<ErrorT> from_exception_ptr(std::exception_ptr ep) {
    return stateful_error_t<ErrorT>(std::move(ep));
  }
  friend class error_t<stateful_error_t<ErrorT>>;
};
namespace _impl {
  // Trait that is false for every T -- usable as a dependent false so a
  // static_assert in a discarded `if constexpr` branch only fires when
  // the template is actually instantiated.
  template <class T>
  struct always_false : std::bool_constant<false> {};
};
// One-shot visitor over a failed future's exception_ptr.  Each
// handle<ErrorT>() call checks -- without rethrowing -- whether the stored
// exception is exactly ErrorT and, on a match, replaces `result` with
// whatever the error visitor produces for that error.
template <class ErrorVisitorT, class FuturatorT>
class maybe_handle_error_t {
  const std::type_info& type_info;
  typename FuturatorT::type result; // starts failed; rewritten on a match
  ErrorVisitorT errfunc;
public:
  maybe_handle_error_t(ErrorVisitorT&& errfunc, std::exception_ptr ep)
    : type_info(*ep.__cxa_exception_type()),
      result(FuturatorT::make_exception_future(std::move(ep))),
      errfunc(std::forward<ErrorVisitorT>(errfunc)) {
  }
  template <class ErrorT>
  void handle() {
    static_assert(std::is_invocable<ErrorVisitorT, ErrorT>::value,
                  "provided Error Visitor is not exhaustive");
    // In C++ throwing an exception isn't the sole way to signal
    // error with it. This approach nicely fits cold, infrequent cases
    // but when applied to a hot one, it will likely hurt performance.
    //
    // Alternative approach is to create `std::exception_ptr` on our
    // own and place it in the future via `make_exception_future()`.
    // When it comes to handling, the pointer can be interrogated for
    // pointee's type with `__cxa_exception_type()` instead of costly
    // re-throwing (via `std::rethrow_exception()`) and matching with
    // `catch`. The limitation here is lack of support for hierarchies
    // of exceptions. The code below checks for exact match only while
    // `catch` would allow to match against a base class as well.
    // However, this shouldn't be a big issue for `errorator` as Error
    // Visitors are already checked for exhaustiveness at compile-time.
    //
    // NOTE: `__cxa_exception_type()` is an extension of the language.
    // It should be available both in GCC and Clang but a fallback
    // (based on `std::rethrow_exception()` and `catch`) can be made
    // to handle other platforms if necessary.
    if (type_info == ErrorT::error_t::get_exception_ptr_type_info()) {
      // set `state::invalid` in internals of `seastar::future` to not
      // call `report_failed_future()` during `operator=()`.
      [[maybe_unused]] auto&& ep = std::move(result).get_exception();
      using return_t = std::invoke_result_t<ErrorVisitorT, ErrorT>;
      if constexpr (std::is_assignable_v<decltype(result), return_t>) {
        result = std::invoke(std::forward<ErrorVisitorT>(errfunc),
                             ErrorT::error_t::from_exception_ptr(std::move(ep)));
      } else if constexpr (std::is_same_v<return_t, void>) {
        // void denotes explicit discarding
        // execute for the sake a side effects. Typically this boils down
        // to throwing an exception by the handler.
        std::invoke(std::forward<ErrorVisitorT>(errfunc),
                    ErrorT::error_t::from_exception_ptr(std::move(ep)));
      } else if constexpr (seastar::Future<decltype(result)>) {
        // result is seastar::future but return_t is e.g. int. If so,
        // the else clause cannot be used as seastar::future lacks
        // errorator_type member.
        result = seastar::make_ready_future<return_t>(
          std::invoke(std::forward<ErrorVisitorT>(errfunc),
                      ErrorT::error_t::from_exception_ptr(std::move(ep))));
      } else {
        result = FuturatorT::type::errorator_type::template make_ready_future<return_t>(
          std::invoke(std::forward<ErrorVisitorT>(errfunc),
                      ErrorT::error_t::from_exception_ptr(std::move(ep))));
      }
    }
  }
  auto get_result() && {
    return std::move(result);
  }
};
// Merge several error handlers into one callable: the first functor that
// is invocable with the supplied argument(s) wins.  Used by the variadic
// safe_then()/handle_error() overloads to combine per-error handlers.
template <class FuncHead, class... FuncTail>
static constexpr auto composer(FuncHead&& head, FuncTail&&... tail) {
  return [
    head = std::forward<FuncHead>(head),
    // perfect forwarding in lambda's closure isn't available in C++17
    // using tuple as workaround; see: https://stackoverflow.com/a/49902823
    tail = std::make_tuple(std::forward<FuncTail>(tail)...)
  ] (auto&&... args) mutable -> decltype(auto) {
    if constexpr (std::is_invocable_v<FuncHead, decltype(args)...>) {
      return std::invoke(std::forward<FuncHead>(head),
                         std::forward<decltype(args)>(args)...);
    } else if constexpr (sizeof...(FuncTail) > 0) {
      // head can't accept these arguments -- recurse into the tail
      using next_composer_t = decltype(composer<FuncTail...>);
      auto&& next = std::apply<next_composer_t>(composer<FuncTail...>,
                                                std::move(tail));
      return std::invoke(std::move(next),
                         std::forward<decltype(args)>(args)...);
    } else {
      static_assert(
        std::is_invocable_v<FuncHead, decltype(args)...> ||
        (sizeof...(FuncTail) > 0),
        "composition is not exhaustive");
    }
  };
}
// Tag type distinguishing errorated futures of distinct value types; it
// exists solely so `seastar::futurize` can be specialized for
// `errorator<...>::future` (see the HACK note inside errorator).
template <class ValueT>
struct errorated_future_marker{};
template <class... AllowedErrors>
class parallel_for_each_state;
template <class T>
static inline constexpr bool is_error_v = std::is_base_of_v<error_t<T>, T>;
template <typename... AllowedErrors>
struct errorator;
template <typename Iterator, typename Func, typename... AllowedErrors>
static inline typename errorator<AllowedErrors...>::template future<>
parallel_for_each(Iterator first, Iterator last, Func&& func) noexcept;
template <class... AllowedErrors>
struct errorator {
static_assert((... && is_error_v<AllowedErrors>),
"errorator expects presence of ::is_error in all error types");
  // Compile-time membership test: true iff ErrorT occurs exactly once in
  // AllowedErrors.  The errorator<...> specialization requires *every*
  // error of the other errorator to be (uniquely) present here.
  template <class ErrorT>
  struct contains_once {
    static constexpr bool value =
      (0 + ... + std::is_same_v<ErrorT, AllowedErrors>) == 1;
  };
  template <class... Errors>
  struct contains_once<errorator<Errors...>> {
    static constexpr bool value = (... && contains_once<Errors>::value);
  };
  template <class T>
  static constexpr bool contains_once_v = contains_once<T>::value;
  static_assert((... && contains_once_v<AllowedErrors>),
                "no error type in errorator can be duplicated");
struct ready_future_marker{};
struct exception_future_marker{};
private:
// see the comment for `using future = _future` below.
template <class>
class [[nodiscard]] _future {};
template <class ValueT>
class [[nodiscard]] _future<::crimson::errorated_future_marker<ValueT>>
: private seastar::future<ValueT> {
using base_t = seastar::future<ValueT>;
// we need the friendship for the sake of `get_exception() &&` when
// `safe_then()` is going to return an errorated future as a result of
// chaining. In contrast to `seastar::future`, errorator<T...>::future`
// has this member private.
template <class ErrorVisitor, class Futurator>
friend class maybe_handle_error_t;
// any `seastar::futurize` specialization must be able to access the base.
// see : `satisfy_with_result_of()` far below.
template <typename>
friend struct seastar::futurize;
template <typename T1, typename T2, typename... More>
friend auto seastar::internal::do_with_impl(T1&& rv1, T2&& rv2, More&&... more);
template <class, class = std::void_t<>>
struct get_errorator {
// generic template for non-errorated things (plain types and
// vanilla seastar::future as well).
using type = errorator<>;
};
template <class FutureT>
struct get_errorator<FutureT,
std::void_t<typename FutureT::errorator_type>> {
using type = typename FutureT::errorator_type;
};
template <class T>
using get_errorator_t = typename get_errorator<T>::type;
template <class ValueFuncErroratorT, class... ErrorVisitorRetsT>
struct make_errorator {
// NOP. The generic template.
};
template <class... ValueFuncAllowedErrors,
class ErrorVisitorRetsHeadT,
class... ErrorVisitorRetsTailT>
struct make_errorator<errorator<ValueFuncAllowedErrors...>,
ErrorVisitorRetsHeadT,
ErrorVisitorRetsTailT...> {
private:
using step_errorator = errorator<ValueFuncAllowedErrors...>;
// add ErrorVisitorRetsHeadT only if 1) it's an error type and
// 2) isn't already included in the errorator's error set.
// It's enough to negate contains_once_v as any errorator<...>
// type is already guaranteed to be free of duplications.
using _next_errorator = std::conditional_t<
is_error_v<ErrorVisitorRetsHeadT> &&
!step_errorator::template contains_once_v<ErrorVisitorRetsHeadT>,
typename step_errorator::template extend<ErrorVisitorRetsHeadT>,
step_errorator>;
using maybe_head_ertr = get_errorator_t<ErrorVisitorRetsHeadT>;
using next_errorator =
typename _next_errorator::template extend_ertr<maybe_head_ertr>;
public:
using type = typename make_errorator<next_errorator,
ErrorVisitorRetsTailT...>::type;
};
// finish the recursion
template <class... ValueFuncAllowedErrors>
struct make_errorator<errorator<ValueFuncAllowedErrors...>> {
using type = ::crimson::errorator<ValueFuncAllowedErrors...>;
};
template <class... Args>
using make_errorator_t = typename make_errorator<Args...>::type;
using base_t::base_t;
    // Error slow path shared by safe_then()/handle_error(): extract the
    // exception from the failed future and offer it to the visitor once
    // per allowed error type (at most one handle<> call will match).
    // Marked [[gnu::noinline]] so this cold path isn't inlined into
    // every continuation -- presumably to keep callers' code small.
    template <class Futurator, class Future, class ErrorVisitor>
    [[gnu::noinline]]
    static auto _safe_then_handle_errors(Future&& future,
                                         ErrorVisitor&& errfunc) {
      maybe_handle_error_t<ErrorVisitor, Futurator> maybe_handle_error(
        std::forward<ErrorVisitor>(errfunc),
        std::move(future).get_exception()
      );
      (maybe_handle_error.template handle<AllowedErrors>() , ...);
      return std::move(maybe_handle_error).get_result();
    }
protected:
using base_t::get_exception;
public:
using errorator_type = ::crimson::errorator<AllowedErrors...>;
using promise_type = seastar::promise<ValueT>;
using base_t::available;
using base_t::failed;
// need this because of the legacy in PG::do_osd_ops().
using base_t::handle_exception_type;
[[gnu::always_inline]]
_future(base_t&& base)
: base_t(std::move(base)) {
}
base_t to_base() && {
return std::move(*this);
}
template <class... A>
[[gnu::always_inline]]
_future(ready_future_marker, A&&... a)
: base_t(::seastar::make_ready_future<ValueT>(std::forward<A>(a)...)) {
}
[[gnu::always_inline]]
_future(exception_future_marker, ::seastar::future_state_base&& state) noexcept
: base_t(::seastar::futurize<base_t>::make_exception_future(std::move(state))) {
}
[[gnu::always_inline]]
_future(exception_future_marker, std::exception_ptr&& ep) noexcept
: base_t(::seastar::futurize<base_t>::make_exception_future(std::move(ep))) {
}
template <template <class...> class ErroratedFuture,
class = std::void_t<
typename ErroratedFuture<
::crimson::errorated_future_marker<ValueT>>::errorator_type>>
operator ErroratedFuture<errorated_future_marker<ValueT>> () && {
using dest_errorator_t = \
typename ErroratedFuture<
::crimson::errorated_future_marker<ValueT>>::errorator_type;
static_assert(dest_errorator_t::template contains_once_v<errorator_type>,
"conversion is possible to more-or-eq errorated future!");
return static_cast<base_t&&>(*this);
}
// initialize future as failed without throwing. `make_exception_future()`
// internally uses `std::make_exception_ptr()`. cppreference.com shouldn't
// be misinterpreted when it says:
//
// "This is done as if executing the following code:
// try {
// throw e;
// } catch(...) {
// return std::current_exception();
// }",
//
// the "as if" is absolutely crucial because modern GCCs employ optimized
// path for it. See:
// * https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=cce8e59224e18858749a2324bce583bcfd160d6c,
// * https://gcc.gnu.org/ml/gcc-patches/2016-08/msg00373.html.
//
// This behavior, combined with `__cxa_exception_type()` for inspecting
// exception's type, allows for throw/catch-free handling of stateless
// exceptions (which is fine for error codes). Stateful jumbos would be
// actually a bit harder as `_M_get()` is private, and thus rethrowing is
// necessary to get to the state inside. However, it's not unthinkable to
// see another extension bringing operator*() to the exception pointer...
//
// TODO: we don't really need to `make_exception_ptr` each time. It still
// allocates memory underneath while can be replaced with single instance
// per type created on start-up.
template <class ErrorT,
class DecayedT = std::decay_t<ErrorT>,
bool IsError = is_error_v<DecayedT>,
class = std::enable_if_t<IsError>>
_future(ErrorT&& e)
: base_t(
seastar::make_exception_future<ValueT>(
errorator_type::make_exception_ptr(e))) {
static_assert(errorator_type::contains_once_v<DecayedT>,
"ErrorT is not enlisted in errorator");
}
template <class ValueFuncT, class ErrorVisitorT>
auto safe_then(ValueFuncT&& valfunc, ErrorVisitorT&& errfunc) {
static_assert((... && std::is_invocable_v<ErrorVisitorT,
AllowedErrors>),
"provided Error Visitor is not exhaustive");
using value_func_result_t =
typename std::conditional_t<std::is_void_v<ValueT>,
std::invoke_result<ValueFuncT>,
std::invoke_result<ValueFuncT, ValueT>>::type;
// recognize whether there can be any error coming from the Value
// Function.
using value_func_errorator_t = get_errorator_t<value_func_result_t>;
// mutate the Value Function's errorator to harvest errors coming
// from the Error Visitor. Yes, it's perfectly fine to fail error
// handling at one step and delegate even broader set of issues
// to next continuation.
using return_errorator_t = make_errorator_t<
value_func_errorator_t,
std::decay_t<std::invoke_result_t<ErrorVisitorT, AllowedErrors>>...>;
// OK, now we know about all errors next continuation must take
// care about. If Visitor handled everything and the Value Func
// doesn't return any, we'll finish with errorator<>::future
// which is just vanilla seastar::future – that's it, next cont
// finally could use `.then()`!
using futurator_t = \
typename return_errorator_t::template futurize<value_func_result_t>;
// `seastar::futurize`, used internally by `then_wrapped()`, would
// wrap any non-`seastar::future` type coming from Value Func into
// `seastar::future`. As we really don't want to end with things
// like `seastar::future<errorator::future<...>>`, we need either:
// * convert the errorated future into plain in the lambda below
// and back here or
// * specialize the `seastar::futurize<T>` to get proper kind of
// future directly from `::then_wrapped()`.
// As C++17 doesn't guarantee copy elision when non-same types are
// involved while examination of assemblies from GCC 8.1 confirmed
// extra copying, switch to the second approach has been made.
return this->then_wrapped(
[ valfunc = std::forward<ValueFuncT>(valfunc),
errfunc = std::forward<ErrorVisitorT>(errfunc)
] (auto&& future) mutable noexcept {
if (__builtin_expect(future.failed(), false)) {
return _safe_then_handle_errors<futurator_t>(
std::move(future), std::forward<ErrorVisitorT>(errfunc));
} else {
// NOTE: using `seastar::future::get()` here is a bit bloaty
// as the method rechecks availability of future's value and,
// if it's unavailable, does the `::do_wait()` path (yes, it
// targets `seastar::thread`). Actually this is dead code as
// `then_wrapped()` executes the lambda only when the future
// is available (which means: failed or ready). However, GCC
// hasn't optimized it out:
//
// if (__builtin_expect(future.failed(), false)) {
// ea25: 48 83 bd c8 fe ff ff cmpq $0x2,-0x138(%rbp)
// ea2c: 02
// ea2d: 0f 87 f0 05 00 00 ja f023 <ceph::osd::
// ...
// /// If get() is called in a \ref seastar::thread context,
// /// then it need not be available; instead, the thread will
// /// be paused until the future becomes available.
// [[gnu::always_inline]]
// std::tuple<T...> get() {
// if (!_state.available()) {
// ea3a: 0f 85 1b 05 00 00 jne ef5b <ceph::osd::
// }
// ...
//
// I don't perceive this as huge issue. Though, it cannot be
// claimed errorator has 0 overhead on hot path. The perfect
// solution here would be mark the `::get_available_state()`
// as `protected` and use dedicated `get_value()` exactly as
// `::then()` already does.
return futurator_t::invoke(std::forward<ValueFuncT>(valfunc),
std::move(future).get());
}
});
}
    /**
     * unsafe_thread_get
     *
     * Only valid within a seastar_thread. Ignores errorator protections
     * and throws any contained exceptions.
     *
     * Should really only be used within test code
     * (see test/crimson/gtest_seastar.h).
     */
    auto &&unsafe_get() {
      return seastar::future<ValueT>::get();
    }
    // value-returning variant of unsafe_get(); same caveats apply
    auto unsafe_get0() {
      return seastar::future<ValueT>::get0();
    }
    // Run `func` once this future resolves -- success or failure -- and
    // re-emit the original result afterwards; the errorated analogue of
    // seastar::future::finally().
    template <class FuncT>
    _future finally(FuncT &&func) {
      return this->then_wrapped(
        [func = std::forward<FuncT>(func)](auto &&result) mutable noexcept {
        if constexpr (seastar::InvokeReturnsAnyFuture<FuncT>) {
          // future-returning finalizer: wait for it, then restore result
          return ::seastar::futurize_invoke(std::forward<FuncT>(func)).then_wrapped(
            [result = std::move(result)](auto&& f_res) mutable {
            // TODO: f_res.failed()
            (void)f_res.discard_result();
            return std::move(result);
          });
        } else {
          try {
            func();
          } catch (...) {
            // TODO: rethrow
          }
          return std::move(result);
        }
      });
    }
    // Drop the value while keeping the error set: future<T> -> future<>.
    _future<::crimson::errorated_future_marker<void>>
    discard_result() noexcept {
      return safe_then([](auto&&) {});
    }
// taking ErrorFuncOne and ErrorFuncTwo separately from ErrorFuncTail
// to avoid SFINAE
template <class ValueFunc,
class ErrorFuncHead,
class... ErrorFuncTail>
auto safe_then(ValueFunc&& value_func,
ErrorFuncHead&& error_func_head,
ErrorFuncTail&&... error_func_tail) {
static_assert(sizeof...(ErrorFuncTail) > 0);
return safe_then(
std::forward<ValueFunc>(value_func),
composer(std::forward<ErrorFuncHead>(error_func_head),
std::forward<ErrorFuncTail>(error_func_tail)...));
}
template <class ValueFunc>
auto safe_then(ValueFunc&& value_func) {
return safe_then(std::forward<ValueFunc>(value_func),
errorator_type::pass_further{});
}
template <class ValueFunc,
class... ErrorFuncs>
auto safe_then_unpack(ValueFunc&& value_func,
ErrorFuncs&&... error_funcs) {
return safe_then(
[value_func=std::move(value_func)] (ValueT&& tuple) mutable {
assert_moveable(value_func);
return std::apply(std::move(value_func), std::move(tuple));
},
std::forward<ErrorFuncs>(error_funcs)...
);
}
template <class Func>
void then(Func&&) = delete;
    // Handle errors only, forwarding a successful value untouched.  The
    // visitor must cover every AllowedError; the errors the visitor
    // itself may emit form the resulting future's error set.
    template <class ErrorVisitorT>
    auto handle_error(ErrorVisitorT&& errfunc) {
      static_assert((... && std::is_invocable_v<ErrorVisitorT,
                                                AllowedErrors>),
                    "provided Error Visitor is not exhaustive");
      // result errorator is computed purely from the handlers' returns
      using return_errorator_t = make_errorator_t<
        errorator<>,
        std::decay_t<std::invoke_result_t<ErrorVisitorT, AllowedErrors>>...>;
      using futurator_t = \
        typename return_errorator_t::template futurize<::seastar::future<ValueT>>;
      return this->then_wrapped(
        [ errfunc = std::forward<ErrorVisitorT>(errfunc)
        ] (auto&& future) mutable noexcept {
          if (__builtin_expect(future.failed(), false)) {
            return _safe_then_handle_errors<futurator_t>(
              std::move(future), std::forward<ErrorVisitorT>(errfunc));
          } else {
            return typename futurator_t::type{ std::move(future) };
          }
        });
    }
    // multi-handler overload: handlers are merged via composer(), so each
    // error goes to the first handler able to accept it
    template <class ErrorFuncHead,
              class... ErrorFuncTail>
    auto handle_error(ErrorFuncHead&& error_func_head,
                      ErrorFuncTail&&... error_func_tail) {
      static_assert(sizeof...(ErrorFuncTail) > 0);
      return this->handle_error(
        composer(std::forward<ErrorFuncHead>(error_func_head),
                 std::forward<ErrorFuncTail>(error_func_tail)...));
    }
private:
// for ::crimson::do_for_each
template <class Func>
auto _then(Func&& func) {
return base_t::then(std::forward<Func>(func));
}
template <class T>
auto _forward_to(T&& pr) {
return base_t::forward_to(std::forward<T>(pr));
}
template<typename Iterator, typename AsyncAction>
friend inline auto ::crimson::do_for_each(Iterator begin,
Iterator end,
AsyncAction action);
template <typename Iterator, typename AsyncAction, typename FutureT>
friend class ::crimson::do_for_each_state;
template<typename AsyncAction>
friend inline auto ::crimson::repeat(AsyncAction action);
template <typename Result>
friend class ::seastar::future;
// let seastar::do_with_impl to up-cast us to seastar::future.
template<typename T, typename F>
friend inline auto ::seastar::internal::do_with_impl(T&& rvalue, F&& f);
template<typename T1, typename T2, typename T3_or_F, typename... More>
friend inline auto ::seastar::internal::do_with_impl(T1&& rv1, T2&& rv2, T3_or_F&& rv3, More&&... more);
template<typename, typename>
friend class ::crimson::interruptible::interruptible_future_detail;
friend class ::crimson::parallel_for_each_state<AllowedErrors...>;
template <typename IC, typename FT>
friend class ::crimson::interruptible::parallel_for_each_state;
};
  // dummy tag used below to SFINAE-constrain operator() templates
  class Enabler {};
  // enables an overload only for error types belonging to this errorator
  template <typename T>
  using EnableIf = typename std::enable_if<contains_once_v<std::decay_t<T>>, Enabler>::type;
  // Visitor adapter routing *every* allowed error through one functor,
  // using each error type's own error_t::handle() protocol.
  template <typename ErrorFunc>
  struct all_same_way_t {
    ErrorFunc func;
    all_same_way_t(ErrorFunc &&error_func)
      : func(std::forward<ErrorFunc>(error_func)) {}
    template <typename ErrorT, EnableIf<ErrorT>...>
    decltype(auto) operator()(ErrorT&& e) {
      using decayed_t = std::decay_t<decltype(e)>;
      auto&& handler =
        decayed_t::error_t::handle(std::forward<ErrorFunc>(func));
      static_assert(std::is_invocable_v<decltype(handler), ErrorT>);
      return std::invoke(std::move(handler), std::forward<ErrorT>(e));
    }
  };
public:
// HACK: `errorated_future_marker` and `_future` is just a hack to
// specialize `seastar::futurize` for category of class templates:
// `future<...>` from distinct errorators. Such tricks are usually
// performed basing on SFINAE and `std::void_t` to check existence
// of a trait/member (`future<...>::errorator_type` in our case).
// Unfortunately, this technique can't be applied as the `futurize`
// lacks the optional parameter. The problem looks awfully similar
// to following SO item: https://stackoverflow.com/a/38860413.
template <class ValueT=void>
using future = _future<::crimson::errorated_future_marker<ValueT>>;
  // the visitor that forwards handling of all errors to next continuation
  struct pass_further {
    template <class ErrorT, EnableIf<ErrorT>...>
    decltype(auto) operator()(ErrorT&& e) {
      static_assert(contains_once_v<std::decay_t<ErrorT>>,
                    "passing further disallowed ErrorT");
      return std::forward<ErrorT>(e);
    }
  };
  // the visitor that silently swallows every allowed error
  struct discard_all {
    template <class ErrorT, EnableIf<ErrorT>...>
    void operator()(ErrorT&&) {
      static_assert(contains_once_v<std::decay_t<ErrorT>>,
                    "discarding disallowed ErrorT");
    }
  };
template <typename T>
static future<T> make_errorator_future(seastar::future<T>&& fut) {
return std::move(fut);
}
  // assert_all{ "TODO" };
  //
  // the visitor that treats any error as a fatal bug: aborts the process,
  // printing `msg` if one was supplied at construction
  class assert_all {
    const char* const msg = nullptr;
  public:
    template <std::size_t N>
    assert_all(const char (&msg)[N])
      : msg(msg) {
    }
    assert_all() = default;
    template <class ErrorT, EnableIf<ErrorT>...>
    void operator()(ErrorT&&) {
      static_assert(contains_once_v<std::decay_t<ErrorT>>,
                    "discarding disallowed ErrorT");
      if (msg) {
        ceph_abort_msg(msg);
      } else {
        ceph_abort();
      }
    }
  };
template <class ErrorFunc>
static decltype(auto) all_same_way(ErrorFunc&& error_func) {
return all_same_way_t<ErrorFunc>{std::forward<ErrorFunc>(error_func)};
};
// get a new errorator by extending current one with new errors
template <class... NewAllowedErrorsT>
using extend = errorator<AllowedErrors..., NewAllowedErrorsT...>;
// get a new errorator by summing and deduplicating error set of
// the errorator `unify<>` is applied on with another errorator
// provided as template parameter.
template <class OtherErroratorT>
struct unify {
// 1st: generic NOP template
};
template <class OtherAllowedErrorsHead,
class... OtherAllowedErrorsTail>
struct unify<errorator<OtherAllowedErrorsHead,
OtherAllowedErrorsTail...>> {
private:
// 2nd: specialization for errorators with non-empty error set.
//
// split error set of other errorator, passed as template param,
// into head and tail. Mix error set of this errorator with head
// of the other one only if it isn't already present in the set.
using step_errorator = std::conditional_t<
contains_once_v<OtherAllowedErrorsHead> == false,
errorator<AllowedErrors..., OtherAllowedErrorsHead>,
errorator<AllowedErrors...>>;
using rest_errorator = errorator<OtherAllowedErrorsTail...>;
public:
using type = typename step_errorator::template unify<rest_errorator>::type;
};
template <class... EmptyPack>
struct unify<errorator<EmptyPack...>> {
// 3rd: recursion finisher
static_assert(sizeof...(EmptyPack) == 0);
using type = errorator<AllowedErrors...>;
};
// get a new errorator by extending current one with another errorator
template <class E>
using extend_ertr = typename unify<E>::type;
  // factory helpers mirroring seastar::make_ready_future() and friends,
  // but producing errorated futures of this errorator
  template <typename T=void, typename... A>
  static future<T> make_ready_future(A&&... value) {
    return future<T>(ready_future_marker(), std::forward<A>(value)...);
  }
  template <typename T=void>
  static
  future<T> make_exception_future2(std::exception_ptr&& ex) noexcept {
    return future<T>(exception_future_marker(), std::move(ex));
  }
  template <typename T=void>
  static
  future<T> make_exception_future2(seastar::future_state_base&& state) noexcept {
    return future<T>(exception_future_marker(), std::move(state));
  }
  template <typename T=void, typename Exception>
  static
  future<T> make_exception_future2(Exception&& ex) noexcept {
    return make_exception_future2<T>(std::make_exception_ptr(std::forward<Exception>(ex)));
  }
  // ready, valueless future -- the analogue of seastar::now()
  static auto now() {
    return make_ready_future<>();
  }
  // errorated parallel iteration over a whole container...
  template <typename Container, typename Func>
  static inline auto parallel_for_each(Container&& container, Func&& func) noexcept {
    return crimson::parallel_for_each<decltype(std::begin(container)), Func, AllowedErrors...>(
      std::begin(container),
      std::end(container),
      std::forward<Func>(func));
  }
  // ...and over an explicit iterator range; both defer to the free
  // crimson::parallel_for_each() defined earlier in this header
  template <typename Iterator, typename Func>
  static inline errorator<AllowedErrors...>::future<>
  parallel_for_each(Iterator first, Iterator last, Func&& func) noexcept {
    return crimson::parallel_for_each<Iterator, Func, AllowedErrors...>(
      first,
      last,
      std::forward<Func>(func));
  }
private:
  // futurize for non-errorated result types: wraps plain values and
  // vanilla seastar futures into this errorator's future, converting
  // synchronous exceptions from the invoked functor into failed futures.
  template <class T>
  class futurize {
    using vanilla_futurize = seastar::futurize<T>;
    // explicit specializations for nested type is not allowed unless both
    // the member template and the enclosing template are specialized. see
    // section temp.expl.spec, N4659
    template <class Stored, int Dummy = 0>
    struct stored_to_future {
      using type = future<Stored>;
    };
    template <int Dummy>
    struct stored_to_future <seastar::internal::monostate, Dummy> {
      using type = future<>;
    };
  public:
    using type =
      typename stored_to_future<typename vanilla_futurize::value_type>::type;
    template <class Func, class... Args>
    static type invoke(Func&& func, Args&&... args) {
      try {
        return vanilla_futurize::invoke(std::forward<Func>(func),
                                        std::forward<Args>(args)...);
      } catch (...) {
        return make_exception_future(std::current_exception());
      }
    }
    // monostate overload: invoke the functor with no arguments
    template <class Func>
    static type invoke(Func&& func, seastar::internal::monostate) {
      try {
        return vanilla_futurize::invoke(std::forward<Func>(func));
      } catch (...) {
        return make_exception_future(std::current_exception());
      }
    }
    template <typename Arg>
    static type make_exception_future(Arg&& arg) {
      return vanilla_futurize::make_exception_future(std::forward<Arg>(arg));
    }
  };
template <template <class...> class ErroratedFutureT,
class ValueT>
class futurize<ErroratedFutureT<::crimson::errorated_future_marker<ValueT>>> {
public:
using type = ::crimson::errorator<AllowedErrors...>::future<ValueT>;
template <class Func, class... Args>
static type invoke(Func&& func, Args&&... args) {
try {
return ::seastar::futurize_invoke(std::forward<Func>(func),
std::forward<Args>(args)...);
} catch (...) {
return make_exception_future(std::current_exception());
}
}
template <class Func>
static type invoke(Func&& func, seastar::internal::monostate) {
try {
return ::seastar::futurize_invoke(std::forward<Func>(func));
} catch (...) {
return make_exception_future(std::current_exception());
}
}
template <typename Arg>
static type make_exception_future(Arg&& arg) {
return ::crimson::errorator<AllowedErrors...>::make_exception_future2<ValueT>(std::forward<Arg>(arg));
}
};
template <typename InterruptCond, typename FutureType>
class futurize<
::crimson::interruptible::interruptible_future_detail<
InterruptCond, FutureType>> {
public:
using type = ::crimson::interruptible::interruptible_future_detail<
InterruptCond, typename futurize<FutureType>::type>;
template <typename Func, typename... Args>
static type invoke(Func&& func, Args&&... args) {
try {
return ::seastar::futurize_invoke(std::forward<Func>(func),
std::forward<Args>(args)...);
} catch(...) {
return seastar::futurize<
::crimson::interruptible::interruptible_future_detail<
InterruptCond, FutureType>>::make_exception_future(
std::current_exception());
}
}
template <typename Func>
static type invoke(Func&& func, seastar::internal::monostate) {
try {
return ::seastar::futurize_invoke(std::forward<Func>(func));
} catch(...) {
return seastar::futurize<
::crimson::interruptible::interruptible_future_detail<
InterruptCond, FutureType>>::make_exception_future(
std::current_exception());
}
}
template <typename Arg>
static type make_exception_future(Arg&& arg) {
return ::seastar::futurize<
::crimson::interruptible::interruptible_future_detail<
InterruptCond, FutureType>>::make_exception_future(
std::forward<Arg>(arg));
}
};
template <class ErrorT>
static std::exception_ptr make_exception_ptr(ErrorT&& e) {
// calling via interface class due to encapsulation and friend relations.
return e.error_t<std::decay_t<ErrorT>>::to_exception_ptr();
}
// needed because of:
// * return_errorator_t::template futurize<...> in `safe_then()`,
// * conversion to `std::exception_ptr` in `future::future(ErrorT&&)`.
// the friendship with all errorators is an idea from Kefu to fix build
// issues on GCC 9. This version likely fixes some access violation bug
// we were exploiting before.
template <class...>
friend class errorator;
template<typename, typename>
friend class ::crimson::interruptible::interruptible_future_detail;
}; // class errorator, generic template
// no errors? errorator<>::future is plain seastar::future then!
// no errors? errorator<>::future is plain seastar::future then!
template <>
class errorator<> {
public:
  template <class ValueT=void>
  using future = ::seastar::futurize_t<ValueT>;
  template <class T>
  using futurize = ::seastar::futurize<T>;
  // get a new errorator by extending current one with errors
  template <class... NewAllowedErrors>
  using extend = errorator<NewAllowedErrors...>;
  // get a new errorator by extending current one with another errorator
  template <class E>
  using extend_ertr = E;
  // errorator with empty error set never contains any error
  template <class T>
  static constexpr bool contains_once_v = false;
}; // class errorator, <> specialization
// compound_errorator: fold an arbitrary list of errorators into one
// whose error set is the union of all of them (pairwise via `unify`).
template <class ErroratorOne,
          class ErroratorTwo,
          class... FurtherErrators>
struct compound_errorator {
private:
  // generic template. Empty `FurtherErrators` are handled by
  // the specialization below.
  static_assert(sizeof...(FurtherErrators) > 0);
  using step =
    typename compound_errorator<ErroratorOne, ErroratorTwo>::type;
public:
  using type =
    typename compound_errorator<step, FurtherErrators...>::type;
};
template <class ErroratorOne,
          class ErroratorTwo>
struct compound_errorator<ErroratorOne, ErroratorTwo> {
  // specialization for empty `FurtherErrators` arg pack
  using type =
    typename ErroratorOne::template unify<ErroratorTwo>::type;
};
template <class... Args>
using compound_errorator_t = typename compound_errorator<Args...>::type;
// this is conjunction of two nasty features: C++14's variable template
// and inline global variable of C++17. The latter is crucial to ensure
// the variable will get the same address across all translation units.
template <int ErrorV>
inline std::error_code ec = std::error_code(ErrorV, std::generic_category());
// A compile-time error type keyed by the address of the `ec<ErrorV>`
// instance above.
template <int ErrorV>
using ct_error_code = unthrowable_wrapper<const std::error_code&, ec<ErrorV>>;
namespace ct_error {
using enoent = ct_error_code<static_cast<int>(std::errc::no_such_file_or_directory)>;
using enodata = ct_error_code<static_cast<int>(std::errc::no_message_available)>;
using invarg = ct_error_code<static_cast<int>(std::errc::invalid_argument)>;
using input_output_error = ct_error_code<static_cast<int>(std::errc::io_error)>;
using object_corrupted = ct_error_code<static_cast<int>(std::errc::illegal_byte_sequence)>;
using permission_denied = ct_error_code<static_cast<int>(std::errc::permission_denied)>;
using operation_not_supported =
ct_error_code<static_cast<int>(std::errc::operation_not_supported)>;
using not_connected = ct_error_code<static_cast<int>(std::errc::not_connected)>;
using timed_out = ct_error_code<static_cast<int>(std::errc::timed_out)>;
using erange =
ct_error_code<static_cast<int>(std::errc::result_out_of_range)>;
using ebadf =
ct_error_code<static_cast<int>(std::errc::bad_file_descriptor)>;
using enospc =
ct_error_code<static_cast<int>(std::errc::no_space_on_device)>;
using value_too_large = ct_error_code<static_cast<int>(std::errc::value_too_large)>;
using eagain =
ct_error_code<static_cast<int>(std::errc::resource_unavailable_try_again)>;
using file_too_large =
ct_error_code<static_cast<int>(std::errc::file_too_large)>;
using address_in_use = ct_error_code<static_cast<int>(std::errc::address_in_use)>;
using address_not_available = ct_error_code<static_cast<int>(std::errc::address_not_available)>;
using ecanceled = ct_error_code<static_cast<int>(std::errc::operation_canceled)>;
using einprogress = ct_error_code<static_cast<int>(std::errc::operation_in_progress)>;
using enametoolong = ct_error_code<static_cast<int>(std::errc::filename_too_long)>;
using eexist = ct_error_code<static_cast<int>(std::errc::file_exists)>;
// 122 is presumably Linux EDQUOT (quota exceeded), which has no
// std::errc enumerator — TODO confirm against the target platform.
using edquot = ct_error_code<int(122)>;
// 4095 looks like a sentinel chosen above the valid errno range
// (MAX_ERRNO on Linux) so it can't collide with a real error — verify.
constexpr int cmp_fail_error_value = 4095;
using cmp_fail = ct_error_code<int(cmp_fail_error_value)>;
// Handler that re-forwards every error unchanged to the caller's caller.
struct pass_further_all {
  template <class ErrorT>
  decltype(auto) operator()(ErrorT&& e) {
    return std::forward<ErrorT>(e);
  }
};
// Handler that swallows every error (turns failures into success).
struct discard_all {
  template <class ErrorT>
  void operator()(ErrorT&&) {
  }
};
// Handler that aborts the process on any error, optionally with a
// fixed message supplied at construction time.
class assert_all {
  const char* const msg = nullptr;
public:
  template <std::size_t N>
  assert_all(const char (&msg)[N])
    : msg(msg) {
  }
  assert_all() = default;
  template <class ErrorT>
  void operator()(ErrorT&&) {
    if (msg) {
      ceph_abort(msg);
    } else {
      ceph_abort();
    }
  }
};
// Wrap a single callable so it handles every error type uniformly.
template <class ErrorFunc>
static decltype(auto) all_same_way(ErrorFunc&& error_func) {
  return [
    error_func = std::forward<ErrorFunc>(error_func)
  ] (auto&& e) mutable -> decltype(auto) {
    using decayed_t = std::decay_t<decltype(e)>;
    // NOTE(review): std::forward here forwards the *captured copy* on
    // every invocation; if ErrorFunc is an rvalue type and the lambda is
    // invoked more than once this would move from it repeatedly — looks
    // intentional for single-shot handling, but worth confirming.
    auto&& handler =
      decayed_t::error_t::handle(std::forward<ErrorFunc>(error_func));
    return std::invoke(std::move(handler), std::forward<decltype(e)>(e));
  };
};
}
using stateful_errc = stateful_error_t<std::errc>;
using stateful_errint = stateful_error_t<int>;
using stateful_ec = stateful_error_t<std::error_code>;

// Trait detecting whether F is an errorator-produced future type; the
// partial specialization matches any template instantiated over
// errorated_future_marker.
template <typename F>
struct is_errorated_future : std::false_type {};

template <template <class...> class ErroratedFutureT,
          class ValueT>
struct is_errorated_future<
  ErroratedFutureT<::crimson::errorated_future_marker<ValueT>>
> : std::true_type {};

template <typename T>
inline constexpr bool is_errorated_future_v = is_errorated_future<T>::value;
} // namespace crimson
// open the `seastar` namespace to specialize `futurize`. This is not
// pretty for sure. I just hope it's not worse than e.g. specializing
// `hash` in the `std` namespace. The justification is copy avoidance
// in `future<...>::safe_then()`. See the comments there for details.
namespace seastar {
// Container is a placeholder for errorator::_future<> template
// This specialization teaches seastar's futurize machinery how to build
// and fail errorated futures, routing errors through the errorator's
// own make_exception_future2.
template <template <class> class Container,
          class Value>
struct futurize<Container<::crimson::errorated_future_marker<Value>>> {
  using errorator_type = typename Container<
    ::crimson::errorated_future_marker<Value>>::errorator_type;
  using type = typename errorator_type::template future<Value>;
  using value_type = seastar::internal::future_stored_type_t<Value>;
  // Apply `func` to an argument tuple; a thrown exception becomes a
  // failed errorated future instead of propagating.
  template<typename Func, typename... FuncArgs>
  [[gnu::always_inline]]
  static type apply(Func&& func, std::tuple<FuncArgs...>&& args) noexcept {
    try {
      return std::apply(
        std::forward<Func>(func),
        std::forward<std::tuple<FuncArgs...>>(args));
    } catch (...) {
      return make_exception_future(std::current_exception());
    }
  }
  template<typename Func, typename... FuncArgs>
  [[gnu::always_inline]]
  static inline type invoke(Func&& func, FuncArgs&&... args) noexcept {
    try {
      return func(std::forward<FuncArgs>(args)...);
    } catch (...) {
      return make_exception_future(std::current_exception());
    }
  }
  template <class Func>
  [[gnu::always_inline]]
  static type invoke(Func&& func, seastar::internal::monostate) noexcept {
    try {
      return func();
    } catch (...) {
      return make_exception_future(std::current_exception());
    }
  }
  template <typename Arg>
  [[gnu::always_inline]]
  static type make_exception_future(Arg&& arg) {
    return errorator_type::template make_exception_future2<Value>(std::forward<Arg>(arg));
  }
private:
  template<typename PromiseT, typename Func>
  static void satisfy_with_result_of(PromiseT&& pr, Func&& func) {
    // this may use the protected variant of `seastar::future::forward_to()`
    // because:
    // 1. `seastar::future` established a friendship with with all
    //    specializations of `seastar::futurize`, including this
    //    one (we're in the `seastar` namespace!) WHILE
    // 2. any errorated future declares now the friendship with any
    //    `seastar::futurize<...>`.
    func().forward_to(std::move(pr));
  }
  template <typename U>
  friend class future;
};
// Maps an errorated future to the continuation base holding its value.
template <template <class> class Container,
          class Value>
struct continuation_base_from_future<Container<::crimson::errorated_future_marker<Value>>> {
  using type = continuation_base<Value>;
};
} // namespace seastar
| 50,368 | 36.117907 | 108 | h |
null | ceph-main/src/crimson/common/exception.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <exception>
#include <seastar/core/future.hh>
#include <seastar/core/future-util.hh>
#include "crimson/common/log.h"
#include "crimson/common/interruptible_future.h"
namespace crimson::common {
// Base type for crimson-internal control-flow interruptions thrown
// through futures (see interruptible_future).
class interruption : public std::exception
{};
// Thrown to cut short in-flight operations when the system is shutting
// down; handle_system_shutdown() below swallows exactly this type.
class system_shutdown_exception final : public interruption{
public:
  const char* what() const noexcept final {
    return "system shutting down";
  }
};
// Thrown when the acting set changed under an in-flight operation;
// carries whether this OSD is still the primary afterwards.
class actingset_changed final : public interruption {
public:
  actingset_changed(bool sp) : still_primary(sp) {}
  const char* what() const noexcept final {
    return "acting set changed";
  }
  bool is_primary() const {
    return still_primary;
  }
private:
  const bool still_primary;
};
// Invoke `func(args...)` and turn a system_shutdown_exception into a
// clean, logged no-op; any other exception is rethrown unchanged.
template<typename Func, typename... Args>
inline seastar::future<> handle_system_shutdown(Func&& func, Args&&... args)
{
  return seastar::futurize_invoke(std::forward<Func>(func),
                                  std::forward<Args>(args)...)
    .handle_exception([](std::exception_ptr eptr) {
      // __cxa_exception_type() peeks at the in-flight exception's
      // type_info without rethrowing it (avoids throw/catch cost).
      if (*eptr.__cxa_exception_type() ==
          typeid(crimson::common::system_shutdown_exception)) {
        crimson::get_logger(ceph_subsys_osd).debug(
            "operation skipped, system shutdown");
        return seastar::now();
      }
      std::rethrow_exception(eptr);
    });
}
}
| 1,342 | 23.418182 | 76 | h |
null | ceph-main/src/crimson/common/fatal_signal.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include "fatal_signal.h"
#include <csignal>
#include <iostream>
#include <string_view>
#define BOOST_STACKTRACE_USE_ADDR2LINE
#include <boost/stacktrace.hpp>
#include <seastar/core/reactor.hh>
#include "common/safe_io.h"
#include "include/scope_guard.h"
// Install the one-shot handlers for every fatal signal we care about.
FatalSignal::FatalSignal()
{
  install_oneshot_signals_handler<SIGSEGV,
                                  SIGABRT,
                                  SIGBUS,
                                  SIGILL,
                                  SIGFPE,
                                  SIGXCPU,
                                  SIGXFSZ,
                                  SIGSYS>();
}
// Fold-expression helper: install one handler per signal number in the
// pack.
template <int... SigNums>
void FatalSignal::install_oneshot_signals_handler()
{
  (install_oneshot_signal_handler<SigNums>() , ...);
}
// Re-deliver `signum` with the default disposition so the kernel can
// dump core; never returns — falls back to _exit(1) if that fails.
static void reraise_fatal(const int signum)
{
  // use default handler to dump core
  ::signal(signum, SIG_DFL);
  // normally, we won't get here. if we do, something is very weird.
  const int raise_ret = ::raise(signum);
  if (raise_ret != 0) {
    std::cerr << "reraise_fatal: failed to re-raise signal " << signum
              << std::endl;
  } else {
    std::cerr << "reraise_fatal: default handler for signal " << signum
              << " didn't terminate the process?" << std::endl;
  }
  std::cerr << std::flush;
  ::_exit(1);
}
// Common entry point for every installed fatal-signal handler: report
// once, then re-raise with the default disposition (never returns).
[[gnu::noinline]] void FatalSignal::signal_entry(
  const int signum,
  siginfo_t* const info,
  void*)
{
  // Atomic once-flag: a second fatal signal (e.g. a crash inside the
  // handler itself) skips reporting and just returns.
  if (static std::atomic_bool handled{false}; handled.exchange(true)) {
    return;
  }
  assert(info);
  FatalSignal::signaled(signum, *info);
  reraise_fatal(signum);
}
// Install signal_entry as the SA_SIGINFO handler for SigNum.
template <int SigNum>
void FatalSignal::install_oneshot_signal_handler()
{
  // Zero-initialize: sigaction(2) consumes the whole struct, so fields
  // we never assign (e.g. the obsolete sa_restorer) must not carry
  // stack garbage into the kernel.
  struct sigaction sa = {};
  // it's a bad idea to use a lambda here. On GCC there are `operator()`
  // and `_FUN()`. Controlling their inlineability is hard (impossible?).
  sa.sa_sigaction = signal_entry;
  sigemptyset(&sa.sa_mask);
  // SA_NODEFER presumably so a fault raised *inside* the handler can
  // still be delivered — confirm against the crash-handling design.
  sa.sa_flags = SA_SIGINFO | SA_RESTART | SA_NODEFER;
  if constexpr (SigNum == SIGSEGV) {
    // run stack-overflow SIGSEGVs on the alternate signal stack
    sa.sa_flags |= SA_ONSTACK;
  }
  [[maybe_unused]] auto r = ::sigaction(SigNum, &sa, nullptr);
  assert(r == 0);
}
// Print `cause` (plus the reactor shard, when the reactor is up) and a
// symbolized stack trace to stderr.
[[gnu::noinline]] static void print_backtrace(std::string_view cause) {
  std::cerr << cause;
  if (seastar::engine_is_ready()) {
    std::cerr << " on shard " << seastar::this_shard_id();
  }
  // nobody wants to see things like `FatalSignal::signaled()` or
  // `print_backtrace()` in our backtraces. `+ 1` is for the extra
  // frame created by kernel (signal trampoline, it will take care
  // about e.g. sigreturn(2) calling; see the man page).
  constexpr std::size_t FRAMES_TO_SKIP = 3 + 1;
  std::cerr << ".\nBacktrace:\n";
  std::cerr << boost::stacktrace::stacktrace(
    FRAMES_TO_SKIP,
    static_cast<std::size_t>(-1)/* max depth same as the default one */);
  std::cerr << std::flush;
  // TODO: dump crash related meta data to $crash_dir
  //       see handle_fatal_signal()
}
// Dump every siginfo_t field to stderr, one "  name: value" line each.
static void print_segv_info(const siginfo_t& siginfo)
{
  const auto dump_field = [](const char* name, const auto& value) {
    std::cerr << "  " << name << ": " << value << std::endl;
  };
  std::cerr << "Dump of siginfo:" << std::endl;
  dump_field("si_signo", siginfo.si_signo);
  dump_field("si_errno", siginfo.si_errno);
  dump_field("si_code", siginfo.si_code);
  dump_field("si_pid", siginfo.si_pid);
  dump_field("si_uid", siginfo.si_uid);
  dump_field("si_status", siginfo.si_status);
  dump_field("si_utime", siginfo.si_utime);
  dump_field("si_stime", siginfo.si_stime);
  dump_field("si_int", siginfo.si_int);
  dump_field("si_ptr", siginfo.si_ptr);
  dump_field("si_overrun", siginfo.si_overrun);
  dump_field("si_timerid", siginfo.si_timerid);
  dump_field("si_addr", siginfo.si_addr);
  dump_field("si_band", siginfo.si_band);
  dump_field("si_fd", siginfo.si_fd);
  dump_field("si_addr_lsb", siginfo.si_addr_lsb);
  dump_field("si_lower", siginfo.si_lower);
  dump_field("si_upper", siginfo.si_upper);
  dump_field("si_pkey", siginfo.si_pkey);
  dump_field("si_call_addr", siginfo.si_call_addr);
  dump_field("si_syscall", siginfo.si_syscall);
  dump_field("si_arch", siginfo.si_arch);
  std::cerr << std::flush;
}
// Stream the process' memory map (/proc/self/maps) to stderr; prints a
// diagnostic and returns on any open/read failure.
static void print_proc_maps()
{
  const int fd = ::open("/proc/self/maps", O_RDONLY);
  if (fd < 0) {
    std::cerr << "can't open /proc/self/maps. procfs not mounted?" << std::endl;
    return;
  }
  const auto fd_guard = make_scope_guard([fd] {
    ::close(fd);
  });
  std::cerr << "Content of /proc/self/maps:" << std::endl;
  while (true) {
    // one byte reserved for the NUL terminator so chunk streams safely
    char chunk[4096] = {0, };
    const ssize_t r = safe_read(fd, chunk, sizeof(chunk) - 1);
    if (r < 0) {
      std::cerr << "error while reading /proc/self/maps: " << r << std::endl;
      return;
    } else {
      std::cerr << chunk << std::flush;
      // a short read from safe_read is presumably EOF here — confirm
      // against safe_read()'s contract in common/safe_io.h
      if (r < static_cast<ssize_t>(sizeof(chunk) - 1)) {
        return; // eof
      }
    }
  }
}
// Report a fatal signal: backtrace (plus siginfo for SIGSEGV), then the
// process memory map.
[[gnu::noinline]] void FatalSignal::signaled(const int signum,
                                             const siginfo_t& siginfo)
{
  if (signum == SIGSEGV) {
    print_backtrace("Segmentation fault");
    print_segv_info(siginfo);
  } else if (signum == SIGABRT) {
    print_backtrace("Aborting");
  } else {
    print_backtrace(fmt::format("Signal {}", signum));
  }
  print_proc_maps();
}
| 5,453 | 30.526012 | 80 | cc |
null | ceph-main/src/crimson/common/fatal_signal.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <csignal>
// Installs process-wide one-shot handlers for fatal signals (SIGSEGV,
// SIGABRT, ...) that dump diagnostics before re-raising; see
// fatal_signal.cc for the implementation.
class FatalSignal {
public:
  FatalSignal();
private:
  // sigaction-style entry point shared by all installed handlers
  static void signal_entry(int signum, siginfo_t* siginfo, void* p);
  // emits backtrace/siginfo/memory-map diagnostics for `signum`
  static void signaled(int signum, const siginfo_t& siginfo);
  template <int... SigNums>
  void install_oneshot_signals_handler();
  template <int SigNum>
  void install_oneshot_signal_handler();
};
| 461 | 20 | 72 | h |
null | ceph-main/src/crimson/common/fixed_kv_node_layout.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <algorithm>
#include <iostream>
#include <type_traits>

#include <boost/iterator/counting_iterator.hpp>

#include "include/byteorder.h"
#include "crimson/common/layout.h"
namespace crimson::common {
/**
 * maybe_const_t
 *
 * Maps (T, is_const) to `const T*` or `T*`.  Expressed via
 * std::conditional_t instead of hand-written specializations; the
 * `::type` interface is unchanged.
 */
template <typename T, bool is_const>
struct maybe_const_t {
  using type = std::conditional_t<is_const, const T*, T*>;
};
/**
* FixedKVNodeLayout
*
* Reusable implementation of a fixed size block mapping
* K -> V with internal representations KINT and VINT.
*
* Uses absl::container_internal::Layout for the actual memory layout.
*
* The primary interface exposed is centered on the iterator
* and related methods.
*
* Also included are helpers for doing splits and merges as for a btree.
*/
template <
size_t CAPACITY,
typename Meta,
typename MetaInt,
typename K,
typename KINT,
typename V,
typename VINT,
bool VALIDATE_INVARIANTS=true>
class FixedKVNodeLayout {
char *buf = nullptr;
using L = absl::container_internal::Layout<ceph_le32, MetaInt, KINT, VINT>;
static constexpr L layout{1, 1, CAPACITY, CAPACITY};
public:
  // Random-access style iterator over the node's (key, value) slots.
  // Parameterized on constness; iter_t<false> converts to iter_t<true>.
  // An iterator is just (parent node pointer, slot offset).
  template <bool is_const>
  struct iter_t {
    friend class FixedKVNodeLayout;
    using parent_t = typename maybe_const_t<FixedKVNodeLayout, is_const>::type;
    parent_t node;
    uint16_t offset = 0;
    iter_t() = default;
    iter_t(
      parent_t parent,
      uint16_t offset) : node(parent), offset(offset) {}
    iter_t(const iter_t &) noexcept = default;
    iter_t(iter_t &&) noexcept = default;
    // converting constructor: mutable iterator -> const iterator only
    template<bool is_const_ = is_const>
    iter_t(const iter_t<false>& it, std::enable_if_t<is_const_, int> = 0)
      : iter_t{it.node, it.offset}
    {}
    iter_t &operator=(const iter_t &) = default;
    iter_t &operator=(iter_t &&) = default;
    // Work nicely with for loops without requiring a nested type.
    using reference = iter_t&;
    iter_t &operator*() { return *this; }
    iter_t *operator->() { return this; }
    iter_t operator++(int) {
      auto ret = *this;
      ++offset;
      return ret;
    }
    iter_t &operator++() {
      ++offset;
      return *this;
    }
    iter_t operator--(int) {
      assert(offset > 0);
      auto ret = *this;
      --offset;
      return ret;
    }
    iter_t &operator--() {
      assert(offset > 0);
      --offset;
      return *this;
    }
    // distance between two iterators into the same node
    uint16_t operator-(const iter_t &rhs) const {
      assert(rhs.node == node);
      return offset - rhs.offset;
    }
    iter_t operator+(uint16_t off) const {
      return iter_t(
        node,
        offset + off);
    }
    iter_t operator-(uint16_t off) const {
      return iter_t(
        node,
        offset - off);
    }
    friend bool operator==(const iter_t &lhs, const iter_t &rhs) {
      assert(lhs.node == rhs.node);
      return lhs.offset == rhs.offset;
    }
    friend bool operator!=(const iter_t &lhs, const iter_t &rhs) {
      return !(lhs == rhs);
    }
    // mixed-constness comparisons
    friend bool operator==(const iter_t<is_const> &lhs, const iter_t<!is_const> &rhs) {
      assert(lhs.node == rhs.node);
      return lhs.offset == rhs.offset;
    }
    friend bool operator!=(const iter_t<is_const> &lhs, const iter_t<!is_const> &rhs) {
      return !(lhs == rhs);
    }
    K get_key() const {
      return K(node->get_key_ptr()[offset]);
    }
    // key of the next slot, or K's max when this is the last slot
    K get_next_key_or_max() const {
      auto next = *this + 1;
      if (next == node->end())
	return std::numeric_limits<K>::max();
      else
	return next->get_key();
    }
    void set_val(V val) const {
      static_assert(!is_const);
      node->get_val_ptr()[offset] = VINT(val);
    }
    V get_val() const {
      return V(node->get_val_ptr()[offset]);
    };
    // true iff addr falls in [get_key(), next key)
    bool contains(K addr) const {
      return (get_key() <= addr) && (get_next_key_or_max() > addr);
    }
    uint16_t get_offset() const {
      return offset;
    }
  private:
    void set_key(K _lb) const {
      static_assert(!is_const);
      KINT lb;
      lb = _lb;
      node->get_key_ptr()[offset] = lb;
    }
    // raw byte pointers into the key/value arrays, used by the memcpy
    // based copy helpers below
    typename maybe_const_t<char, is_const>::type get_key_ptr() const {
      return reinterpret_cast<
	typename maybe_const_t<char, is_const>::type>(
	  node->get_key_ptr() + offset);
    }
    typename maybe_const_t<char, is_const>::type get_val_ptr() const {
      return reinterpret_cast<
	typename maybe_const_t<char, is_const>::type>(
	  node->get_val_ptr() + offset);
    }
  };
  using const_iterator = iter_t<true>;
  using iterator = iter_t<false>;
  // One journaled mutation (insert/remove/update of a key) in its
  // on-disk representation; replay() re-applies it to a node.
  struct delta_t {
    enum class op_t : uint8_t {
      INSERT,
      REMOVE,
      UPDATE,
    } op;
    KINT key;
    VINT val;
    void replay(FixedKVNodeLayout &l) {
      switch (op) {
      case op_t::INSERT: {
	l.insert(l.lower_bound(key), key, val);
	break;
      }
      case op_t::REMOVE: {
	auto iter = l.find(key);
	assert(iter != l.end());
	l.remove(iter);
	break;
      }
      case op_t::UPDATE: {
	auto iter = l.find(key);
	assert(iter != l.end());
	l.update(iter, val);
	break;
      }
      default:
	assert(0 == "Impossible");
      }
    }
    bool operator==(const delta_t &rhs) const {
      return op == rhs.op &&
	key == rhs.key &&
	val == rhs.val;
    }
  };
public:
  // Accumulates delta_t records for journaling; copy_out/copy_in
  // (de)serialize the buffer as raw delta_t structs.
  class delta_buffer_t {
    std::vector<delta_t> buffer;
  public:
    bool empty() const {
      return buffer.empty();
    }
    void insert(
      const K &key,
      const V &val) {
      KINT k;
      k = key;
      buffer.push_back(
	delta_t{
	  delta_t::op_t::INSERT,
	  k,
	  VINT(val)
	});
    }
    void update(
      const K &key,
      const V &val) {
      KINT k;
      k = key;
      buffer.push_back(
	delta_t{
	  delta_t::op_t::UPDATE,
	  k,
	  VINT(val)
	});
    }
    void remove(const K &key) {
      KINT k;
      k = key;
      buffer.push_back(
	delta_t{
	  delta_t::op_t::REMOVE,
	  k,
	  VINT()
	});
    }
    // re-apply every recorded delta, in order, to `node`
    void replay(FixedKVNodeLayout &node) {
      for (auto &i: buffer) {
	i.replay(node);
      }
    }
    size_t get_bytes() const {
      return buffer.size() * sizeof(delta_t);
    }
    // serialize into `out` (must be exactly get_bytes() long) and clear
    void copy_out(char *out, size_t len) {
      assert(len == get_bytes());
      ::memcpy(out, reinterpret_cast<const void *>(buffer.data()), get_bytes());
      buffer.clear();
    }
    // deserialize from a raw buffer previously produced by copy_out
    void copy_in(const char *out, size_t len) {
      assert(empty());
      assert(len % sizeof(delta_t) == 0);
      buffer = std::vector(
	reinterpret_cast<const delta_t*>(out),
	reinterpret_cast<const delta_t*>(out + len));
    }
    bool operator==(const delta_buffer_t &rhs) const {
      return buffer == rhs.buffer;
    }
  };
  // journal_*: apply a mutation to this node and, when `recorder` is
  // non-null, record the matching delta_t(s) for later replay.
  void journal_insert(
    const_iterator _iter,
    const K &key,
    const V &val,
    delta_buffer_t *recorder) {
    auto iter = iterator(this, _iter.offset);
    if (recorder) {
      recorder->insert(
	key,
	val);
    }
    insert(iter, key, val);
  }
  void journal_update(
    const_iterator _iter,
    const V &val,
    delta_buffer_t *recorder) {
    auto iter = iterator(this, _iter.offset);
    if (recorder) {
      recorder->update(iter->get_key(), val);
    }
    update(iter, val);
  }
  // replace = remove old key + insert new key at the same slot, so it
  // is recorded as a REMOVE followed by an INSERT
  void journal_replace(
    const_iterator _iter,
    const K &key,
    const V &val,
    delta_buffer_t *recorder) {
    auto iter = iterator(this, _iter.offset);
    if (recorder) {
      recorder->remove(iter->get_key());
      recorder->insert(key, val);
    }
    replace(iter, key, val);
  }
  void journal_remove(
    const_iterator _iter,
    delta_buffer_t *recorder) {
    auto iter = iterator(this, _iter.offset);
    if (recorder) {
      recorder->remove(iter->get_key());
    }
    remove(iter);
  }
  // Wraps (does not own) the caller-provided backing buffer `buf`.
  FixedKVNodeLayout(char *buf) :
    buf(buf) {}
  virtual ~FixedKVNodeLayout() = default;
  const_iterator begin() const {
    return const_iterator(
      this,
      0);
  }
  const_iterator end() const {
    return const_iterator(
      this,
      get_size());
  }
  iterator begin() {
    return iterator(
      this,
      0);
  }
  iterator end() {
    return iterator(
      this,
      get_size());
  }
  const_iterator iter_idx(uint16_t off) const {
    return const_iterator(
      this,
      off);
  }
  // Linear scan for an exact key match; returns end() when absent.
  // NOTE(review): entries appear to be kept sorted (see insert's
  // invariant asserts), so lower_bound() would also work here — the
  // linear scan is presumably fine for small CAPACITY.
  const_iterator find(K l) const {
    auto ret = begin();
    for (; ret != end(); ++ret) {
      if (ret->get_key() == l)
	break;
    }
    return ret;
  }
  iterator find(K l) {
    const auto &tref = *this;
    return iterator(this, tref.find(l).offset);
  }
  // Binary search over slot indices via boost counting iterators;
  // returns the first slot whose key is >= l.
  const_iterator lower_bound(K l) const {
    auto it = std::lower_bound(boost::make_counting_iterator<uint16_t>(0),
			       boost::make_counting_iterator<uint16_t>(get_size()),
			       l,
			       [this](uint16_t i, K key) {
				 const_iterator iter(this, i);
				 return iter->get_key() < key;
			       });
    return const_iterator(this, *it);
  }
  iterator lower_bound(K l) {
    const auto &tref = *this;
    return iterator(this, tref.lower_bound(l).offset);
  }
  // First slot whose key is > l.
  const_iterator upper_bound(K l) const {
    auto it = std::upper_bound(boost::make_counting_iterator<uint16_t>(0),
			       boost::make_counting_iterator<uint16_t>(get_size()),
			       l,
			       [this](K key, uint16_t i) {
				 const_iterator iter(this, i);
				 return key < iter->get_key();
			       });
    return const_iterator(this, *it);
  }
  iterator upper_bound(K l) {
    const auto &tref = *this;
    return iterator(this, tref.upper_bound(l).offset);
  }
  // Midpoint slot used when splitting this node in two.
  const_iterator get_split_pivot() const {
    return iter_idx(get_size() / 2);
  }
  // Element count, stored at layout slot 0 of the backing buffer.
  uint16_t get_size() const {
    return *layout.template Pointer<0>(buf);
  }
  /**
   * set_size
   *
   * Set size representation to match size
   */
  void set_size(uint16_t size) {
    *layout.template Pointer<0>(buf) = size;
  }
  /**
   * get_meta/set_meta
   *
   * Enables stashing a templated type within the layout.
   * Cannot be modified after initial write as it is not represented
   * in delta_t
   */
  Meta get_meta() const {
    MetaInt &metaint = *layout.template Pointer<1>(buf);
    return Meta(metaint);
  }
  void set_meta(const Meta &meta) {
    *layout.template Pointer<1>(buf) = MetaInt(meta);
  }
  constexpr static size_t get_capacity() {
    return CAPACITY;
  }
bool operator==(const FixedKVNodeLayout &rhs) const {
if (get_size() != rhs.get_size()) {
return false;
}
auto iter = begin();
auto iter2 = rhs.begin();
while (iter != end()) {
if (iter->get_key() != iter2->get_key() ||
iter->get_val() != iter2->get_val()) {
return false;
}
iter++;
iter2++;
}
return true;
}
  /**
   * split_into
   *
   * Takes *this and splits its contents into left and right.
   * Returns the pivot key (first key that went to `right`).
   */
  K split_into(
    FixedKVNodeLayout &left,
    FixedKVNodeLayout &right) const {
    auto piviter = get_split_pivot();
    left.copy_from_foreign(left.begin(), begin(), piviter);
    left.set_size(piviter - begin());
    right.copy_from_foreign(right.begin(), piviter, end());
    right.set_size(end() - piviter);
    auto [lmeta, rmeta] = get_meta().split_into(piviter->get_key());
    left.set_meta(lmeta);
    right.set_meta(rmeta);
    return piviter->get_key();
  }
  /**
   * merge_from
   *
   * Takes two nodes and copies their contents into *this.
   *
   * precondition: left.size() + right.size() < CAPACITY
   */
  void merge_from(
    const FixedKVNodeLayout &left,
    const FixedKVNodeLayout &right)
  {
    copy_from_foreign(
      end(),
      left.begin(),
      left.end());
    // set_size after each copy so that end() points past the entries
    // copied so far — the second copy appends at the updated end()
    set_size(left.get_size());
    copy_from_foreign(
      end(),
      right.begin(),
      right.end());
    set_size(left.get_size() + right.get_size());
    set_meta(Meta::merge_from(left.get_meta(), right.get_meta()));
  }
  /**
   * balance_into_new_nodes
   *
   * Takes the contents of left and right and copies them into
   * replacement_left and replacement_right such that in the
   * event that the number of elements is odd the extra goes to
   * the left side iff prefer_left.
   */
  static K balance_into_new_nodes(
    const FixedKVNodeLayout &left,
    const FixedKVNodeLayout &right,
    bool prefer_left,
    FixedKVNodeLayout &replacement_left,
    FixedKVNodeLayout &replacement_right)
  {
    auto total = left.get_size() + right.get_size();
    auto pivot_idx = (left.get_size() + right.get_size()) / 2;
    if (total % 2 && prefer_left) {
      pivot_idx++;
    }
    // pivot key, taken from whichever source node the pivot index
    // falls into
    auto replacement_pivot = pivot_idx >= left.get_size() ?
      right.iter_idx(pivot_idx - left.get_size())->get_key() :
      left.iter_idx(pivot_idx)->get_key();
    if (pivot_idx < left.get_size()) {
      // pivot inside `left`: left's tail plus all of `right` moves to
      // replacement_right
      replacement_left.copy_from_foreign(
	replacement_left.end(),
	left.begin(),
	left.iter_idx(pivot_idx));
      replacement_left.set_size(pivot_idx);
      replacement_right.copy_from_foreign(
	replacement_right.end(),
	left.iter_idx(pivot_idx),
	left.end());
      replacement_right.set_size(left.get_size() - pivot_idx);
      replacement_right.copy_from_foreign(
	replacement_right.end(),
	right.begin(),
	right.end());
      replacement_right.set_size(total - pivot_idx);
    } else {
      // pivot inside `right`: all of `left` plus right's head moves to
      // replacement_left
      replacement_left.copy_from_foreign(
	replacement_left.end(),
	left.begin(),
	left.end());
      replacement_left.set_size(left.get_size());
      replacement_left.copy_from_foreign(
	replacement_left.end(),
	right.begin(),
	right.iter_idx(pivot_idx - left.get_size()));
      replacement_left.set_size(pivot_idx);
      replacement_right.copy_from_foreign(
	replacement_right.end(),
	right.iter_idx(pivot_idx - left.get_size()),
	right.end());
      replacement_right.set_size(total - pivot_idx);
    }
    auto [lmeta, rmeta] = Meta::rebalance(
      left.get_meta(), right.get_meta(), replacement_pivot);
    replacement_left.set_meta(lmeta);
    replacement_right.set_meta(rmeta);
    return replacement_pivot;
  }
private:
  // Insert (key, val) at `iter`, shifting later entries right.
  // Preconditions (asserted when VALIDATE_INVARIANTS): node not full and
  // the insertion keeps keys strictly sorted.
  void insert(
    iterator iter,
    const K &key,
    const V &val) {
    if (VALIDATE_INVARIANTS) {
      if (iter != begin()) {
	assert((iter - 1)->get_key() < key);
      }
      if (iter != end()) {
	assert(iter->get_key() > key);
      }
      assert(get_size() < CAPACITY);
    }
    copy_from_local(iter + 1, iter, end());
    iter->set_key(key);
    iter->set_val(val);
    set_size(get_size() + 1);
  }
  // Overwrite the value at `iter`, key unchanged.
  void update(
    iterator iter,
    V val) {
    assert(iter != end());
    iter->set_val(val);
  }
  // Overwrite both key and value in place; the new key must preserve
  // sorted order relative to the neighbors.
  void replace(
    iterator iter,
    const K &key,
    const V &val) {
    assert(iter != end());
    if (VALIDATE_INVARIANTS) {
      if (iter != begin()) {
	assert((iter - 1)->get_key() < key);
      }
      if ((iter + 1) != end()) {
	assert((iter + 1)->get_key() > key);
      }
    }
    iter->set_key(key);
    iter->set_val(val);
  }
  // Remove the entry at `iter`, shifting later entries left.
  void remove(iterator iter) {
    assert(iter != end());
    copy_from_local(iter, iter + 1, end());
    set_size(get_size() - 1);
  }
  /**
   * get_key_ptr
   *
   * Get pointer to start of key array
   */
  KINT *get_key_ptr() {
    return layout.template Pointer<2>(buf);
  }
  const KINT *get_key_ptr() const {
    return layout.template Pointer<2>(buf);
  }
  /**
   * get_val_ptr
   *
   * Get pointer to start of val array
   */
  VINT *get_val_ptr() {
    return layout.template Pointer<3>(buf);
  }
  const VINT *get_val_ptr() const {
    return layout.template Pointer<3>(buf);
  }
  /**
   * node_resolve/unresolve_vals
   *
   * If the representation for values depends in some way on the
   * node in which they are located, users may implement
   * resolve/unresolve to enable copy_from_foreign to handle that
   * transition.
   */
  // default: values are node-independent, so both hooks are no-ops
  virtual void node_resolve_vals(iterator from, iterator to) const {}
  virtual void node_unresolve_vals(iterator from, iterator to) const {}
  /**
   * copy_from_foreign
   *
   * Copies entries from [from_src, to_src) to tgt.
   *
   * tgt and from_src must be from different nodes.
   * from_src and to_src must be from the same node.
   */
  static void copy_from_foreign(
    iterator tgt,
    const_iterator from_src,
    const_iterator to_src) {
    assert(tgt->node != from_src->node);
    assert(to_src->node == from_src->node);
    // Byte counts are raw pointer distances within each packed array;
    // memcpy is safe because source and destination nodes are distinct,
    // so the ranges cannot overlap.
    memcpy(
      tgt->get_val_ptr(), from_src->get_val_ptr(),
      to_src->get_val_ptr() - from_src->get_val_ptr());
    memcpy(
      tgt->get_key_ptr(), from_src->get_key_ptr(),
      to_src->get_key_ptr() - from_src->get_key_ptr());
    // Let the source node rewrite node-dependent value representations
    // over the copied range, then let the target node re-encode them
    // (both are no-ops by default, see node_resolve_vals above).
    from_src->node->node_resolve_vals(tgt, tgt + (to_src - from_src));
    tgt->node->node_unresolve_vals(tgt, tgt + (to_src - from_src));
  }
  /**
   * copy_from_local
   *
   * Copies entries from [from_src, to_src) to tgt.
   *
   * tgt, from_src, and to_src must be from the same node.
   */
  static void copy_from_local(
    iterator tgt,
    iterator from_src,
    iterator to_src) {
    assert(tgt->node == from_src->node);
    assert(to_src->node == from_src->node);
    // memmove (not memcpy): source and destination live in the same node
    // and may overlap, e.g. when insert()/remove() shift entries by one.
    memmove(
      tgt->get_val_ptr(), from_src->get_val_ptr(),
      to_src->get_val_ptr() - from_src->get_val_ptr());
    memmove(
      tgt->get_key_ptr(), from_src->get_key_ptr(),
      to_src->get_key_ptr() - from_src->get_key_ptr());
  }
};
}
| 17,059 | 22.337893 | 87 | h |
null | ceph-main/src/crimson/common/formatter.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "formatter.h"
#include <fmt/format.h>
#if FMT_VERSION >= 60000
#include <fmt/chrono.h>
#else
#include <fmt/time.h>
#endif
// Formats a seastar lowres_system_clock::time_point as local time
// "YYYY-mm-dd HH:MM:SS mmm" (sub-second part zero-padded to 3 digits).
template <>
struct fmt::formatter<seastar::lowres_system_clock::time_point> {
  // ignore the format string
  template <typename ParseContext>
  constexpr auto parse(ParseContext &ctx) { return ctx.begin(); }
  template <typename FormatContext>
  auto format(const seastar::lowres_system_clock::time_point& t,
	      FormatContext& ctx) {
    // whole seconds since epoch, for struct-tm conversion below
    std::time_t tt = std::chrono::duration_cast<std::chrono::seconds>(
      t.time_since_epoch()).count();
    // remainder below one second, counted in the clock's native duration
    // (presumably milliseconds given the {:03d} width -- TODO confirm
    // against lowres_system_clock's duration type)
    auto milliseconds = (t.time_since_epoch() %
			 std::chrono::seconds(1)).count();
    return fmt::format_to(ctx.out(), "{:%Y-%m-%d %H:%M:%S} {:03d}",
			  fmt::localtime(tt), milliseconds);
  }
};
namespace std {

// Stream output for lowres_system_clock::time_point, delegating to the
// fmt::formatter specialization above.
// NOTE(review): adding new declarations to namespace std is formally
// undefined behavior per [namespace.std]; consider relocating this
// operator (e.g. into namespace seastar for ADL) -- verify callers first.
ostream& operator<<(ostream& out,
		    const seastar::lowres_system_clock::time_point& t)
{
  return out << fmt::format("{}", t);
}

}
null | ceph-main/src/crimson/common/formatter.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <seastar/core/lowres_clock.hh>
#include "common/ceph_time.h"
namespace std {

// Stream output for seastar's lowres_system_clock::time_point
// (definition lives in formatter.cc).
// NOTE(review): declarations added to namespace std are formally UB per
// [namespace.std] -- confirm whether moving it is feasible for callers.
ostream& operator<<(ostream& out,
		    const seastar::lowres_system_clock::time_point& t);

}
| 297 | 20.285714 | 71 | h |
null | ceph-main/src/crimson/common/gated.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/gate.hh>
#include <seastar/core/future.hh>
#include <seastar/core/future-util.hh>
#include "crimson/common/exception.h"
#include "crimson/common/log.h"
#include "include/ceph_assert.h"
namespace crimson::common {
// Gated wraps a seastar::gate and centralizes error handling for tasks
// dispatched through it: system-shutdown interruptions are logged at
// debug level and swallowed; any other std::exception is logged at error
// level, and is then asserted to be a gate_closed_exception.
class Gated {
 public:
  static seastar::logger& gated_logger() {
    return crimson::get_logger(ceph_subsys_osd);
  }
  // Fire-and-forget variant of dispatch(); the returned future is
  // intentionally discarded.
  // Fix: forward func instead of passing it as an lvalue, preserving the
  // caller's value category (move-only callables are moved, not copied).
  template <typename Func, typename T>
  inline void dispatch_in_background(const char* what, T& who, Func&& func) {
    (void) dispatch(what, who, std::forward<Func>(func));
  }
  // Run func under the gate; `what` and `who` only feed log messages.
  template <typename Func, typename T>
  inline seastar::future<> dispatch(const char* what, T& who, Func&& func) {
    return seastar::with_gate(pending_dispatch, std::forward<Func>(func)
    ).handle_exception([what, &who] (std::exception_ptr eptr) {
      // cheap type test via the ABI type info, avoiding a rethrow
      if (*eptr.__cxa_exception_type() == typeid(system_shutdown_exception)) {
	gated_logger().debug(
	    "{}, {} skipped, system shutdown", who, what);
	return;
      }
      try {
	std::rethrow_exception(eptr);
      } catch (std::exception& e) {
	gated_logger().error(
	    "{} dispatch() {} caught exception: {}", who, what, e.what());
      }
      // any exception other than shutdown is expected to be gate closure
      assert(*eptr.__cxa_exception_type()
	     == typeid(seastar::gate_closed_exception));
    });
  }
  // Close the gate and wait for all dispatched tasks to drain.
  seastar::future<> close() {
    return pending_dispatch.close();
  }
  bool is_closed() const {
    return pending_dispatch.is_closed();
  }
 private:
  seastar::gate pending_dispatch;
};
}// namespace crimson::common
| 1,558 | 26.839286 | 78 | h |
null | ceph-main/src/crimson/common/interruptible_future.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/future-util.hh>
#include <seastar/core/do_with.hh>
#include <seastar/core/when_all.hh>
#include <seastar/core/thread.hh>
#include "crimson/common/log.h"
#include "crimson/common/errorator.h"
#ifndef NDEBUG
#define INTR_FUT_DEBUG(FMT_MSG, ...) crimson::get_logger(ceph_subsys_).trace(FMT_MSG, ##__VA_ARGS__)
#else
#define INTR_FUT_DEBUG(FMT_MSG, ...)
#endif
// The interrupt condition generally works this way:
//
// 1. It is created by call_with_interruption_impl method, and is recorded in the thread
// local global variable "::crimson::interruptible::interrupt_cond".
// 2. Any continuation that's created within the execution of the continuation
// that calls the call_with_interruption_impl method will capture the "interrupt_cond";
// and when they starts to run, they will put that capture interruption condition
// into "::crimson::interruptible::interrupt_cond" so that further continuations
// created can also capture the interruption condition;
// 3. At the end of the continuation run, the global "interrupt_cond" will be cleared
// to prevent other continuations that are not supposed to be interrupted wrongly
// capture an interruption condition.
// With this approach, continuations capture the interrupt condition at their creation,
// restore the interrupt conditions at the beginning of their execution and clear those
// interrupt conditions at the end of their execution. So the global "interrupt_cond"
// only hold valid interrupt conditions when the corresponding continuations are actually
// running after which it gets cleared. Since continuations can't be executed simultaneously,
// different continuation chains won't be able to interfere with each other.
//
// The global "interrupt_cond" can work as a signal about whether the continuation
// is supposed to be interrupted, the reason that the global "interrupt_cond"
// exists is that there may be this scenario:
//
// Say there's some method PG::func1(), in which the continuations created may
// or may not be supposed to be interrupted in different situations. If we don't
// have a global signal, we have to add an extra parameter to every method like
// PG::func1() to indicate whether the current run should create to-be-interrupted
// continuations or not.
//
// interruptor::with_interruption() and helpers can be used by users to wrap a future in
// the interruption machinery.
namespace crimson::os::seastore {
class TransactionConflictCondition;
}
// GCC tries to instantiate
// seastar::lw_shared_ptr<crimson::os::seastore::TransactionConflictCondition>.
// but we *may* not have the definition of TransactionConflictCondition at this moment,
// a full specialization for lw_shared_ptr_accessors helps to bypass the default
// lw_shared_ptr_accessors implementation, where std::is_base_of<.., T> is used.
namespace seastar::internal {
// Empty-bodied full specialization: inherits the no-ESFT accessors so
// the default accessor selection (which needs a complete type) is
// bypassed -- see the explanatory comment above.
template<>
struct lw_shared_ptr_accessors<::crimson::os::seastore::TransactionConflictCondition, void>
  : lw_shared_ptr_accessors_no_esft<::crimson::os::seastore::TransactionConflictCondition>
{};
}
SEASTAR_CONCEPT(
namespace crimson::interruptible {
template<typename InterruptCond, typename FutureType>
class interruptible_future_detail;
}
namespace seastar::impl {
template <typename InterruptCond, typename FutureType, typename... Rest>
struct is_tuple_of_futures<std::tuple<crimson::interruptible::interruptible_future_detail<InterruptCond, FutureType>, Rest...>>
: is_tuple_of_futures<std::tuple<Rest...>> {};
}
)
namespace crimson::interruptible {
struct ready_future_marker {};
struct exception_future_marker {};
template <typename InterruptCond>
class interruptible_future_builder;
template <typename InterruptCond>
struct interruptor;
template <typename InterruptCond>
using InterruptCondRef = seastar::lw_shared_ptr<InterruptCond>;
// Per-thread record of the interrupt condition active while a
// continuation of an interruptible chain runs.  ref_count tracks nested
// set() calls so the condition is only dropped when the outermost frame
// calls reset().
template <typename InterruptCond>
struct interrupt_cond_t {
  InterruptCondRef<InterruptCond> interrupt_cond;
  uint64_t ref_count = 0;
  // Install ic as the active condition, or bump the nesting count if the
  // same condition is already active; installing a *different* condition
  // is a bug (enforced by the assert below).
  void set(
    InterruptCondRef<InterruptCond>& ic) {
    INTR_FUT_DEBUG(
      "{}: going to set interrupt_cond: {}, ic: {}",
      __func__,
      (void*)interrupt_cond.get(),
      (void*)ic.get());
    if (!interrupt_cond) {
      interrupt_cond = ic;
    }
    assert(interrupt_cond.get() == ic.get());
    ref_count++;
    INTR_FUT_DEBUG(
      "{}: interrupt_cond: {}, ref_count: {}",
      __func__,
      (void*)interrupt_cond.get(),
      ref_count);
  }
  // Undo one set(); the held reference is released only when the nesting
  // count drops back to zero.
  void reset() {
    if (--ref_count == 0) {
      INTR_FUT_DEBUG(
	"{}: clearing interrupt_cond: {},{}",
	__func__,
	(void*)interrupt_cond.get(),
	typeid(InterruptCond).name());
      interrupt_cond.release();
    } else {
      INTR_FUT_DEBUG(
	"{}: end without clearing interrupt_cond: {},{}, ref_count: {}",
	__func__,
	(void*)interrupt_cond.get(),
	typeid(InterruptCond).name(),
	ref_count);
    }
  }
};
template <typename InterruptCond>
thread_local interrupt_cond_t<InterruptCond> interrupt_cond;
extern template thread_local interrupt_cond_t<crimson::os::seastore::TransactionConflictCondition>
interrupt_cond<crimson::os::seastore::TransactionConflictCondition>;
// Primary template: only the specializations below (for seastar::future
// and errorated futures) are usable; any other FutureType yields this
// empty marker type.
template <typename InterruptCond, typename FutureType>
class [[nodiscard]] interruptible_future_detail {};

// Trait and concepts for detecting interruptible futures.
template <typename FutureType>
struct is_interruptible_future : public std::false_type {};
template <typename InterruptCond, typename FutureType>
struct is_interruptible_future<
  interruptible_future_detail<
    InterruptCond,
    FutureType>>
  : public std::true_type {};
template <typename FutureType>
concept IsInterruptibleFuture = is_interruptible_future<FutureType>::value;
// Satisfied when invoking Func with Args... yields an interruptible future.
template <typename Func, typename... Args>
concept InvokeReturnsInterruptibleFuture =
  IsInterruptibleFuture<std::invoke_result_t<Func, Args...>>;
namespace internal {
// Core helper: test the interrupt condition, and -- if the operation may
// proceed -- run func with the thread-local interrupt_cond installed for
// the duration of the (synchronous part of the) invocation.
template <typename InterruptCond, typename Func, typename... Args>
auto call_with_interruption_impl(
  InterruptCondRef<InterruptCond> interrupt_condition,
  Func&& func, Args&&... args)
{
  using futurator_t = seastar::futurize<std::invoke_result_t<Func, Args...>>;
  // there might be a case like this:
  //    with_interruption([] {
  //      interruptor::do_for_each([] {
  //        ...
  //        return interruptible_errorated_future();
  //      }).safe_then_interruptible([] {
  //        ...
  //      });
  //    })
  // In this case, as crimson::do_for_each would directly do futurize_invoke
  // for "call_with_interruption", we have to make sure this invocation would
  // not errorly release ::crimson::interruptible::interrupt_cond<InterruptCond>

  // If there exists an interrupt condition, which means "Func" may not be
  // permitted to run as a result of the interruption, test it. If it does
  // need to be interrupted, return an interruption; otherwise, restore the
  // global "interrupt_cond" with the interruption condition, and go ahead
  // executing the Func.
  assert(interrupt_condition);
  auto fut = interrupt_condition->template may_interrupt<
    typename futurator_t::type>();
  INTR_FUT_DEBUG(
    "call_with_interruption_impl: may_interrupt: {}, "
    "local interrupt_condition: {}, "
    "global interrupt_cond: {},{}",
    (bool)fut,
    (void*)interrupt_condition.get(),
    (void*)interrupt_cond<InterruptCond>.interrupt_cond.get(),
    typeid(InterruptCond).name());
  if (fut) {
    // interrupted: propagate the exceptional future without running func
    return std::move(*fut);
  }
  interrupt_cond<InterruptCond>.set(interrupt_condition);

  auto fut2 = seastar::futurize_invoke(
    std::forward<Func>(func),
    std::forward<Args>(args)...);
  // Clear the global "interrupt_cond" to prevent it from interfering other
  // continuation chains.
  interrupt_cond<InterruptCond>.reset();
  return fut2;
}
}
// Overload for a future argument that is not itself an interruption
// type: a failed future already carrying an interruption is passed
// through untouched; any other future is handed to func under the
// interruption machinery.
template <typename InterruptCond, typename Func, seastar::Future Ret>
requires (!InterruptCond::template is_interruption_v<Ret>)
auto call_with_interruption(
  InterruptCondRef<InterruptCond> interrupt_condition,
  Func&& func, Ret&& fut)
{
  using Result = std::invoke_result_t<Func, Ret>;
  // if "T" is already an interrupt exception, return it directly;
  // otherwise, upper layer application may encounter errors executing
  // the "Func" body.
  if (fut.failed()) {
    std::exception_ptr eptr = fut.get_exception();
    if (interrupt_condition->is_interruption(eptr)) {
      return seastar::futurize<Result>::make_exception_future(std::move(eptr));
    }
    // non-interruption failure: rebuild the exceptional future and let
    // func observe it under the interruption machinery
    return internal::call_with_interruption_impl(
	     interrupt_condition,
	     std::forward<Func>(func),
	     seastar::futurize<Ret>::make_exception_future(
	       std::move(eptr)));
  }
  return internal::call_with_interruption_impl(
	   interrupt_condition,
	   std::forward<Func>(func),
	   std::move(fut));
}
// Overload for an argument that already is an interruption exception:
// func is never run; the interruption is converted directly into an
// exceptional future of func's would-be result type.
template <typename InterruptCond, typename Func, typename T>
requires (InterruptCond::template is_interruption_v<T>)
auto call_with_interruption(
  InterruptCondRef<InterruptCond> interrupt_condition,
  Func&& func, T&& arg)
{
  using Result = std::invoke_result_t<Func, T>;
  // if "T" is already an interrupt exception, return it directly;
  // otherwise, upper layer application may encounter errors executing
  // the "Func" body.
  return seastar::futurize<Result>::make_exception_future(
      std::get<0>(std::tuple(std::forward<T>(arg))));
}
// Overload for a plain argument (neither a future nor an interruption):
// forwards straight into the core implementation.
template <typename InterruptCond, typename Func, typename T>
requires (!InterruptCond::template is_interruption_v<T>) && (!seastar::Future<T>)
auto call_with_interruption(
  InterruptCondRef<InterruptCond> interrupt_condition,
  Func&& func, T&& arg)
{
  return internal::call_with_interruption_impl(
	   interrupt_condition,
	   std::forward<Func>(func),
	   std::forward<T>(arg));
}
// Nullary overload: func takes no arguments.
template <typename InterruptCond, typename Func,
	  typename Result = std::invoke_result_t<Func>>
auto call_with_interruption(
  InterruptCondRef<InterruptCond> interrupt_condition,
  Func&& func)
{
  return internal::call_with_interruption_impl(
	   interrupt_condition,
	   std::forward<Func>(func));
}
/**
 * non_futurized_call_with_interruption
 *
 * Synchronous counterpart of call_with_interruption_impl: checks the
 * interrupt condition (throwing the interruption if the call must not
 * proceed), installs the thread-local interrupt_cond for the duration of
 * the invocation, and restores it afterwards -- including on the
 * exception path.
 *
 * Fixes over the previous revision:
 *  - rethrow with `throw;` instead of `throw e;`, which sliced the
 *    caught exception down to std::exception and rethrew a copy,
 *    destroying the dynamic type callers may catch;
 *  - catch (...) instead of catch (std::exception&), so the thread-local
 *    interrupt_cond refcount is also unwound for exceptions not derived
 *    from std::exception.
 */
template <typename InterruptCond, typename Func, typename... T,
	  typename Result = std::invoke_result_t<Func, T...>>
Result non_futurized_call_with_interruption(
  InterruptCondRef<InterruptCond> interrupt_condition,
  Func&& func, T&&... args)
{
  assert(interrupt_condition);
  auto fut = interrupt_condition->template may_interrupt<seastar::future<>>();
  INTR_FUT_DEBUG(
    "non_futurized_call_with_interruption may_interrupt: {}, "
    "interrupt_condition: {}, interrupt_cond: {},{}",
    (bool)fut,
    (void*)interrupt_condition.get(),
    (void*)interrupt_cond<InterruptCond>.interrupt_cond.get(),
    typeid(InterruptCond).name());
  if (fut) {
    // interrupted before func could run: surface it as a throw
    std::rethrow_exception(fut->get_exception());
  }
  interrupt_cond<InterruptCond>.set(interrupt_condition);
  try {
    if constexpr (std::is_void_v<Result>) {
      std::invoke(std::forward<Func>(func), std::forward<T>(args)...);
      // Clear the global "interrupt_cond" to prevent it from interfering
      // other continuation chains.
      interrupt_cond<InterruptCond>.reset();
      return;
    } else {
      auto&& err = std::invoke(std::forward<Func>(func), std::forward<T>(args)...);
      interrupt_cond<InterruptCond>.reset();
      return std::forward<Result>(err);
    }
  } catch (...) {
    // Clear the global "interrupt_cond" to prevent it from interfering
    // other continuation chains, then rethrow the original exception,
    // preserving its dynamic type.
    interrupt_cond<InterruptCond>.reset();
    throw;
  }
}
template <typename InterruptCond, typename Errorator>
struct interruptible_errorator;

// Maps an element future type to the aggregate result type of
// parallel_for_each: plain seastar futures collapse to future<>,
// errorated futures collapse to the same errorator's future<void>.
template <typename T>
struct parallel_for_each_ret {
  static_assert(seastar::Future<T>);
  using type = seastar::future<>;
};

template <template <typename...> typename ErroratedFuture, typename T>
struct parallel_for_each_ret<
  ErroratedFuture<
    ::crimson::errorated_future_marker<T>>> {
  using type = ErroratedFuture<::crimson::errorated_future_marker<void>>;
};
// Bookkeeping for the interruptor's parallel_for_each: collects the
// futures of all spawned sub-tasks and resolves a single promise once
// every one of them has completed, remembering the last failure seen.
// The object is heap-allocated and deletes itself from wait_for_one()
// when the final future resolves.
template <typename InterruptCond, typename FutureType>
class parallel_for_each_state final : private seastar::continuation_base<> {
  using elem_ret_t = std::conditional_t<
    IsInterruptibleFuture<FutureType>,
    typename FutureType::core_type,
    FutureType>;
  using future_t = interruptible_future_detail<
    InterruptCond,
    typename parallel_for_each_ret<elem_ret_t>::type>;
  std::vector<future_t> _incomplete;
  seastar::promise<> _result;
  std::exception_ptr _ex;
private:
  // Pop already-available futures (capturing any failure), then arm this
  // continuation on the next pending one; when none remain, resolve
  // _result and self-destruct.
  void wait_for_one() noexcept {
    while (!_incomplete.empty() && _incomplete.back().available()) {
      if (_incomplete.back().failed()) {
	_ex = _incomplete.back().get_exception();
      }
      _incomplete.pop_back();
    }
    if (!_incomplete.empty()) {
      seastar::internal::set_callback(std::move(_incomplete.back()),
				      static_cast<continuation_base<>*>(this));
      _incomplete.pop_back();
      return;
    }
    if (__builtin_expect(bool(_ex), false)) {
      _result.set_exception(std::move(_ex));
    } else {
      _result.set_value();
    }
    delete this;
  }
  // Invoked when the armed future completes; harvest its state and
  // continue draining.
  virtual void run_and_dispose() noexcept override {
    if (_state.failed()) {
      _ex = std::move(_state).get_exception();
    }
    _state = {};
    wait_for_one();
  }
  task* waiting_task() noexcept override { return _result.waiting_task(); }
public:
  parallel_for_each_state(size_t n) {
    _incomplete.reserve(n);
  }
  void add_future(future_t&& f) {
    _incomplete.push_back(std::move(f));
  }
  // Must be called exactly once after all add_future() calls; kicks off
  // the drain loop.
  future_t get_future() {
    auto ret = _result.get_future();
    wait_for_one();
    return ret;
  }
  static future_t now() {
    return seastar::now();
  }
};
/**
 * interruptible_future_detail<InterruptCond, seastar::future<T>>
 *
 * Wrapper around a plain seastar::future<T> whose continuation-attaching
 * methods (then_interruptible and friends) capture the thread-local
 * interrupt condition at attachment time and re-install it via
 * call_with_interruption whenever the continuation runs, so the
 * continuation can be cut short when the condition fires.
 *
 * Change from the previous revision: all uses of std::result_of_t
 * (deprecated in C++17, removed in C++20) are replaced with the
 * equivalent std::invoke_result_t, matching the rest of this header.
 */
template <typename InterruptCond, typename T>
class [[nodiscard]] interruptible_future_detail<InterruptCond, seastar::future<T>>
  : private seastar::future<T> {
public:
  using core_type = seastar::future<T>;
  template <typename U>
  using interrupt_futurize_t =
    typename interruptor<InterruptCond>::template futurize_t<U>;
  using core_type::get0;
  using core_type::core_type;
  using core_type::get_exception;
  using core_type::ignore_ready_future;

  [[gnu::always_inline]]
  interruptible_future_detail(seastar::future<T>&& base)
    : core_type(std::move(base))
  {}

  using value_type = typename seastar::future<T>::value_type;
  using tuple_type = typename seastar::future<T>::tuple_type;

  // Blocking get(): when the future is not yet available, the
  // thread-local interrupt condition is stashed and cleared for the
  // duration of the wait, then re-installed once the value arrives.
  [[gnu::always_inline]]
  value_type&& get() {
    if (core_type::available()) {
      return core_type::get();
    } else {
      // destined to wait!
      auto interruption_condition = interrupt_cond<InterruptCond>.interrupt_cond;
      INTR_FUT_DEBUG(
	"interruptible_future_detail::get() waiting, interrupt_cond: {},{}",
	(void*)interrupt_cond<InterruptCond>.interrupt_cond.get(),
	typeid(InterruptCond).name());
      interrupt_cond<InterruptCond>.reset();
      auto&& value = core_type::get();
      interrupt_cond<InterruptCond>.set(interruption_condition);
      INTR_FUT_DEBUG(
	"interruptible_future_detail::get() got, interrupt_cond: {},{}",
	(void*)interrupt_cond<InterruptCond>.interrupt_cond.get(),
	typeid(InterruptCond).name());
      return std::move(value);
    }
  }

  using core_type::available;
  using core_type::failed;

  // then_wrapped under the interruption machinery: func receives the
  // (possibly failed) underlying future.
  template <typename Func,
	    typename Result = interrupt_futurize_t<
	      std::invoke_result_t<Func, seastar::future<T>>>>
  [[gnu::always_inline]]
  Result then_wrapped_interruptible(Func&& func) {
    ceph_assert(interrupt_cond<InterruptCond>.interrupt_cond);
    return core_type::then_wrapped(
      [func=std::move(func),
       interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond]
      (auto&& fut) mutable {
      return call_with_interruption(
		std::move(interrupt_condition),
		std::forward<Func>(func),
		std::move(fut));
    });
  }

  // Interruptible counterpart of seastar::future::then().
  template <typename Func>
  [[gnu::always_inline]]
  auto then_interruptible(Func&& func) {
    ceph_assert(interrupt_cond<InterruptCond>.interrupt_cond);
    if constexpr (std::is_void_v<T>) {
      auto fut = core_type::then(
	[func=std::move(func),
	 interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond]
	() mutable {
	return call_with_interruption(
		  interrupt_condition,
		  std::move(func));
      });
      return (interrupt_futurize_t<decltype(fut)>)(std::move(fut));
    } else {
      auto fut = core_type::then(
	[func=std::move(func),
	 interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond]
	(T&& arg) mutable {
	return call_with_interruption(
		  interrupt_condition,
		  std::move(func),
		  std::forward<T>(arg));
      });
      return (interrupt_futurize_t<decltype(fut)>)(std::move(fut));
    }
  }

  // For futures of tuples: applies the tuple elements as func's arguments.
  template <typename Func>
  [[gnu::always_inline]]
  auto then_unpack_interruptible(Func&& func) {
    return then_interruptible([func=std::forward<Func>(func)](T&& tuple) mutable {
      return std::apply(std::forward<Func>(func), std::move(tuple));
    });
  }

  // Interruptible counterpart of handle_exception(): func receives the
  // exception_ptr of a failed future; ready futures pass through.
  template <typename Func,
	    typename Result = interrupt_futurize_t<
		std::invoke_result_t<Func, std::exception_ptr>>>
  [[gnu::always_inline]]
  Result handle_exception_interruptible(Func&& func) {
    ceph_assert(interrupt_cond<InterruptCond>.interrupt_cond);
    return core_type::then_wrapped(
      [func=std::forward<Func>(func),
       interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond]
      (auto&& fut) mutable {
      if (!fut.failed()) {
	return seastar::make_ready_future<T>(fut.get());
      } else {
	return call_with_interruption(
		  interrupt_condition,
		  std::move(func),
		  fut.get_exception());
      }
    });
  }

  // finally() that either runs under the interruption machinery
  // (default) or as a plain seastar finally, selected at compile time.
  template <bool may_interrupt = true, typename Func,
	    typename Result = interrupt_futurize_t<
		std::invoke_result_t<Func>>>
  [[gnu::always_inline]]
  Result finally_interruptible(Func&& func) {
    if constexpr (may_interrupt) {
      ceph_assert(interrupt_cond<InterruptCond>.interrupt_cond);
      return core_type::then_wrapped(
	[func=std::forward<Func>(func),
	 interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond]
	(auto&& fut) mutable {
	return call_with_interruption(
		  interrupt_condition,
		  std::move(func));
      });
    } else {
      return core_type::finally(std::forward<Func>(func));
    }
  }

  // Interruptible counterpart of handle_exception_type(): only the
  // exception type deduced from func's single parameter is handled;
  // anything else propagates out of the catch.
  template <typename Func,
	    typename Result = interrupt_futurize_t<
		std::invoke_result_t<
		  Func,
		  typename seastar::function_traits<Func>::template arg<0>::type>>>
  [[gnu::always_inline]]
  Result handle_exception_type_interruptible(Func&& func) {
    ceph_assert(interrupt_cond<InterruptCond>.interrupt_cond);
    using trait = seastar::function_traits<Func>;
    static_assert(trait::arity == 1, "func can take only one parameter");
    using ex_type = typename trait::template arg<0>::type;
    return core_type::then_wrapped(
      [func=std::forward<Func>(func),
       interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond]
      (auto&& fut) mutable -> Result {
      if (!fut.failed()) {
	return seastar::make_ready_future<T>(fut.get());
      } else {
	try {
	  std::rethrow_exception(fut.get_exception());
	} catch (ex_type& ex) {
	  return call_with_interruption(
		    interrupt_condition,
		    std::move(func), ex);
	}
      }
    });
  }

  using my_type = interruptible_future_detail<InterruptCond, seastar::future<T>>;

  // Plain (non-interruptible) finally.
  template <typename Func>
  [[gnu::always_inline]]
  my_type finally(Func&& func) {
    return core_type::finally(std::forward<Func>(func));
  }
private:
  // Routes interruption exceptions to func; other failures and
  // successful results pass through unchanged.
  template <typename Func>
  [[gnu::always_inline]]
  auto handle_interruption(Func&& func) {
    return core_type::then_wrapped(
      [func=std::move(func)](auto&& fut) mutable {
	if (fut.failed()) {
	  std::exception_ptr ex = fut.get_exception();
	  if (InterruptCond::is_interruption(ex)) {
	    return seastar::futurize_invoke(std::move(func), std::move(ex));
	  } else {
	    return seastar::make_exception_future<T>(std::move(ex));
	  }
	} else {
	  return seastar::make_ready_future<T>(fut.get());
	}
      });
  }

  seastar::future<T> to_future() {
    return static_cast<core_type&&>(std::move(*this));
  }
  // this is only supposed to be invoked by seastar functions
  template <typename Func,
	    typename Result = interrupt_futurize_t<
		std::invoke_result_t<Func, seastar::future<T>>>>
  [[gnu::always_inline]]
  Result then_wrapped(Func&& func) {
    return core_type::then_wrapped(
      [func=std::move(func),
       interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond]
      (auto&& fut) mutable {
      return call_with_interruption(
		interrupt_condition,
		std::forward<Func>(func),
		std::move(fut));
    });
  }
  friend interruptor<InterruptCond>;
  friend class interruptible_future_builder<InterruptCond>;
  template <typename U>
  friend struct ::seastar::futurize;
  template <typename>
  friend class ::seastar::future;
  template <typename HeldState, typename Future>
  friend class seastar::internal::do_with_state;
  template<typename TX, typename F>
  friend inline auto ::seastar::internal::do_with_impl(TX&& rvalue, F&& f);
  template<typename T1, typename T2, typename T3_or_F, typename... More>
  friend inline auto ::seastar::internal::do_with_impl(T1&& rv1, T2&& rv2, T3_or_F&& rv3, More&&... more);
  template <typename T1, typename T2, typename... More>
  friend auto seastar::internal::do_with_impl(T1&& rv1, T2&& rv2, More&&... more);
  template <typename, typename>
  friend class ::crimson::maybe_handle_error_t;
  template <typename>
  friend class ::seastar::internal::extract_values_from_futures_vector;
  template <typename, typename>
  friend class interruptible_future_detail;
  template <typename ResolvedVectorTransform, typename Future>
  friend inline typename ResolvedVectorTransform::future_type
  seastar::internal::complete_when_all(
    std::vector<Future>&& futures,
    typename std::vector<Future>::iterator pos) noexcept;
  template <typename>
  friend class ::seastar::internal::when_all_state_component;
  template <typename Lock, typename Func>
  friend inline auto seastar::with_lock(Lock& lock, Func&& f);
  template <typename IC, typename FT>
  friend class parallel_for_each_state;
};
// Errorator wrapper whose futures are interruptible: mirrors the
// underlying Errorator's interface (future/extend/make_ready_future/now)
// but every future type is wrapped in interruptible_future_detail.
template <typename InterruptCond, typename Errorator>
struct interruptible_errorator {
  using base_ertr = Errorator;
  using intr_cond_t = InterruptCond;
  template <typename ValueT = void>
  using future = interruptible_future_detail<InterruptCond,
	typename Errorator::template future<ValueT>>;
  template <class... NewAllowedErrorsT>
  using extend = interruptible_errorator<
    InterruptCond,
    typename Errorator::template extend<NewAllowedErrorsT...>>;
  template <class Ertr>
  using extend_ertr = interruptible_errorator<
    InterruptCond,
    typename Errorator::template extend_ertr<Ertr>>;
  // Ready interruptible-errorated future holding the given value.
  template <typename ValueT = void, typename... A>
  static interruptible_future_detail<
    InterruptCond,
    typename Errorator::template future<ValueT>>
  make_ready_future(A&&... value) {
    return interruptible_future_detail<
      InterruptCond, typename Errorator::template future<ValueT>>(
	Errorator::template make_ready_future<ValueT>(
	  std::forward<A>(value)...));
  }
  // Ready interruptible-errorated future<void>.
  static interruptible_future_detail<
    InterruptCond,
    typename Errorator::template future<>> now() {
    return interruptible_future_detail<
      InterruptCond, typename Errorator::template future<>>(
	Errorator::now());
  }
  using pass_further = typename Errorator::pass_further;
};
template <typename InterruptCond,
template <typename...> typename ErroratedFuture,
typename T>
class [[nodiscard]] interruptible_future_detail<
InterruptCond,
ErroratedFuture<::crimson::errorated_future_marker<T>>>
: private ErroratedFuture<::crimson::errorated_future_marker<T>>
{
public:
using core_type = ErroratedFuture<crimson::errorated_future_marker<T>>;
using errorator_type = typename core_type::errorator_type;
using interrupt_errorator_type =
interruptible_errorator<InterruptCond, errorator_type>;
using interrupt_cond_type = InterruptCond;
template <typename U>
using interrupt_futurize_t =
typename interruptor<InterruptCond>::template futurize_t<U>;
using core_type::available;
using core_type::failed;
using core_type::core_type;
using core_type::get_exception;
using value_type = typename core_type::value_type;
interruptible_future_detail(seastar::future<T>&& fut)
: core_type(std::move(fut))
{}
template <template <typename...> typename ErroratedFuture2,
typename... U>
[[gnu::always_inline]]
interruptible_future_detail(
ErroratedFuture2<::crimson::errorated_future_marker<U...>>&& fut)
: core_type(std::move(fut)) {}
template <template <typename...> typename ErroratedFuture2,
typename... U>
[[gnu::always_inline]]
interruptible_future_detail(
interruptible_future_detail<InterruptCond,
ErroratedFuture2<::crimson::errorated_future_marker<U...>>>&& fut)
: core_type(static_cast<typename std::decay_t<decltype(fut)>::core_type&&>(fut)) {
using src_errorator_t = \
typename ErroratedFuture2<
::crimson::errorated_future_marker<U...>>::errorator_type;
static_assert(core_type::errorator_type::template contains_once_v<
src_errorator_t>,
"conversion is only possible from less-or-eq errorated future!");
}
[[gnu::always_inline]]
interruptible_future_detail(
interruptible_future_detail<InterruptCond, seastar::future<T>>&& fut)
: core_type(static_cast<seastar::future<T>&&>(fut)) {}
template <class... A>
[[gnu::always_inline]]
interruptible_future_detail(ready_future_marker, A&&... a)
: core_type(::seastar::make_ready_future<typename core_type::value_type>(
std::forward<A>(a)...)) {
}
[[gnu::always_inline]]
interruptible_future_detail(exception_future_marker, ::seastar::future_state_base&& state) noexcept
: core_type(::seastar::futurize<core_type>::make_exception_future(std::move(state))) {
}
[[gnu::always_inline]]
interruptible_future_detail(exception_future_marker, std::exception_ptr&& ep) noexcept
: core_type(::seastar::futurize<core_type>::make_exception_future(std::move(ep))) {
}
template<bool interruptible = true, typename ValueInterruptCondT, typename ErrorVisitorT,
std::enable_if_t<!interruptible, int> = 0>
[[gnu::always_inline]]
auto safe_then_interruptible(ValueInterruptCondT&& valfunc, ErrorVisitorT&& errfunc) {
auto fut = core_type::safe_then(
std::forward<ValueInterruptCondT>(valfunc),
std::forward<ErrorVisitorT>(errfunc));
return (interrupt_futurize_t<decltype(fut)>)(std::move(fut));
}
template <typename... Args>
auto si_then(Args&&... args) {
return safe_then_interruptible(std::forward<Args>(args)...);
}
template<bool interruptible = true, typename ValueInterruptCondT, typename ErrorVisitorT,
typename U = T, std::enable_if_t<!std::is_void_v<U> && interruptible, int> = 0>
[[gnu::always_inline]]
auto safe_then_interruptible(ValueInterruptCondT&& valfunc, ErrorVisitorT&& errfunc) {
ceph_assert(interrupt_cond<InterruptCond>.interrupt_cond);
auto fut = core_type::safe_then(
[func=std::move(valfunc),
interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond]
(T&& args) mutable {
return call_with_interruption(
interrupt_condition,
std::move(func),
std::forward<T>(args));
}, [func=std::move(errfunc),
interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond]
(auto&& err) mutable -> decltype(auto) {
constexpr bool return_void = std::is_void_v<
std::invoke_result_t<ErrorVisitorT,
std::decay_t<decltype(err)>>>;
constexpr bool return_err = ::crimson::is_error_v<
std::decay_t<std::invoke_result_t<ErrorVisitorT,
std::decay_t<decltype(err)>>>>;
if constexpr (return_err || return_void) {
return non_futurized_call_with_interruption(
interrupt_condition,
std::move(func),
std::move(err));
} else {
return call_with_interruption(
interrupt_condition,
std::move(func),
std::move(err));
}
});
return (interrupt_futurize_t<decltype(fut)>)(std::move(fut));
}
template<bool interruptible = true, typename ValueInterruptCondT, typename ErrorVisitorT,
typename U = T, std::enable_if_t<std::is_void_v<U> && interruptible, int> = 0>
[[gnu::always_inline]]
auto safe_then_interruptible(ValueInterruptCondT&& valfunc, ErrorVisitorT&& errfunc) {
ceph_assert(interrupt_cond<InterruptCond>.interrupt_cond);
auto fut = core_type::safe_then(
[func=std::move(valfunc),
interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond]
() mutable {
return call_with_interruption(
interrupt_condition,
std::move(func));
}, [func=std::move(errfunc),
interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond]
(auto&& err) mutable -> decltype(auto) {
constexpr bool return_void = std::is_void_v<
std::invoke_result_t<ErrorVisitorT,
std::decay_t<decltype(err)>>>;
constexpr bool return_err = ::crimson::is_error_v<
std::decay_t<std::invoke_result_t<ErrorVisitorT,
std::decay_t<decltype(err)>>>>;
if constexpr (return_err || return_void) {
return non_futurized_call_with_interruption(
interrupt_condition,
std::move(func),
std::move(err));
} else {
return call_with_interruption(
interrupt_condition,
std::move(func),
std::move(err));
}
});
return (interrupt_futurize_t<decltype(fut)>)(std::move(fut));
}
template <bool interruptible = true, typename ValueInterruptCondT,
typename U = T, std::enable_if_t<std::is_void_v<T> && interruptible, int> = 0>
[[gnu::always_inline]]
auto safe_then_interruptible(ValueInterruptCondT&& valfunc) {
ceph_assert(interrupt_cond<InterruptCond>.interrupt_cond);
auto fut = core_type::safe_then(
[func=std::move(valfunc),
interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond]
() mutable {
return call_with_interruption(
interrupt_condition,
std::move(func));
});
return (interrupt_futurize_t<decltype(fut)>)(std::move(fut));
}
template <typename ValFuncT, typename ErrorFuncT>
[[gnu::always_inline]]
auto safe_then_unpack_interruptible(ValFuncT&& func, ErrorFuncT&& errfunc) {
return safe_then_interruptible([func=std::forward<ValFuncT>(func)](T&& tuple) mutable {
return std::apply(std::forward<ValFuncT>(func), std::move(tuple));
}, std::forward<ErrorFuncT>(errfunc));
}
// Tuple-unpacking variant without an error visitor: the future's tuple
// value is expanded via std::apply into `func`'s parameter list.
template <typename ValFuncT>
[[gnu::always_inline]]
auto safe_then_unpack_interruptible(ValFuncT&& func) {
  return safe_then_interruptible([func=std::forward<ValFuncT>(func)](T&& tuple) mutable {
    return std::apply(std::forward<ValFuncT>(func), std::move(tuple));
  });
}
// safe_then_interruptible() overload for non-void value types: the resolved
// value is forwarded to `valfunc` through call_with_interruption() together
// with the interrupt condition captured at chaining time.
// Precondition (asserted): an interrupt condition is currently installed.
template <bool interruptible = true, typename ValueInterruptCondT,
          typename U = T, std::enable_if_t<!std::is_void_v<T> && interruptible, int> = 0>
[[gnu::always_inline]]
auto safe_then_interruptible(ValueInterruptCondT&& valfunc) {
  ceph_assert(interrupt_cond<InterruptCond>.interrupt_cond);
  auto fut = core_type::safe_then(
    [func=std::move(valfunc),
     interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond]
    (T&& arg) mutable {
      return call_with_interruption(
        interrupt_condition,
        std::move(func),
        std::forward<T>(arg));
    });
  return (interrupt_futurize_t<decltype(fut)>)(std::move(fut));
}
// Non-interruptible fallback (interruptible == false): chain `valfunc`
// directly with no interrupt-condition bookkeeping, only re-wrapping the
// result in the interruptible future type.
template <bool interruptible = true, typename ValueInterruptCondT,
          std::enable_if_t<!interruptible, int> = 0>
[[gnu::always_inline]]
auto safe_then_interruptible(ValueInterruptCondT&& valfunc) {
  auto fut = core_type::safe_then(std::forward<ValueInterruptCondT>(valfunc));
  return (interrupt_futurize_t<decltype(fut)>)(std::move(fut));
}
// Multiple error visitors: compose them into a single visitor with
// crimson::composer and delegate to the two-argument overload.
template <typename ValueInterruptCondT,
          typename ErrorVisitorHeadT,
          typename... ErrorVisitorTailT>
[[gnu::always_inline]]
auto safe_then_interruptible(ValueInterruptCondT&& valfunc,
                             ErrorVisitorHeadT&& err_func_head,
                             ErrorVisitorTailT&&... err_func_tail) {
  return safe_then_interruptible(
    std::forward<ValueInterruptCondT>(valfunc),
    ::crimson::composer(std::forward<ErrorVisitorHeadT>(err_func_head),
                        std::forward<ErrorVisitorTailT>(err_func_tail)...));
}
// Tuple-flavoured spelling of the variadic overload above; currently
// forwards to safe_then_interruptible() with the visitors composed.
template <typename ValueInterruptCondT,
          typename ErrorVisitorHeadT,
          typename... ErrorVisitorTailT>
[[gnu::always_inline]]
auto safe_then_interruptible_tuple(ValueInterruptCondT&& valfunc,
                                   ErrorVisitorHeadT&& err_func_head,
                                   ErrorVisitorTailT&&... err_func_tail) {
  return safe_then_interruptible(
    std::forward<ValueInterruptCondT>(valfunc),
    ::crimson::composer(std::forward<ErrorVisitorHeadT>(err_func_head),
                        std::forward<ErrorVisitorTailT>(err_func_tail)...));
}
// Combination of tuple unpacking and composed error visitors: the future's
// tuple value is std::apply'd into `valfunc`; errors go to the composed
// visitor chain.
template <typename ValFuncT,
          typename ErrorVisitorHeadT,
          typename... ErrorVisitorTailT>
[[gnu::always_inline]]
auto safe_then_unpack_interruptible_tuple(
  ValFuncT&& valfunc,
  ErrorVisitorHeadT&& err_func_head,
  ErrorVisitorTailT&&... err_func_tail) {
  return safe_then_interruptible_tuple(
    [valfunc=std::forward<ValFuncT>(valfunc)](T&& tuple) mutable {
      return std::apply(std::forward<ValFuncT>(valfunc), std::move(tuple));
    },
    ::crimson::composer(std::forward<ErrorVisitorHeadT>(err_func_head),
                        std::forward<ErrorVisitorTailT>(err_func_tail)...));
}
// Interruptible counterpart of errorator handle_error().
//
// When interruptible, the handler runs under the interrupt condition
// captured here.  Two dispatch paths, chosen at compile time from the
// handler's result type:
//  - handler returns void or an error type: invoked via
//    non_futurized_call_with_interruption (plain, non-future result);
//  - handler returns a future: invoked via call_with_interruption.
// Precondition (asserted): an interrupt condition is currently installed.
template <bool interruptible = true, typename ErrorFunc>
auto handle_error_interruptible(ErrorFunc&& errfunc) {
  if constexpr (interruptible) {
    ceph_assert(interrupt_cond<InterruptCond>.interrupt_cond);
    auto fut = core_type::handle_error(
      [errfunc=std::move(errfunc),
       interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond]
      (auto&& err) mutable -> decltype(auto) {
        // Does the handler produce void?
        constexpr bool return_void = std::is_void_v<
          std::invoke_result_t<ErrorFunc,
            std::decay_t<decltype(err)>>>;
        // ...or an (errorator) error value rather than a future?
        constexpr bool return_err = ::crimson::is_error_v<
          std::decay_t<std::invoke_result_t<ErrorFunc,
            std::decay_t<decltype(err)>>>>;
        if constexpr (return_err || return_void) {
          return non_futurized_call_with_interruption(
            interrupt_condition,
            std::move(errfunc),
            std::move(err));
        } else {
          return call_with_interruption(
            interrupt_condition,
            std::move(errfunc),
            std::move(err));
        }
      });
    return (interrupt_futurize_t<decltype(fut)>)(std::move(fut));
  } else {
    // Non-interruptible: plain errorator handle_error.
    return core_type::handle_error(std::forward<ErrorFunc>(errfunc));
  }
}
// Variadic overload: compose several error handlers into one with
// crimson::composer, then delegate to the single-handler overload.
// Precondition (asserted): an interrupt condition is currently installed.
template <typename ErrorFuncHead,
          typename... ErrorFuncTail>
auto handle_error_interruptible(ErrorFuncHead&& error_func_head,
                                ErrorFuncTail&&... error_func_tail) {
  ceph_assert(interrupt_cond<InterruptCond>.interrupt_cond);
  static_assert(sizeof...(ErrorFuncTail) > 0);
  return this->handle_error_interruptible(
    ::crimson::composer(
      std::forward<ErrorFuncHead>(error_func_head),
      std::forward<ErrorFuncTail>(error_func_tail)...));
}
template <typename Func>
[[gnu::always_inline]]
auto finally(Func&& func) {
auto fut = core_type::finally(std::forward<Func>(func));
return (interrupt_futurize_t<decltype(fut)>)(std::move(fut));
}
private:
using core_type::_then;
// Route only *interruption* exceptions to `func`; any other failure (or
// success) passes through untouched.
//
// The return errorator is the union of this future's errorator and the
// one implied by `func`'s result (see errorator.h safe_then for the same
// type plumbing).  The interrupt condition captured here keeps the
// condition object alive for the duration of the continuation.
template <typename Func>
[[gnu::always_inline]]
auto handle_interruption(Func&& func) {
  // see errorator.h safe_then definition
  using func_result_t =
    typename std::invoke_result<Func, std::exception_ptr>::type;
  using func_ertr_t =
    typename core_type::template get_errorator_t<func_result_t>;
  using this_ertr_t = typename core_type::errorator_type;
  using ret_ertr_t = typename this_ertr_t::template extend_ertr<func_ertr_t>;
  using futurator_t = typename ret_ertr_t::template futurize<func_result_t>;
  return core_type::then_wrapped(
    [func=std::move(func),
     interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond]
    (auto&& fut) mutable
    -> typename futurator_t::type {
      if (fut.failed()) {
        std::exception_ptr ex = fut.get_exception();
        // Only exceptions recognized by the interrupt condition are
        // handled; everything else is re-raised to the caller.
        if (InterruptCond::is_interruption(ex)) {
          return futurator_t::invoke(std::move(func), std::move(ex));
        } else {
          return futurator_t::make_exception_future(std::move(ex));
        }
      } else {
        // Success: forward the resolved future unchanged.
        return std::move(fut);
      }
    });
}
// Strip the interruptible wrapper: expose the underlying errorated future
// by rvalue-converting *this to its core type (consumes *this).
ErroratedFuture<::crimson::errorated_future_marker<T>>
to_future() {
  return static_cast<core_type&&>(std::move(*this));
}
friend class interruptor<InterruptCond>;
friend class interruptible_future_builder<InterruptCond>;
template <typename U>
friend struct ::seastar::futurize;
template <typename>
friend class ::seastar::future;
template<typename TX, typename F>
friend inline auto ::seastar::internal::do_with_impl(TX&& rvalue, F&& f);
template<typename T1, typename T2, typename T3_or_F, typename... More>
friend inline auto ::seastar::internal::do_with_impl(T1&& rv1, T2&& rv2, T3_or_F&& rv3, More&&... more);
template <typename T1, typename T2, typename... More>
friend auto seastar::internal::do_with_impl(T1&& rv1, T2&& rv2, More&&... more);
template <typename HeldState, typename Future>
friend class seastar::internal::do_with_state;
template <typename, typename>
friend class ::crimson::maybe_handle_error_t;
template <typename, typename>
friend class interruptible_future_detail;
template <typename Lock, typename Func>
friend inline auto seastar::with_lock(Lock& lock, Func&& f);
template <typename IC, typename FT>
friend class parallel_for_each_state;
};
template <typename InterruptCond, typename T = void>
using interruptible_future =
interruptible_future_detail<InterruptCond, seastar::future<T>>;
template <typename InterruptCond, typename Errorator, typename T = void>
using interruptible_errorated_future =
interruptible_future_detail<
InterruptCond,
typename Errorator::template future<T>>;
template <typename InterruptCond>
struct interruptor
{
public:
using condition = InterruptCond;
template <typename FutureType>
[[gnu::always_inline]]
static interruptible_future_detail<InterruptCond, FutureType>
make_interruptible(FutureType&& fut) {
return interruptible_future_detail<InterruptCond, FutureType>(std::move(fut));
}
[[gnu::always_inline]]
static interruptible_future_detail<InterruptCond, seastar::future<>> now() {
return interruptible_future_detail<
InterruptCond,
seastar::future<>>(seastar::now());
}
template <typename ValueT = void, typename... A>
[[gnu::always_inline]]
static interruptible_future_detail<InterruptCond, seastar::future<ValueT>>
make_ready_future(A&&... value) {
return interruptible_future_detail<InterruptCond, seastar::future<ValueT>>(
seastar::make_ready_future<ValueT>(std::forward<A>(value)...));
}
template <typename T>
struct futurize {
using type = interruptible_future_detail<
InterruptCond, typename seastar::futurize<T>::type>;
};
template <typename FutureType>
struct futurize<interruptible_future_detail<InterruptCond, FutureType>> {
using type = interruptible_future_detail<InterruptCond, FutureType>;
};
template <typename T>
using futurize_t = typename futurize<T>::type;
template <typename Container, typename AsyncAction>
[[gnu::always_inline]]
static auto do_for_each(Container& c, AsyncAction&& action) {
return do_for_each(std::begin(c), std::end(c),
std::forward<AsyncAction>(action));
}
template <typename OpFunc, typename OnInterrupt,
typename... Params>
static inline auto with_interruption_cond(
OpFunc&& opfunc, OnInterrupt&& efunc, InterruptCond &&cond, Params&&... params) {
INTR_FUT_DEBUG(
"with_interruption_cond: interrupt_cond: {}",
(void*)interrupt_cond<InterruptCond>.interrupt_cond.get());
return internal::call_with_interruption_impl(
seastar::make_lw_shared<InterruptCond>(std::move(cond)),
std::forward<OpFunc>(opfunc),
std::forward<Params>(params)...
).template handle_interruption(std::move(efunc));
}
template <typename OpFunc, typename OnInterrupt,
typename... InterruptCondParams>
static inline auto with_interruption(
OpFunc&& opfunc, OnInterrupt&& efunc, InterruptCondParams&&... params) {
return with_interruption_cond(
std::forward<OpFunc>(opfunc),
std::forward<OnInterrupt>(efunc),
InterruptCond(std::forward<InterruptCondParams>(params)...));
}
template <typename Error,
typename Func,
typename... Params>
static inline auto with_interruption_to_error(
Func &&f, InterruptCond &&cond, Params&&... params) {
using func_result_t = std::invoke_result_t<Func, Params...>;
using func_ertr_t =
typename seastar::template futurize<
func_result_t>::core_type::errorator_type;
using with_trans_ertr =
typename func_ertr_t::template extend_ertr<errorator<Error>>;
using value_type = typename func_result_t::value_type;
using ftype = typename std::conditional_t<
std::is_same_v<value_type, seastar::internal::monostate>,
typename with_trans_ertr::template future<>,
typename with_trans_ertr::template future<value_type>>;
return with_interruption_cond(
std::forward<Func>(f),
[](auto e) -> ftype {
return Error::make();
},
std::forward<InterruptCond>(cond),
std::forward<Params>(params)...);
}
template <typename Func>
[[gnu::always_inline]]
static auto wrap_function(Func&& func) {
return [func=std::forward<Func>(func),
interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond]() mutable {
return call_with_interruption(
interrupt_condition,
std::forward<Func>(func));
};
}
template <typename Iterator,
InvokeReturnsInterruptibleFuture<typename Iterator::reference> AsyncAction>
[[gnu::always_inline]]
static auto do_for_each(Iterator begin, Iterator end, AsyncAction&& action) {
using Result = std::invoke_result_t<AsyncAction, typename Iterator::reference>;
if constexpr (seastar::Future<typename Result::core_type>) {
return make_interruptible(
::seastar::do_for_each(begin, end,
[action=std::move(action),
interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond]
(typename Iterator::reference x) mutable {
return call_with_interruption(
interrupt_condition,
std::move(action),
std::forward<decltype(*begin)>(x)).to_future();
})
);
} else {
return make_interruptible(
::crimson::do_for_each(begin, end,
[action=std::move(action),
interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond]
(typename Iterator::reference x) mutable {
return call_with_interruption(
interrupt_condition,
std::move(action),
std::forward<decltype(*begin)>(x)).to_future();
})
);
}
}
template <typename Iterator, typename AsyncAction>
requires (!InvokeReturnsInterruptibleFuture<AsyncAction, typename Iterator::reference>)
[[gnu::always_inline]]
static auto do_for_each(Iterator begin, Iterator end, AsyncAction&& action) {
if constexpr (seastar::InvokeReturnsAnyFuture<AsyncAction, typename Iterator::reference>) {
return make_interruptible(
::seastar::do_for_each(begin, end,
[action=std::move(action),
interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond]
(typename Iterator::reference x) mutable {
return call_with_interruption(
interrupt_condition,
std::move(action),
std::forward<decltype(*begin)>(x));
})
);
} else {
return make_interruptible(
::crimson::do_for_each(begin, end,
[action=std::move(action),
interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond]
(typename Iterator::reference x) mutable {
return call_with_interruption(
interrupt_condition,
std::move(action),
std::forward<decltype(*begin)>(x));
})
);
}
}
template <InvokeReturnsInterruptibleFuture AsyncAction>
[[gnu::always_inline]]
static auto repeat(AsyncAction&& action) {
using Result = std::invoke_result_t<AsyncAction>;
if constexpr (seastar::Future<typename Result::core_type>) {
return make_interruptible(
::seastar::repeat(
[action=std::move(action),
interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond] {
return call_with_interruption(
interrupt_condition,
std::move(action)).to_future();
})
);
} else {
return make_interruptible(
::crimson::repeat(
[action=std::move(action),
interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond]() mutable {
return call_with_interruption(
interrupt_condition,
std::move(action)).to_future();
})
);
}
}
template <typename AsyncAction>
requires (!InvokeReturnsInterruptibleFuture<AsyncAction>)
[[gnu::always_inline]]
static auto repeat(AsyncAction&& action) {
if constexpr (seastar::InvokeReturnsAnyFuture<AsyncAction>) {
return make_interruptible(
::seastar::repeat(
[action=std::move(action),
interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond] {
return call_with_interruption(
interrupt_condition,
std::move(action));
})
);
} else {
return make_interruptible(
::crimson::repeat(
[action=std::move(action),
interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond] {
return call_with_interruption(
interrupt_condition,
std::move(action));
})
);
}
}
template <typename Iterator, typename Func>
static inline auto parallel_for_each(
Iterator begin,
Iterator end,
Func&& func
) noexcept {
using ResultType = std::invoke_result_t<Func, typename Iterator::reference>;
parallel_for_each_state<InterruptCond, ResultType>* s = nullptr;
auto decorated_func =
[func=std::forward<Func>(func),
interrupt_condition=interrupt_cond<InterruptCond>.interrupt_cond]
(decltype(*Iterator())&& x) mutable {
return call_with_interruption(
interrupt_condition,
std::forward<Func>(func),
std::forward<decltype(*begin)>(x));
};
// Process all elements, giving each future the following treatment:
// - available, not failed: do nothing
// - available, failed: collect exception in ex
// - not available: collect in s (allocating it if needed)
while (begin != end) {
auto f = seastar::futurize_invoke(decorated_func, *begin++);
if (!f.available() || f.failed()) {
if (!s) {
using itraits = std::iterator_traits<Iterator>;
auto n = (seastar::internal::iterator_range_estimate_vector_capacity(
begin, end, typename itraits::iterator_category()) + 1);
s = new parallel_for_each_state<InterruptCond, ResultType>(n);
}
s->add_future(std::move(f));
}
}
// If any futures were not available, hand off to parallel_for_each_state::start().
// Otherwise we can return a result immediately.
if (s) {
// s->get_future() takes ownership of s (and chains it to one of the futures it contains)
// so this isn't a leak
return s->get_future();
}
return parallel_for_each_state<InterruptCond, ResultType>::now();
}
template <typename Container, typename Func>
static inline auto parallel_for_each(Container& container, Func&& func) noexcept {
return parallel_for_each(
std::begin(container),
std::end(container),
std::forward<Func>(func));
}
template <typename Iterator, typename Mapper, typename Initial, typename Reduce>
static inline interruptible_future<InterruptCond, Initial> map_reduce(
Iterator begin, Iterator end, Mapper&& mapper, Initial initial, Reduce&& reduce) {
struct state {
Initial result;
Reduce reduce;
};
auto s = seastar::make_lw_shared(state{std::move(initial), std::move(reduce)});
interruptible_future<InterruptCond> ret = seastar::make_ready_future<>();
while (begin != end) {
ret = seastar::futurize_invoke(mapper, *begin++).then_wrapped_interruptible(
[s = s.get(), ret = std::move(ret)] (auto f) mutable {
try {
s->result = s->reduce(std::move(s->result), std::move(f.get0()));
return std::move(ret);
} catch (...) {
return std::move(ret).then_wrapped_interruptible([ex = std::current_exception()] (auto f) {
f.ignore_ready_future();
return seastar::make_exception_future<>(ex);
});
}
});
}
return ret.then_interruptible([s] {
return seastar::make_ready_future<Initial>(std::move(s->result));
});
}
template <typename Range, typename Mapper, typename Initial, typename Reduce>
static inline interruptible_future<InterruptCond, Initial> map_reduce(
Range&& range, Mapper&& mapper, Initial initial, Reduce&& reduce) {
return map_reduce(std::begin(range), std::end(range), std::forward<Mapper>(mapper),
std::move(initial), std::move(reduce));
}
template<typename Fut>
requires seastar::Future<Fut> || IsInterruptibleFuture<Fut>
static auto futurize_invoke_if_func(Fut&& fut) noexcept {
return std::forward<Fut>(fut);
}
template<typename Func>
requires (!seastar::Future<Func>) && (!IsInterruptibleFuture<Func>)
static auto futurize_invoke_if_func(Func&& func) noexcept {
return seastar::futurize_invoke(std::forward<Func>(func));
}
template <typename... FutOrFuncs>
static inline auto when_all(FutOrFuncs&&... fut_or_funcs) noexcept {
return ::seastar::internal::when_all_impl(
futurize_invoke_if_func(std::forward<FutOrFuncs>(fut_or_funcs))...);
}
template <typename... FutOrFuncs>
static inline auto when_all_succeed(FutOrFuncs&&... fut_or_funcs) noexcept {
return ::seastar::internal::when_all_succeed_impl(
futurize_invoke_if_func(std::forward<FutOrFuncs>(fut_or_funcs))...);
}
template <typename Func,
typename Result = futurize_t<std::invoke_result_t<Func>>>
static inline Result async(Func&& func) {
auto interruption_condition = interrupt_cond<InterruptCond>.interrupt_cond;
INTR_FUT_DEBUG(
"interruptible_future_detail::async() yielding out, "
"interrupt_cond {},{} cleared",
(void*)interruption_condition.get(),
typeid(InterruptCond).name());
interrupt_cond<InterruptCond>.reset();
auto ret = seastar::async([func=std::forward<Func>(func),
interruption_condition] () mutable {
return non_futurized_call_with_interruption(
interruption_condition, std::forward<Func>(func));
});
interrupt_cond<InterruptCond>.set(interruption_condition);
INTR_FUT_DEBUG(
"interruptible_future_detail::async() yield back, interrupt_cond: {},{}",
(void*)interrupt_cond<InterruptCond>.interrupt_cond.get(),
typeid(InterruptCond).name());
return ret;
}
template <class FutureT>
static decltype(auto) green_get(FutureT&& fut) {
if (fut.available()) {
return fut.get();
} else {
// destined to wait!
auto interruption_condition = interrupt_cond<InterruptCond>.interrupt_cond;
INTR_FUT_DEBUG(
"green_get() waiting, interrupt_cond: {},{}",
(void*)interrupt_cond<InterruptCond>.interrupt_cond.get(),
typeid(InterruptCond).name());
interrupt_cond<InterruptCond>.reset();
auto&& value = fut.get();
interrupt_cond<InterruptCond>.set(interruption_condition);
INTR_FUT_DEBUG(
"green_get() got, interrupt_cond: {},{}",
(void*)interrupt_cond<InterruptCond>.interrupt_cond.get(),
typeid(InterruptCond).name());
return std::move(value);
}
}
static void yield() {
ceph_assert(interrupt_cond<InterruptCond>.interrupt_cond);
auto interruption_condition = interrupt_cond<InterruptCond>.interrupt_cond;
INTR_FUT_DEBUG(
"interruptible_future_detail::yield() yielding out, "
"interrupt_cond {},{} cleared",
(void*)interruption_condition.get(),
typeid(InterruptCond).name());
interrupt_cond<InterruptCond>.reset();
seastar::thread::yield();
interrupt_cond<InterruptCond>.set(interruption_condition);
INTR_FUT_DEBUG(
"interruptible_future_detail::yield() yield back, interrupt_cond: {},{}",
(void*)interrupt_cond<InterruptCond>.interrupt_cond.get(),
typeid(InterruptCond).name());
}
static void maybe_yield() {
ceph_assert(interrupt_cond<InterruptCond>.interrupt_cond);
if (seastar::thread::should_yield()) {
auto interruption_condition = interrupt_cond<InterruptCond>.interrupt_cond;
INTR_FUT_DEBUG(
"interruptible_future_detail::may_yield() yielding out, "
"interrupt_cond {},{} cleared",
(void*)interruption_condition.get(),
typeid(InterruptCond).name());
interrupt_cond<InterruptCond>.reset();
seastar::thread::yield();
interrupt_cond<InterruptCond>.set(interruption_condition);
INTR_FUT_DEBUG(
"interruptible_future_detail::may_yield() yield back, interrupt_cond: {},{}",
(void*)interrupt_cond<InterruptCond>.interrupt_cond.get(),
typeid(InterruptCond).name());
}
}
};
} // namespace crimson::interruptible
namespace seastar {
// seastar::futurize specialization so that interruptible wrappers over
// plain seastar::future<T...> interoperate with seastar's futurization
// machinery (invoke, from_tuple, exception propagation).
template <typename InterruptCond, typename... T>
struct futurize<::crimson::interruptible::interruptible_future_detail<
  InterruptCond, seastar::future<T...>>> {
  using type = ::crimson::interruptible::interruptible_future_detail<
    InterruptCond, seastar::future<T...>>;
  using value_type = typename type::value_type;
  using tuple_type = typename type::tuple_type;
  // Construct a ready interruptible future from a value/tuple.
  static type from_tuple(tuple_type&& value) {
    return type(ready_future_marker(), std::move(value));
  }
  static type from_tuple(const tuple_type& value) {
    return type(ready_future_marker(), value);
  }
  static type from_tuple(value_type&& value) {
    return type(ready_future_marker(), std::move(value));
  }
  static type from_tuple(const value_type& value) {
    return type(ready_future_marker(), value);
  }
  // Invoke `func`, converting a thrown exception into a failed future.
  template <typename Func, typename... FuncArgs>
  [[gnu::always_inline]]
  static inline type invoke(Func&& func, FuncArgs&&... args) noexcept {
    try {
      return func(std::forward<FuncArgs>(args)...);
    } catch (...) {
      return make_exception_future(std::current_exception());
    }
  }
  // monostate overload: no argument to pass through.
  template <typename Func>
  [[gnu::always_inline]]
  static type invoke(Func&& func, seastar::internal::monostate) noexcept {
    try {
      return ::seastar::futurize_invoke(std::forward<Func>(func));
    } catch (...) {
      return make_exception_future(std::current_exception());
    }
  }
  template <typename Arg>
  static inline type make_exception_future(Arg&& arg) noexcept {
    return seastar::make_exception_future<T...>(std::forward<Arg>(arg));
  }
  static inline type make_exception_future(future_state_base&& state) noexcept {
    return seastar::internal::make_exception_future<T...>(std::move(state));
  }
  // Resolve `pr` with the result of `func()` (seastar futurize protocol).
  template<typename PromiseT, typename Func>
  static void satisfy_with_result_of(PromiseT&& pr, Func&& func) {
    func().forward_to(std::move(pr));
  }
};
// seastar::futurize specialization for interruptible wrappers over
// errorated futures; exceptions are converted via the errorator's own
// make_exception_future2 so error-set typing is preserved.
template <typename InterruptCond,
          template <typename...> typename ErroratedFuture,
          typename... T>
struct futurize<
  ::crimson::interruptible::interruptible_future_detail<
    InterruptCond,
    ErroratedFuture<::crimson::errorated_future_marker<T...>>
  >
> {
  using type = ::crimson::interruptible::interruptible_future_detail<
    InterruptCond,
    ErroratedFuture<::crimson::errorated_future_marker<T...>>>;
  using core_type = ErroratedFuture<
    ::crimson::errorated_future_marker<T...>>;
  // The interruptible errorator matching the wrapped future's errorator.
  using errorator_type =
    ::crimson::interruptible::interruptible_errorator<
      InterruptCond,
      typename ErroratedFuture<
        ::crimson::errorated_future_marker<T...>>::errorator_type>;
  // Invoke `func`, converting a thrown exception into a failed future.
  template<typename Func, typename... FuncArgs>
  static inline type invoke(Func&& func, FuncArgs&&... args) noexcept {
    try {
      return func(std::forward<FuncArgs>(args)...);
    } catch (...) {
      return make_exception_future(std::current_exception());
    }
  }
  // monostate overload: no argument to pass through.
  template <typename Func>
  [[gnu::always_inline]]
  static type invoke(Func&& func, seastar::internal::monostate) noexcept {
    try {
      return ::seastar::futurize_invoke(std::forward<Func>(func));
    } catch (...) {
      return make_exception_future(std::current_exception());
    }
  }
  template <typename Arg>
  static inline type make_exception_future(Arg&& arg) noexcept {
    return core_type::errorator_type::template make_exception_future2<T...>(
      std::forward<Arg>(arg));
  }
  // Resolve `pr` with the result of `func()` (seastar futurize protocol).
  template<typename PromiseT, typename Func>
  static void satisfy_with_result_of(PromiseT&& pr, Func&& func) {
    func().forward_to(std::move(pr));
  }
};
// Continuations for an interruptible future use the same continuation base
// as the wrapped future type.
template <typename InterruptCond, typename FutureType>
struct continuation_base_from_future<
  ::crimson::interruptible::interruptible_future_detail<InterruptCond, FutureType>> {
  using type = typename seastar::continuation_base_from_future<FutureType>::type;
};
// Interruptible wrappers count as futures for seastar's is_future trait.
template <typename InterruptCond, typename FutureType>
struct is_future<
  ::crimson::interruptible::interruptible_future_detail<
    InterruptCond,
    FutureType>>
  : std::true_type {};
} // namespace seastar
| 57,249 | 34.758901 | 129 | h |
null | ceph-main/src/crimson/common/layout.h | // Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// MOTIVATION AND TUTORIAL
//
// If you want to put in a single heap allocation N doubles followed by M ints,
// it's easy if N and M are known at compile time.
//
// struct S {
// double a[N];
// int b[M];
// };
//
// S* p = new S;
//
// But what if N and M are known only in run time? Class template Layout to the
// rescue! It's a portable generalization of the technique known as struct hack.
//
// // This object will tell us everything we need to know about the memory
// // layout of double[N] followed by int[M]. It's structurally identical to
// // size_t[2] that stores N and M. It's very cheap to create.
// const Layout<double, int> layout(N, M);
//
// // Allocate enough memory for both arrays. `AllocSize()` tells us how much
// // memory is needed. We are free to use any allocation function we want as
// // long as it returns aligned memory.
// std::unique_ptr<unsigned char[]> p(new unsigned char[layout.AllocSize()]);
//
// // Obtain the pointer to the array of doubles.
// // Equivalent to `reinterpret_cast<double*>(p.get())`.
// //
// // We could have written layout.Pointer<0>(p) instead. If all the types are
// // unique you can use either form, but if some types are repeated you must
// // use the index form.
// double* a = layout.Pointer<double>(p.get());
//
// // Obtain the pointer to the array of ints.
// // Equivalent to `reinterpret_cast<int*>(p.get() + N * 8)`.
// int* b = layout.Pointer<int>(p);
//
// If we are unable to specify sizes of all fields, we can pass as many sizes as
// we can to `Partial()`. In return, it'll allow us to access the fields whose
// locations and sizes can be computed from the provided information.
// `Partial()` comes in handy when the array sizes are embedded into the
// allocation.
//
// // size_t[1] containing N, size_t[1] containing M, double[N], int[M].
// using L = Layout<size_t, size_t, double, int>;
//
// unsigned char* Allocate(size_t n, size_t m) {
// const L layout(1, 1, n, m);
// unsigned char* p = new unsigned char[layout.AllocSize()];
// *layout.Pointer<0>(p) = n;
// *layout.Pointer<1>(p) = m;
// return p;
// }
//
// void Use(unsigned char* p) {
// // First, extract N and M.
// // Specify that the first array has only one element. Using `prefix` we
// // can access the first two arrays but not more.
// constexpr auto prefix = L::Partial(1);
// size_t n = *prefix.Pointer<0>(p);
// size_t m = *prefix.Pointer<1>(p);
//
// // Now we can get pointers to the payload.
// const L layout(1, 1, n, m);
// double* a = layout.Pointer<double>(p);
// int* b = layout.Pointer<int>(p);
// }
//
// The layout we used above combines fixed-size with dynamically-sized fields.
// This is quite common. Layout is optimized for this use case and generates
// optimal code. All computations that can be performed at compile time are
// indeed performed at compile time.
//
// Efficiency tip: The order of fields matters. In `Layout<T1, ..., TN>` try to
// ensure that `alignof(T1) >= ... >= alignof(TN)`. This way you'll have no
// padding in between arrays.
//
// You can manually override the alignment of an array by wrapping the type in
// `Aligned<T, N>`. `Layout<..., Aligned<T, N>, ...>` has exactly the same API
// and behavior as `Layout<..., T, ...>` except that the first element of the
// array of `T` is aligned to `N` (the rest of the elements follow without
// padding). `N` cannot be less than `alignof(T)`.
//
// `AllocSize()` and `Pointer()` are the most basic methods for dealing with
// memory layouts. Check out the reference or code below to discover more.
//
// EXAMPLE
//
// // Immutable move-only string with sizeof equal to sizeof(void*). The
// // string size and the characters are kept in the same heap allocation.
// class CompactString {
// public:
// CompactString(const char* s = "") {
// const size_t size = strlen(s);
// // size_t[1] followed by char[size + 1].
// const L layout(1, size + 1);
// p_.reset(new unsigned char[layout.AllocSize()]);
// // If running under ASAN, mark the padding bytes, if any, to catch
// // memory errors.
// layout.PoisonPadding(p_.get());
// // Store the size in the allocation.
// *layout.Pointer<size_t>(p_.get()) = size;
// // Store the characters in the allocation.
// memcpy(layout.Pointer<char>(p_.get()), s, size + 1);
// }
//
// size_t size() const {
// // Equivalent to reinterpret_cast<size_t&>(*p).
// return *L::Partial().Pointer<size_t>(p_.get());
// }
//
// const char* c_str() const {
// // Equivalent to reinterpret_cast<char*>(p.get() + sizeof(size_t)).
// // The argument in Partial(1) specifies that we have size_t[1] in front
// // of the characters.
// return L::Partial(1).Pointer<char>(p_.get());
// }
//
// private:
// // Our heap allocation contains a size_t followed by an array of chars.
// using L = Layout<size_t, char>;
// std::unique_ptr<unsigned char[]> p_;
// };
//
// int main() {
// CompactString s = "hello";
// assert(s.size() == 5);
// assert(strcmp(s.c_str(), "hello") == 0);
// }
//
// DOCUMENTATION
//
// The interface exported by this file consists of:
// - class `Layout<>` and its public members.
// - The public members of class `internal_layout::LayoutImpl<>`. That class
// isn't intended to be used directly, and its name and template parameter
// list are internal implementation details, but the class itself provides
// most of the functionality in this file. See comments on its members for
// detailed documentation.
//
// `Layout<T1,... Tn>::Partial(count1,..., countm)` (where `m` <= `n`) returns a
// `LayoutImpl<>` object. `Layout<T1,..., Tn> layout(count1,..., countn)`
// creates a `Layout` object, which exposes the same functionality by inheriting
// from `LayoutImpl<>`.
#ifndef ABSL_CONTAINER_INTERNAL_LAYOUT_H_
#define ABSL_CONTAINER_INTERNAL_LAYOUT_H_
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <ostream>
#include <string>
#include <tuple>
#include <type_traits>
#include <typeinfo>
#include <utility>
#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif
// for C++20 std::span
#include <boost/beast/core/span.hpp>
#include <fmt/format.h>
#if defined(__GXX_RTTI)
#define ABSL_INTERNAL_HAS_CXA_DEMANGLE
#endif
#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE
#include <cxxabi.h>
#endif
namespace absl {
namespace container_internal {
// A type wrapper that instructs `Layout` to use the specific alignment for the
// array. `Layout<..., Aligned<T, N>, ...>` has exactly the same API
// and behavior as `Layout<..., T, ...>` except that the first element of the
// array of `T` is aligned to `N` (the rest of the elements follow without
// padding).
//
// Requires: `N >= alignof(T)` and `N` is a power of 2.
template <class T, size_t N>
struct Aligned;
namespace internal_layout {
// Base used to reject const-qualified Aligned<> wrappers at compile time:
// the primary template is an empty mixin, the specialization below fires a
// static_assert when instantiated with `const Aligned<T, N>`.
template <class T>
struct NotAligned {};
template <class T, size_t N>
struct NotAligned<const Aligned<T, N>> {
  static_assert(sizeof(T) == 0, "Aligned<T, N> cannot be const-qualified");
};
// Helper aliases that map an index or a type to size_t; used to build
// parameter packs of sizes matching another pack.
template <size_t>
using IntToSize = size_t;
template <class>
using TypeToSize = size_t;
// Type<T>::type strips the Aligned<> wrapper (identity for plain types);
// inheriting NotAligned rejects `const Aligned<...>`.
template <class T>
struct Type : NotAligned<T> {
  using type = T;
};
template <class T, size_t N>
struct Type<Aligned<T, N>> {
  using type = T;
};
// SizeOf<T>: sizeof of the element type, looking through Aligned<> (the
// alignment wrapper does not change the element size).
template <class T>
struct SizeOf : NotAligned<T>, std::integral_constant<size_t, sizeof(T)> {};
template <class T, size_t N>
struct SizeOf<Aligned<T, N>> : std::integral_constant<size_t, sizeof(T)> {};
// Note: workaround for https://gcc.gnu.org/PR88115
template <class T>
struct AlignOf : NotAligned<T> {
static constexpr size_t value = alignof(T);
};
template <class T, size_t N>
struct AlignOf<Aligned<T, N>> {
static_assert(N % alignof(T) == 0,
"Custom alignment can't be lower than the type's alignment");
static constexpr size_t value = N;
};
// Does `Ts...` contain `T`?
template <class T, class... Ts>
using Contains = std::disjunction<std::is_same<T, Ts>...>;
template <class From, class To>
using CopyConst =
typename std::conditional_t<std::is_const_v<From>, const To, To>;
// Note: We're not qualifying this with absl:: because it doesn't compile under
// MSVC.
template <class T>
using SliceType = boost::beast::span<T>;
// This namespace contains no types. It prevents functions defined in it from
// being found by ADL.
namespace adl_barrier {
// Zero-based index of `Needle` in the pack that follows it.
// Base case: the needle heads the pack; the tail must not contain it again.
template <class Needle, class... Ts>
constexpr size_t Find(Needle, Needle, Ts...) {
static_assert(!Contains<Needle, Ts...>(), "Duplicate element type");
return 0;
}
// Recursive case: skip the non-matching head and add one to the index.
template <class Needle, class T, class... Ts>
constexpr size_t Find(Needle, T, Ts...) {
return adl_barrier::Find(Needle(), Ts()...) + 1;
}
// True iff `n` is a power of two (also true for n == 0).
constexpr bool IsPow2(size_t n) { return !(n & (n - 1)); }
// Returns `q * m` for the smallest `q` such that `q * m >= n`.
// Requires: `m` is a power of two. It's enforced by IsLegalElementType below.
constexpr size_t Align(size_t n, size_t m) { return (n + m - 1) & ~(m - 1); }
constexpr size_t Min(size_t a, size_t b) { return b < a ? b : a; }
// Variadic max over one or more size_t values.
constexpr size_t Max(size_t a) { return a; }
template <class... Ts>
constexpr size_t Max(size_t a, size_t b, Ts... rest) {
return adl_barrier::Max(b < a ? a : b, rest...);
}
// Human-readable name of `T` wrapped in angle brackets, e.g. "<int>".
// Demangles via __cxa_demangle when the ABI provides it; falls back to the
// raw typeid name when RTTI is available, otherwise returns an empty string.
template <class T>
std::string TypeName() {
std::string out;
int status = 0;
char* demangled = nullptr;
#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE
demangled = abi::__cxa_demangle(typeid(T).name(), nullptr, nullptr, &status);
#endif
if (status == 0 && demangled != nullptr) { // Demangling succeeded.
out = fmt::format("<{}>", demangled);
free(demangled);
} else {
#if defined(__GXX_RTTI) || defined(_CPPRTTI)
out = fmt::format("<{}>", typeid(T).name());
#endif
}
return out;
}
} // namespace adl_barrier
template <bool C>
using EnableIf = typename std::enable_if_t<C, int>;
// Can `T` be a template argument of `Layout`?
template <class T>
using IsLegalElementType = std::integral_constant<
bool, !std::is_reference_v<T> && !std::is_volatile_v<T> &&
!std::is_reference_v<typename Type<T>::type> &&
!std::is_volatile_v<typename Type<T>::type> &&
adl_barrier::IsPow2(AlignOf<T>::value)>;
template <class Elements, class SizeSeq, class OffsetSeq>
class LayoutImpl;
// Public base class of `Layout` and the result type of `Layout::Partial()`.
//
// `Elements...` contains all template arguments of `Layout` that created this
// instance.
//
// `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is the number of arguments
// passed to `Layout::Partial()` or `Layout::Layout()`.
//
// `OffsetSeq...` is `[0, NumOffsets)` where `NumOffsets` is
// `Min(sizeof...(Elements), NumSizes + 1)` (the number of arrays for which we
// can compute offsets).
template <class... Elements, size_t... SizeSeq, size_t... OffsetSeq>
class LayoutImpl<std::tuple<Elements...>, std::index_sequence<SizeSeq...>,
std::index_sequence<OffsetSeq...>> {
private:
static_assert(sizeof...(Elements) > 0, "At least one field is required");
static_assert(std::conjunction_v<IsLegalElementType<Elements>...>,
"Invalid element type (see IsLegalElementType)");
enum {
NumTypes = sizeof...(Elements),
NumSizes = sizeof...(SizeSeq),
NumOffsets = sizeof...(OffsetSeq),
};
// These are guaranteed by `Layout`.
static_assert(NumOffsets == adl_barrier::Min(NumTypes, NumSizes + 1),
"Internal error");
static_assert(NumTypes > 0, "Internal error");
// Returns the index of `T` in `Elements...`. Results in a compilation error
// if `Elements...` doesn't contain exactly one instance of `T`.
template <class T>
static constexpr size_t ElementIndex() {
static_assert(Contains<Type<T>, Type<typename Type<Elements>::type>...>(),
"Type not found");
return adl_barrier::Find(Type<T>(),
Type<typename Type<Elements>::type>()...);
}
template <size_t N>
using ElementAlignment =
AlignOf<typename std::tuple_element<N, std::tuple<Elements...>>::type>;
public:
// Element types of all arrays packed in a tuple.
using ElementTypes = std::tuple<typename Type<Elements>::type...>;
// Element type of the Nth array.
template <size_t N>
using ElementType = typename std::tuple_element<N, ElementTypes>::type;
constexpr explicit LayoutImpl(IntToSize<SizeSeq>... sizes)
: size_{sizes...} {}
// Alignment of the layout, equal to the strictest alignment of all elements.
// All pointers passed to the methods of layout must be aligned to this value.
static constexpr size_t Alignment() {
return adl_barrier::Max(AlignOf<Elements>::value...);
}
// Offset in bytes of the Nth array.
//
// // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4);
// assert(x.Offset<0>() == 0); // The ints starts from 0.
// assert(x.Offset<1>() == 16); // The doubles starts from 16.
//
// Requires: `N <= NumSizes && N < sizeof...(Ts)`.
template <size_t N, EnableIf<N == 0> = 0>
constexpr size_t Offset() const {
return 0;
}
template <size_t N, EnableIf<N != 0> = 0>
constexpr size_t Offset() const {
static_assert(N < NumOffsets, "Index out of bounds");
return adl_barrier::Align(
Offset<N - 1>() + SizeOf<ElementType<N - 1>>() * size_[N - 1],
ElementAlignment<N>::value);
}
// Offset in bytes of the array with the specified element type. There must
// be exactly one such array and its zero-based index must be at most
// `NumSizes`.
//
// // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4);
// assert(x.Offset<int>() == 0); // The ints starts from 0.
// assert(x.Offset<double>() == 16); // The doubles starts from 16.
template <class T>
constexpr size_t Offset() const {
return Offset<ElementIndex<T>()>();
}
// Offsets in bytes of all arrays for which the offsets are known.
constexpr std::array<size_t, NumOffsets> Offsets() const {
return {{Offset<OffsetSeq>()...}};
}
// The number of elements in the Nth array. This is the Nth argument of
// `Layout::Partial()` or `Layout::Layout()` (zero-based).
//
// // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4);
// assert(x.Size<0>() == 3);
// assert(x.Size<1>() == 4);
//
// Requires: `N < NumSizes`.
template <size_t N>
constexpr size_t Size() const {
static_assert(N < NumSizes, "Index out of bounds");
return size_[N];
}
// The number of elements in the array with the specified element type.
// There must be exactly one such array and its zero-based index must be
// at most `NumSizes`.
//
// // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4);
// assert(x.Size<int>() == 3);
// assert(x.Size<double>() == 4);
template <class T>
constexpr size_t Size() const {
return Size<ElementIndex<T>()>();
}
// The number of elements of all arrays for which they are known.
constexpr std::array<size_t, NumSizes> Sizes() const {
return {{Size<SizeSeq>()...}};
}
// Pointer to the beginning of the Nth array.
//
// `Char` must be `[const] [signed|unsigned] char`.
//
// // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4);
// unsigned char* p = new unsigned char[x.AllocSize()];
// int* ints = x.Pointer<0>(p);
// double* doubles = x.Pointer<1>(p);
//
// Requires: `N <= NumSizes && N < sizeof...(Ts)`.
// Requires: `p` is aligned to `Alignment()`.
template <size_t N, class Char>
CopyConst<Char, ElementType<N>>* Pointer(Char* p) const {
using C = typename std::remove_const<Char>::type;
static_assert(
std::is_same<C, char>() || std::is_same<C, unsigned char>() ||
std::is_same<C, signed char>(),
"The argument must be a pointer to [const] [signed|unsigned] char");
constexpr size_t alignment = Alignment();
(void)alignment;
assert(reinterpret_cast<uintptr_t>(p) % alignment == 0);
return reinterpret_cast<CopyConst<Char, ElementType<N>>*>(p + Offset<N>());
}
// Pointer to the beginning of the array with the specified element type.
// There must be exactly one such array and its zero-based index must be at
// most `NumSizes`.
//
// `Char` must be `[const] [signed|unsigned] char`.
//
// // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4);
// unsigned char* p = new unsigned char[x.AllocSize()];
// int* ints = x.Pointer<int>(p);
// double* doubles = x.Pointer<double>(p);
//
// Requires: `p` is aligned to `Alignment()`.
template <class T, class Char>
CopyConst<Char, T>* Pointer(Char* p) const {
return Pointer<ElementIndex<T>()>(p);
}
// Pointers to all arrays for which pointers are known.
//
// `Char` must be `[const] [signed|unsigned] char`.
//
// // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4);
// unsigned char* p = new unsigned char[x.AllocSize()];
//
// int* ints;
// double* doubles;
// std::tie(ints, doubles) = x.Pointers(p);
//
// Requires: `p` is aligned to `Alignment()`.
//
// Note: We're not using ElementType alias here because it does not compile
// under MSVC.
template <class Char>
std::tuple<CopyConst<
Char, typename std::tuple_element<OffsetSeq, ElementTypes>::type>*...>
Pointers(Char* p) const {
return std::tuple<CopyConst<Char, ElementType<OffsetSeq>>*...>(
Pointer<OffsetSeq>(p)...);
}
// The Nth array.
//
// `Char` must be `[const] [signed|unsigned] char`.
//
// // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4);
// unsigned char* p = new unsigned char[x.AllocSize()];
// Span<int> ints = x.Slice<0>(p);
// Span<double> doubles = x.Slice<1>(p);
//
// Requires: `N < NumSizes`.
// Requires: `p` is aligned to `Alignment()`.
template <size_t N, class Char>
SliceType<CopyConst<Char, ElementType<N>>> Slice(Char* p) const {
return SliceType<CopyConst<Char, ElementType<N>>>(Pointer<N>(p), Size<N>());
}
// The array with the specified element type. There must be exactly one
// such array and its zero-based index must be less than `NumSizes`.
//
// `Char` must be `[const] [signed|unsigned] char`.
//
// // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4);
// unsigned char* p = new unsigned char[x.AllocSize()];
// Span<int> ints = x.Slice<int>(p);
// Span<double> doubles = x.Slice<double>(p);
//
// Requires: `p` is aligned to `Alignment()`.
template <class T, class Char>
SliceType<CopyConst<Char, T>> Slice(Char* p) const {
return Slice<ElementIndex<T>()>(p);
}
// All arrays with known sizes.
//
// `Char` must be `[const] [signed|unsigned] char`.
//
// // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4);
// unsigned char* p = new unsigned char[x.AllocSize()];
//
// Span<int> ints;
// Span<double> doubles;
// std::tie(ints, doubles) = x.Slices(p);
//
// Requires: `p` is aligned to `Alignment()`.
//
// Note: We're not using ElementType alias here because it does not compile
// under MSVC.
template <class Char>
std::tuple<SliceType<CopyConst<
Char, typename std::tuple_element<SizeSeq, ElementTypes>::type>>...>
Slices(Char* p) const {
// Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63875 (fixed
// in 6.1).
(void)p;
return std::tuple<SliceType<CopyConst<Char, ElementType<SizeSeq>>>...>(
Slice<SizeSeq>(p)...);
}
// The size of the allocation that fits all arrays.
//
// // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4);
// unsigned char* p = new unsigned char[x.AllocSize()]; // 48 bytes
//
// Requires: `NumSizes == sizeof...(Ts)`.
constexpr size_t AllocSize() const {
static_assert(NumTypes == NumSizes, "You must specify sizes of all fields");
return Offset<NumTypes - 1>() +
SizeOf<ElementType<NumTypes - 1>>() * size_[NumTypes - 1];
}
// If built with --config=asan, poisons padding bytes (if any) in the
// allocation. The pointer must point to a memory block at least
// `AllocSize()` bytes in length.
//
// `Char` must be `[const] [signed|unsigned] char`.
//
// Requires: `p` is aligned to `Alignment()`.
template <class Char, size_t N = NumOffsets - 1, EnableIf<N == 0> = 0>
void PoisonPadding(const Char* p) const {
Pointer<0>(p); // verify the requirements on `Char` and `p`
}
template <class Char, size_t N = NumOffsets - 1, EnableIf<N != 0> = 0>
void PoisonPadding(const Char* p) const {
static_assert(N < NumOffsets, "Index out of bounds");
(void)p;
#ifdef ADDRESS_SANITIZER
PoisonPadding<Char, N - 1>(p);
// The `if` is an optimization. It doesn't affect the observable behaviour.
if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) {
size_t start =
Offset<N - 1>() + SizeOf<ElementType<N - 1>>() * size_[N - 1];
ASAN_POISON_MEMORY_REGION(p + start, Offset<N>() - start);
}
#endif
}
// Human-readable description of the memory layout. Useful for debugging.
// Slow.
//
// // char[5], 3 bytes of padding, int[3], 4 bytes of padding, followed
// // by an unknown number of doubles.
// auto x = Layout<char, int, double>::Partial(5, 3);
// assert(x.DebugString() ==
// "@0<char>(1)[5]; @8<int>(4)[3]; @24<double>(8)");
//
// Each field is in the following format: @offset<type>(sizeof)[size] (<type>
// may be missing depending on the target platform). For example,
// @8<int>(4)[3] means that at offset 8 we have an array of ints, where each
// int is 4 bytes, and we have 3 of those ints. The size of the last field may
// be missing (as in the example above). Only fields with known offsets are
// described. Type names may differ across platforms: one compiler might
// produce "unsigned*" where another produces "unsigned int *".
std::string DebugString() const {
const auto offsets = Offsets();
const size_t sizes[] = {SizeOf<ElementType<OffsetSeq>>()...};
const std::string types[] = {
adl_barrier::TypeName<ElementType<OffsetSeq>>()...};
std::string res = fmt::format("@0{}({})", types[0], sizes[0]);
for (size_t i = 0; i != NumOffsets - 1; ++i) {
res += fmt::format("[{}]; @({})", size_[i], offsets[i + 1], types[i + 1], sizes[i + 1]);
}
// NumSizes is a constant that may be zero. Some compilers cannot see that
// inside the if statement "size_[NumSizes - 1]" must be valid.
int last = static_cast<int>(NumSizes) - 1;
if (NumTypes == NumSizes && last >= 0) {
res += fmt::format("[{}]", size_[last]);
}
return res;
}
private:
// Arguments of `Layout::Partial()` or `Layout::Layout()`.
size_t size_[NumSizes > 0 ? NumSizes : 1];
};
template <size_t NumSizes, class... Ts>
using LayoutType = LayoutImpl<
std::tuple<Ts...>, std::make_index_sequence<NumSizes>,
std::make_index_sequence<adl_barrier::Min(sizeof...(Ts), NumSizes + 1)>>;
} // namespace internal_layout
// Descriptor of arrays of various types and sizes laid out in memory one after
// another. See the top of the file for documentation.
//
// Check out the public API of internal_layout::LayoutImpl above. The type is
// internal to the library but its methods are public, and they are inherited
// by `Layout`.
template <class... Ts>
class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> {
public:
static_assert(sizeof...(Ts) > 0, "At least one field is required");
static_assert(
std::conjunction_v<internal_layout::IsLegalElementType<Ts>...>,
"Invalid element type (see IsLegalElementType)");
// The result type of `Partial()` with `NumSizes` arguments.
template <size_t NumSizes>
using PartialType = internal_layout::LayoutType<NumSizes, Ts...>;
// `Layout` knows the element types of the arrays we want to lay out in
// memory but not the number of elements in each array.
// `Partial(size1, ..., sizeN)` allows us to specify the latter. The
// resulting immutable object can be used to obtain pointers to the
// individual arrays.
//
// It's allowed to pass fewer array sizes than the number of arrays. E.g.,
// if all you need is to the offset of the second array, you only need to
// pass one argument -- the number of elements in the first array.
//
// // int[3] followed by 4 bytes of padding and an unknown number of
// // doubles.
// auto x = Layout<int, double>::Partial(3);
// // doubles start at byte 16.
// assert(x.Offset<1>() == 16);
//
// If you know the number of elements in all arrays, you can still call
// `Partial()` but it's more convenient to use the constructor of `Layout`.
//
// Layout<int, double> x(3, 5);
//
// Note: The sizes of the arrays must be specified in number of elements,
// not in bytes.
//
// Requires: `sizeof...(Sizes) <= sizeof...(Ts)`.
// Requires: all arguments are convertible to `size_t`.
template <class... Sizes>
static constexpr PartialType<sizeof...(Sizes)> Partial(Sizes&&... sizes) {
static_assert(sizeof...(Sizes) <= sizeof...(Ts));
return PartialType<sizeof...(Sizes)>(std::forward<Sizes>(sizes)...);
}
// Creates a layout with the sizes of all arrays specified. If you know
// only the sizes of the first N arrays (where N can be zero), you can use
// `Partial()` defined above. The constructor is essentially equivalent to
// calling `Partial()` and passing in all array sizes; the constructor is
// provided as a convenient abbreviation.
//
// Note: The sizes of the arrays must be specified in number of elements,
// not in bytes.
constexpr explicit Layout(internal_layout::TypeToSize<Ts>... sizes)
: internal_layout::LayoutType<sizeof...(Ts), Ts...>(sizes...) {}
};
} // namespace container_internal
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_LAYOUT_H_
| 26,948 | 35.51626 | 94 | h |
null | ceph-main/src/crimson/common/local_shared_foreign_ptr.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/smp.hh>
#include <seastar/core/future.hh>
#include <seastar/core/sharded.hh>
namespace crimson {
/**
* local_shared_foreign_ptr
*
* See seastar/include/seastar/core/sharded.hh:foreign_ptr
*
* seastar::foreign_ptr wraps a smart ptr by proxying the copy() and destructor
* operations back to the original core. This works well except that copy()
* requires a cross-core call. We need a smart_ptr which allows cross-core
* caching of (for example) OSDMaps, but we want to avoid the overhead inherent
* in incrementing the source smart_ptr on every copy. Thus,
* local_shared_foreign_ptr maintains a core-local foreign_ptr back to the
* original core instance with core-local ref counting.
*/
template <typename PtrType>
class local_shared_foreign_ptr {
  using element_type = typename std::pointer_traits<PtrType>::element_type;
  using pointer = element_type*;
  // Core-local reference count (lw_shared_ptr) wrapping a foreign_ptr back to
  // the owning shard; copies on this shard only bump the local count.
  // Invariant (asserted throughout): ptr is either null or holds an engaged
  // foreign_ptr -- never an lw_shared_ptr around an empty foreign_ptr.
  seastar::lw_shared_ptr<seastar::foreign_ptr<PtrType>> ptr;
  /// Wraps a pointer object and remembers the current core.
  /// Private: construct via make_local_shared_foreign() below.
  local_shared_foreign_ptr(seastar::foreign_ptr<PtrType> &&fptr)
    : ptr(fptr ? seastar::make_lw_shared(std::move(fptr)) : nullptr) {
    assert(!ptr || (ptr && *ptr));
  }
  template <typename T>
  friend local_shared_foreign_ptr<T> make_local_shared_foreign(
    seastar::foreign_ptr<T> &&);
public:
  /// Constructs a null local_shared_foreign_ptr<>.
  local_shared_foreign_ptr() = default;
  /// Constructs a null local_shared_foreign_ptr<>.
  local_shared_foreign_ptr(std::nullptr_t) : local_shared_foreign_ptr() {}
  /// Moves a local_shared_foreign_ptr<> to another object.
  local_shared_foreign_ptr(local_shared_foreign_ptr&& other) = default;
  /// Copies a local_shared_foreign_ptr<> (bumps only the shard-local count).
  local_shared_foreign_ptr(const local_shared_foreign_ptr &other) = default;
  /// Releases reference to ptr eventually releasing the contained foreign_ptr
  ~local_shared_foreign_ptr() = default;
  /// Creates a copy of this foreign ptr. Only works if the stored ptr is copyable.
  /// Returns a ready null foreign_ptr when this is null.
  seastar::future<seastar::foreign_ptr<PtrType>> get_foreign() const noexcept {
    assert(!ptr || (ptr && *ptr));
    return ptr ? ptr->copy() :
      seastar::make_ready_future<seastar::foreign_ptr<PtrType>>(nullptr);
  }
  /// Accesses the wrapped object. Precondition: non-null.
  element_type& operator*() const noexcept {
    assert(ptr && *ptr);
    return **ptr;
  }
  /// Accesses the wrapped object. Precondition: non-null.
  element_type* operator->() const noexcept {
    assert(ptr && *ptr);
    return &**ptr;
  }
  /// Access the raw pointer to the wrapped object (nullptr when null).
  pointer get() const noexcept {
    assert(!ptr || (ptr && *ptr));
    return ptr ? ptr->get() : nullptr;
  }
  /// Return the owner-shard of the contained foreign_ptr.
  /// A null ptr reports the calling shard.
  unsigned get_owner_shard() const noexcept {
    assert(!ptr || (ptr && *ptr));
    return ptr ? ptr->get_owner_shard() : seastar::this_shard_id();
  }
  /// Checks whether the wrapped pointer is non-null.
  operator bool() const noexcept {
    assert(!ptr || (ptr && *ptr));
    return static_cast<bool>(ptr);
  }
  /// Move-assigns a \c local_shared_foreign_ptr<>.
  local_shared_foreign_ptr& operator=(local_shared_foreign_ptr&& other) noexcept {
    ptr = std::move(other.ptr);
    return *this;
  }
  /// Copy-assigns a \c local_shared_foreign_ptr<>.
  local_shared_foreign_ptr& operator=(const local_shared_foreign_ptr& other) noexcept {
    ptr = other.ptr;
    return *this;
  }
  /// Reset the containing ptr, dropping this shard's local reference.
  void reset() noexcept {
    assert(!ptr || (ptr && *ptr));
    ptr = nullptr;
  }
};
/// Wraps a smart_ptr T in a local_shared_foreign_ptr<>.
template <typename T>
local_shared_foreign_ptr<T> make_local_shared_foreign(
  seastar::foreign_ptr<T> &&ptr) {
  return local_shared_foreign_ptr<T>(std::move(ptr));
}
/// Wraps ptr in a local_shared_foreign_ptr<>.
/// A null smart pointer yields a null local_shared_foreign_ptr without
/// constructing a foreign_ptr around it.
template <typename T>
local_shared_foreign_ptr<T> make_local_shared_foreign(T &&ptr) {
  return make_local_shared_foreign<T>(
    ptr ? seastar::make_foreign(std::forward<T>(ptr)) : nullptr);
}
template <typename T, typename U>
inline bool operator==(const local_shared_foreign_ptr<T> &x,
const local_shared_foreign_ptr<U> &y) {
return x.get() == y.get();
}
template <typename T>
inline bool operator==(const local_shared_foreign_ptr<T> &x, std::nullptr_t) {
return x.get() == nullptr;
}
template <typename T>
inline bool operator==(std::nullptr_t, const local_shared_foreign_ptr<T>& y) {
return nullptr == y.get();
}
template <typename T, typename U>
inline bool operator!=(const local_shared_foreign_ptr<T> &x,
const local_shared_foreign_ptr<U> &y) {
return x.get() != y.get();
}
template <typename T>
inline bool operator!=(const local_shared_foreign_ptr<T> &x, std::nullptr_t) {
return x.get() != nullptr;
}
template <typename T>
inline bool operator!=(std::nullptr_t, const local_shared_foreign_ptr<T>& y) {
return nullptr != y.get();
}
template <typename T, typename U>
inline bool operator<(const local_shared_foreign_ptr<T> &x,
const local_shared_foreign_ptr<U> &y) {
return x.get() < y.get();
}
template <typename T>
inline bool operator<(const local_shared_foreign_ptr<T> &x, std::nullptr_t) {
return x.get() < nullptr;
}
template <typename T>
inline bool operator<(std::nullptr_t, const local_shared_foreign_ptr<T>& y) {
return nullptr < y.get();
}
template <typename T, typename U>
inline bool operator<=(const local_shared_foreign_ptr<T> &x,
const local_shared_foreign_ptr<U> &y) {
return x.get() <= y.get();
}
template <typename T>
inline bool operator<=(const local_shared_foreign_ptr<T> &x, std::nullptr_t) {
return x.get() <= nullptr;
}
template <typename T>
inline bool operator<=(std::nullptr_t, const local_shared_foreign_ptr<T>& y) {
return nullptr <= y.get();
}
template <typename T, typename U>
inline bool operator>(const local_shared_foreign_ptr<T> &x,
const local_shared_foreign_ptr<U> &y) {
return x.get() > y.get();
}
template <typename T>
inline bool operator>(const local_shared_foreign_ptr<T> &x, std::nullptr_t) {
return x.get() > nullptr;
}
template <typename T>
inline bool operator>(std::nullptr_t, const local_shared_foreign_ptr<T>& y) {
return nullptr > y.get();
}
template <typename T, typename U>
inline bool operator>=(const local_shared_foreign_ptr<T> &x,
const local_shared_foreign_ptr<U> &y) {
return x.get() >= y.get();
}
template <typename T>
inline bool operator>=(const local_shared_foreign_ptr<T> &x, std::nullptr_t) {
return x.get() >= nullptr;
}
template <typename T>
inline bool operator>=(std::nullptr_t, const local_shared_foreign_ptr<T>& y) {
return nullptr >= y.get();
}
}
namespace std {
// Hash by the raw pointee address, mirroring std::hash for plain pointers.
template <typename T>
struct hash<crimson::local_shared_foreign_ptr<T>>
  : private hash<typename std::pointer_traits<T>::element_type *> {
  size_t operator()(const crimson::local_shared_foreign_ptr<T>& p) const {
    return hash<typename std::pointer_traits<T>::element_type *>::operator()(p.get());
  }
};
}
namespace seastar {
// Mark local_shared_foreign_ptr as a smart pointer for seastar's type traits.
template<typename T>
struct is_smart_ptr<crimson::local_shared_foreign_ptr<T>> : std::true_type {};
}
| 7,380 | 29.004065 | 87 | h |
null | ceph-main/src/crimson/common/log.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "log.h"
// One seastar::logger per subsystem, in the order the subsystems are declared
// in common/subsys.h; each logger is named after its subsystem (the default
// catch-all entry is named "none").
static std::array<seastar::logger, ceph_subsys_get_num()> loggers{
#define SUBSYS(name, log_level, gather_level) \
  seastar::logger(#name),
#define DEFAULT_SUBSYS(log_level, gather_level) \
  seastar::logger("none"),
#include "common/subsys.h"
#undef SUBSYS
#undef DEFAULT_SUBSYS
};
namespace crimson {
// Look up the logger for a subsystem id (a ceph_subsys_* enumerator).
// Bounds are enforced only by the assert below.
seastar::logger& get_logger(int subsys) {
  assert(subsys < ceph_subsys_max);
  return loggers[subsys];
}
}
| 532 | 23.227273 | 70 | cc |
null | ceph-main/src/crimson/common/log.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <fmt/format.h>
#include <seastar/util/log.hh>
#include "common/subsys_types.h"
namespace crimson {
seastar::logger& get_logger(int subsys);
// Map a ceph-style numeric debug level onto seastar's coarser levels:
// negative -> error, 0 -> warn, 1..5 -> info, 6..20 -> debug, >20 -> trace.
static inline seastar::log_level to_log_level(int level) {
  if (level < 0) {
    return seastar::log_level::error;
  }
  if (level == 0) {
    return seastar::log_level::warn;
  }
  if (level <= 5) {
    return seastar::log_level::info;
  }
  return level <= 20 ? seastar::log_level::debug
                     : seastar::log_level::trace;
}
}
/* Logging convenience macros
*
* The intention here is to standardize prefixing log lines with the function name
* and a context prefix (like the operator<< for the PG). Place
*
* SET_SUBSYS(osd);
*
* at the top of the file to declare the log lines within the file as being (in this case)
* in the osd subsys. At the beginning of each method/function, add
*
* LOG_PREFIX(Class::method_name)
*
* to set the FNAME symbol to Class::method_name. In order to use the log macros
* within lambdas, capture FNAME by value.
*
* Log lines can then be declared using the appropriate macro below.
*/
#define SET_SUBSYS(subname_) static constexpr auto SOURCE_SUBSYS = ceph_subsys_##subname_
#define LOCAL_LOGGER crimson::get_logger(SOURCE_SUBSYS)
#define LOGGER(subname_) crimson::get_logger(ceph_subsys_##subname_)
#define LOG_PREFIX(x) constexpr auto FNAME = #x
#define LOG(level_, MSG, ...) \
LOCAL_LOGGER.log(level_, "{}: " MSG, FNAME , ##__VA_ARGS__)
#define SUBLOG(subname_, level_, MSG, ...) \
LOGGER(subname_).log(level_, "{}: " MSG, FNAME , ##__VA_ARGS__)
#define TRACE(...) LOG(seastar::log_level::trace, __VA_ARGS__)
#define SUBTRACE(subname_, ...) SUBLOG(subname_, seastar::log_level::trace, __VA_ARGS__)
#define DEBUG(...) LOG(seastar::log_level::debug, __VA_ARGS__)
#define SUBDEBUG(subname_, ...) SUBLOG(subname_, seastar::log_level::debug, __VA_ARGS__)
#define INFO(...) LOG(seastar::log_level::info, __VA_ARGS__)
#define SUBINFO(subname_, ...) SUBLOG(subname_, seastar::log_level::info, __VA_ARGS__)
#define WARN(...) LOG(seastar::log_level::warn, __VA_ARGS__)
#define SUBWARN(subname_, ...) SUBLOG(subname_, seastar::log_level::warn, __VA_ARGS__)
#define ERROR(...) LOG(seastar::log_level::error, __VA_ARGS__)
#define SUBERROR(subname_, ...) SUBLOG(subname_, seastar::log_level::error, __VA_ARGS__)
// *DPP macros are intended to take DoutPrefixProvider implementations, but anything with
// an operator<< will work as a prefix
#define SUBLOGDPP(subname_, level_, MSG, dpp, ...) \
LOGGER(subname_).log(level_, "{} {}: " MSG, dpp, FNAME , ##__VA_ARGS__)
#define SUBTRACEDPP(subname_, ...) SUBLOGDPP(subname_, seastar::log_level::trace, __VA_ARGS__)
#define SUBDEBUGDPP(subname_, ...) SUBLOGDPP(subname_, seastar::log_level::debug, __VA_ARGS__)
#define SUBINFODPP(subname_, ...) SUBLOGDPP(subname_, seastar::log_level::info, __VA_ARGS__)
#define SUBWARNDPP(subname_, ...) SUBLOGDPP(subname_, seastar::log_level::warn, __VA_ARGS__)
#define SUBERRORDPP(subname_, ...) SUBLOGDPP(subname_, seastar::log_level::error, __VA_ARGS__)
#define LOGDPP(level_, MSG, dpp, ...) \
LOCAL_LOGGER.log(level_, "{} {}: " MSG, dpp, FNAME , ##__VA_ARGS__)
#define TRACEDPP(...) LOGDPP(seastar::log_level::trace, __VA_ARGS__)
#define DEBUGDPP(...) LOGDPP(seastar::log_level::debug, __VA_ARGS__)
#define INFODPP(...) LOGDPP(seastar::log_level::info, __VA_ARGS__)
#define WARNDPP(...) LOGDPP(seastar::log_level::warn, __VA_ARGS__)
#define ERRORDPP(...) LOGDPP(seastar::log_level::error, __VA_ARGS__)
| 3,647 | 39.988764 | 94 | h |
null | ceph-main/src/crimson/common/logclient.cc | #include "crimson/common/logclient.h"
#include <fmt/ranges.h>
#include "include/str_map.h"
#include "messages/MLog.h"
#include "messages/MLogAck.h"
#include "messages/MMonGetVersion.h"
#include "crimson/net/Messenger.h"
#include "crimson/mon/MonClient.h"
#include "mon/MonMap.h"
#include "common/Graylog.h"
using std::map;
using std::ostream;
using std::ostringstream;
using std::string;
using crimson::common::local_conf;
namespace {
seastar::logger& logger()
{
return crimson::get_logger(ceph_subsys_monc);
}
}
// TODO: to avoid declaring and passing around this many maps, create a
// named structure that bundles them and return an optional of that
// structure instead.
int parse_log_client_options(CephContext *cct,
map<string,string> &log_to_monitors,
map<string,string> &log_to_syslog,
map<string,string> &log_channels,
map<string,string> &log_prios,
map<string,string> &log_to_graylog,
map<string,string> &log_to_graylog_host,
map<string,string> &log_to_graylog_port,
uuid_d &fsid,
string &host)
{
ostringstream oss;
int r = get_conf_str_map_helper(
cct->_conf.get_val<string>("clog_to_monitors"), oss,
&log_to_monitors, CLOG_CONFIG_DEFAULT_KEY);
if (r < 0) {
logger().error("{} error parsing 'clog_to_monitors'", __func__);
return r;
}
r = get_conf_str_map_helper(
cct->_conf.get_val<string>("clog_to_syslog"), oss,
&log_to_syslog, CLOG_CONFIG_DEFAULT_KEY);
if (r < 0) {
logger().error("{} error parsing 'clog_to_syslog'", __func__);
return r;
}
r = get_conf_str_map_helper(
cct->_conf.get_val<string>("clog_to_syslog_facility"), oss,
&log_channels, CLOG_CONFIG_DEFAULT_KEY);
if (r < 0) {
logger().error("{} error parsing 'clog_to_syslog_facility'", __func__);
return r;
}
r = get_conf_str_map_helper(
cct->_conf.get_val<string>("clog_to_syslog_level"), oss,
&log_prios, CLOG_CONFIG_DEFAULT_KEY);
if (r < 0) {
logger().error("{} error parsing 'clog_to_syslog_level'", __func__);
return r;
}
r = get_conf_str_map_helper(
cct->_conf.get_val<string>("clog_to_graylog"), oss,
&log_to_graylog, CLOG_CONFIG_DEFAULT_KEY);
if (r < 0) {
logger().error("{} error parsing 'clog_to_graylog'", __func__);
return r;
}
r = get_conf_str_map_helper(
cct->_conf.get_val<string>("clog_to_graylog_host"), oss,
&log_to_graylog_host, CLOG_CONFIG_DEFAULT_KEY);
if (r < 0) {
logger().error("{} error parsing 'clog_to_graylog_host'", __func__);
return r;
}
r = get_conf_str_map_helper(
cct->_conf.get_val<string>("clog_to_graylog_port"), oss,
&log_to_graylog_port, CLOG_CONFIG_DEFAULT_KEY);
if (r < 0) {
logger().error("{} error parsing 'clog_to_graylog_port'", __func__);
return r;
}
fsid = cct->_conf.get_val<uuid_d>("fsid");
host = cct->_conf->host;
return 0;
}
// Channel with default (empty) priority and syslog facility; forwarding to
// syslog and to the monitors starts out disabled until configured.
LogChannel::LogChannel(LogClient *lc, const string &channel)
  : parent(lc), log_channel(channel), log_to_syslog(false),
    log_to_monitors(false)
{
}
// Channel with an explicit syslog facility and log priority.
LogChannel::LogChannel(LogClient *lc, const string &channel,
                       const string &facility, const string &prio)
  : parent(lc), log_channel(channel), log_prio(prio),
    syslog_facility(facility), log_to_syslog(false),
    log_to_monitors(false)
{
}
// Construct a client that sends log entries via messenger `m`.
// FLAG_MON marks the owner as a monitor; the log sequence counters
// (last_log_sent / last_log) start at zero.
LogClient::LogClient(crimson::net::Messenger *m,
		     logclient_flag_t flags)
  : messenger(m), is_mon(flags & FLAG_MON),
    last_log_sent(0), last_log(0)
{
}
// Toggle forwarding of this channel's entries to the monitors.
// A change of policy resets the parent's queue bookkeeping so entries
// queued under the old policy are not re-sent under the new one.
void LogChannel::set_log_to_monitors(bool v)
{
if (log_to_monitors != v) {
parent->reset();
log_to_monitors = v;
}
}
/**
 * Apply parsed per-channel option maps to this channel.
 *
 * Each map is keyed by channel name, with CLOG_CONFIG_DEFAULT_KEY acting
 * as the fallback; the value relevant to this channel is looked up and
 * the corresponding sink (monitors, syslog, graylog) is (re)configured.
 */
void LogChannel::update_config(map<string,string> &log_to_monitors,
			       map<string,string> &log_to_syslog,
			       map<string,string> &log_channels,
			       map<string,string> &log_prios,
			       map<string,string> &log_to_graylog,
			       map<string,string> &log_to_graylog_host,
			       map<string,string> &log_to_graylog_port,
			       uuid_d &fsid,
			       string &host)
{
  logger().debug(
    "{} log_to_monitors {} log_to_syslog {} log_channels {} log_prios {}",
    __func__, log_to_monitors, log_to_syslog, log_channels, log_prios);
  // pick out this channel's settings, falling back to the default key
  bool to_monitors = (get_str_map_key(log_to_monitors, log_channel,
                                      &CLOG_CONFIG_DEFAULT_KEY) == "true");
  bool to_syslog = (get_str_map_key(log_to_syslog, log_channel,
                                    &CLOG_CONFIG_DEFAULT_KEY) == "true");
  string syslog_facility = get_str_map_key(log_channels, log_channel,
					   &CLOG_CONFIG_DEFAULT_KEY);
  string prio = get_str_map_key(log_prios, log_channel,
				&CLOG_CONFIG_DEFAULT_KEY);
  bool to_graylog = (get_str_map_key(log_to_graylog, log_channel,
				     &CLOG_CONFIG_DEFAULT_KEY) == "true");
  string graylog_host = get_str_map_key(log_to_graylog_host, log_channel,
					&CLOG_CONFIG_DEFAULT_KEY);
  string graylog_port_str = get_str_map_key(log_to_graylog_port, log_channel,
					    &CLOG_CONFIG_DEFAULT_KEY);
  int graylog_port = atoi(graylog_port_str.c_str());
  set_log_to_monitors(to_monitors);
  set_log_to_syslog(to_syslog);
  set_syslog_facility(syslog_facility);
  set_log_prio(prio);
  // create or tear down the graylog sink to match the desired state
  if (to_graylog && !graylog) { /* should but isn't */
    graylog = seastar::make_shared<ceph::logging::Graylog>("clog");
  } else if (!to_graylog && graylog) { /* shouldn't but is */
    graylog = nullptr;
  }
  if (to_graylog && graylog) {
    graylog->set_fsid(fsid);
    graylog->set_hostname(host);
  }
  if (graylog && (!graylog_host.empty()) && (graylog_port != 0)) {
    graylog->set_destination(graylog_host, graylog_port);
  }
  // NOTE: the trailing spaces inside the string pieces matter; without
  // them the concatenated format string ran fields together, e.g.
  // "to_syslog: {}syslog_facility: {}".
  logger().debug("{} to_monitors: {} to_syslog: {} "
		 "syslog_facility: {} prio: {} to_graylog: {} graylog_host: {} "
		 "graylog_port: {}", __func__, (to_monitors ? "true" : "false"),
		 (to_syslog ? "true" : "false"), syslog_facility, prio,
		 (to_graylog ? "true" : "false"), graylog_host, graylog_port);
}
// Split the stream into lines and forward each non-empty one to the
// string overload of do_log().
void LogChannel::do_log(clog_type prio, std::stringstream& ss)
{
  for (std::string line; std::getline(ss, line); ) {
    if (!line.empty()) {
      do_log(prio, line);
    }
  }
}
// Emit a single cluster-log line: mirror it to the local logger, build a
// LogEntry stamped with this client's identity, and fan it out to the
// configured sinks (monitors / syslog / graylog).
void LogChannel::do_log(clog_type prio, const std::string& s)
{
// mirror into the local (dout-style) log first
if (CLOG_ERROR == prio) {
logger().error("log {} : {}", prio, s);
} else {
logger().warn("log {} : {}", prio, s);
}
LogEntry e;
e.stamp = ceph_clock_now();
e.addrs = parent->get_myaddrs();
e.name = parent->get_myname();
e.rank = parent->get_myrank();
e.prio = prio;
e.msg = s;
e.channel = get_log_channel();
// seq and who should be set for syslog/graylog/log_to_mon
// log to monitor?
if (log_to_monitors) {
// queue() assigns the seq and retains the entry until acked
e.seq = parent->queue(e);
} else {
// still consume a seq so numbering stays monotonic across sinks
e.seq = parent->get_next_seq();
}
// log to syslog?
if (do_log_to_syslog()) {
logger().warn("{} log to syslog", __func__);
e.log_to_syslog(get_log_prio(), get_syslog_facility());
}
// log to graylog?
if (do_log_to_graylog()) {
logger().warn("{} log to graylog", __func__);
graylog->log_log_entry(&e);
}
}
// Build the next MLog for the monitor. With FLUSH (used after a mon
// session reset) the send cursor is rewound to the oldest queued entry
// so everything still unacked is re-sent.
MessageURef LogClient::get_mon_log_message(log_flushing_t flush_flag)
{
if (flush_flag == log_flushing_t::FLUSH) {
if (log_queue.empty()) {
return {};
}
// reset session
last_log_sent = log_queue.front().seq;
}
return _get_mon_log_message();
}
// True when sequence numbers have been consumed beyond what has been
// sent to the monitor, i.e. there is something left to flush.
bool LogClient::are_pending() const
{
return last_log > last_log_sent;
}
// Assemble an MLog containing the not-yet-sent tail of log_queue,
// bounded by mon_client_max_log_entries_per_message, and advance
// last_log_sent past everything included.
MessageURef LogClient::_get_mon_log_message()
{
if (log_queue.empty()) {
return {};
}
// only send entries that haven't been sent yet during this mon
// session! monclient needs to call reset_session() on mon session
// reset for this to work right.
if (last_log_sent == last_log) {
return {};
}
// limit entries per message
const int64_t num_unsent = last_log - last_log_sent;
int64_t num_to_send;
if (local_conf()->mon_client_max_log_entries_per_message > 0) {
num_to_send = std::min(num_unsent,
local_conf()->mon_client_max_log_entries_per_message);
} else {
num_to_send = num_unsent;
}
logger().debug("log_queue is {} last_log {} sent {} num {} unsent {}"
" sending {}", log_queue.size(), last_log,
last_log_sent, log_queue.size(), num_unsent, num_to_send);
ceph_assert((unsigned)num_unsent <= log_queue.size());
// skip entries already sent in this session
auto log_iter = log_queue.begin();
std::deque<LogEntry> out_log_queue; /* will send the logs contained here */
while (log_iter->seq <= last_log_sent) {
++log_iter;
ceph_assert(log_iter != log_queue.end());
}
// copy the next num_to_send entries, advancing the send cursor
while (num_to_send--) {
ceph_assert(log_iter != log_queue.end());
out_log_queue.push_back(*log_iter);
last_log_sent = log_iter->seq;
logger().debug(" will send {}", *log_iter);
++log_iter;
}
return crimson::make_message<MLog>(m_fsid,
std::move(out_log_queue));
}
// Stamp `entry` with the next sequence number and retain a copy on the
// queue until the monitor acknowledges it; returns the assigned seq.
version_t LogClient::queue(LogEntry &entry)
{
  const version_t seq = ++last_log;
  entry.seq = seq;
  log_queue.push_back(entry);
  return seq;
}
// Discard all queued-but-unacked entries and mark everything as sent.
// clear() on an empty deque is a no-op, so no emptiness check is needed.
void LogClient::reset()
{
  log_queue.clear();
  last_log_sent = last_log;
}
// Consume and return the next log sequence number without queueing
// an entry (used when entries bypass the monitor sink).
uint64_t LogClient::get_next_seq()
{
return ++last_log;
}
// Addresses stamped onto outgoing LogEntries, taken from the messenger.
entity_addrvec_t LogClient::get_myaddrs() const
{
return messenger->get_myaddrs();
}
// Entity (type, id) pair used as LogEntry::rank, from the messenger.
entity_name_t LogClient::get_myrank()
{
return messenger->get_myname();
}
// Configured entity name (e.g. "osd.3") used as LogEntry::name.
const EntityName& LogClient::get_myname() const
{
return local_conf()->name;
}
// Drop every queued entry whose sequence number the monitor has
// acknowledged (everything up to and including m->last).
seastar::future<> LogClient::handle_log_ack(Ref<MLogAck> m)
{
  logger().debug("handle_log_ack {}", *m);
  const version_t acked = m->last;
  for (auto it = log_queue.begin(); it != log_queue.end(); ) {
    if (it->seq > acked) {
      break;
    }
    logger().debug(" logged {}", *it);
    it = log_queue.erase(it);
  }
  return seastar::now();
}
// Look up the named channel, lazily constructing it on first use.
LogChannelRef LogClient::create_channel(const std::string& name) {
  if (auto found = channels.find(name); found != channels.end()) {
    return found->second;
  }
  auto channel = seastar::make_lw_shared<LogChannel>(this, name);
  channels.emplace(name, channel);
  return channel;
}
// Record the cluster fsid stamped onto outgoing MLog messages.
seastar::future<> LogClient::set_fsid(const uuid_d& fsid) {
m_fsid = fsid;
return seastar::now();
}
| 10,044 | 26.520548 | 77 | cc |
null | ceph-main/src/crimson/common/logclient.h | #ifndef CEPH_LOGCLIENT_H
#define CEPH_LOGCLIENT_H
#include "common/LogEntry.h"
#include "common/ostream_temp.h"
#include "common/ref.h"
#include "include/health.h"
#include "crimson/net/Fwd.h"
#include <seastar/core/future.hh>
#include <seastar/core/gate.hh>
#include <seastar/core/lowres_clock.hh>
#include <seastar/core/shared_ptr.hh>
#include <seastar/core/timer.hh>
class LogClient;
class MLog;
class MLogAck;
class Message;
struct uuid_d;
struct Connection;
class LogChannel;
namespace ceph {
namespace logging {
class Graylog;
}
}
template<typename Message> using Ref = boost::intrusive_ptr<Message>;
namespace crimson::net {
class Messenger;
}
// Controls get_mon_log_message(): FLUSH rewinds the send cursor to the
// oldest queued entry (used after a mon session reset); NO_FLUSH just
// continues from where the last send left off.
enum class log_flushing_t {
NO_FLUSH,
FLUSH
};
// Parse all clog_to_* config options into per-channel maps, plus the
// cluster fsid and local hostname. Returns 0 on success or a negative
// error code if any option fails to parse.
int parse_log_client_options(CephContext *cct,
std::map<std::string,std::string> &log_to_monitors,
std::map<std::string,std::string> &log_to_syslog,
std::map<std::string,std::string> &log_channels,
std::map<std::string,std::string> &log_prios,
std::map<std::string,std::string> &log_to_graylog,
std::map<std::string,std::string> &log_to_graylog_host,
std::map<std::string,std::string> &log_to_graylog_port,
uuid_d &fsid,
std::string &host);
/** Manage where we output to and at which priority
*
* Not to be confused with the LogClient, which is the almighty coordinator
* of channels. We just deal with the boring part of the logging: send to
* syslog, send to file, generate LogEntry and queue it for the LogClient.
*
* Past queueing the LogEntry, the LogChannel is done with the whole thing.
* LogClient will deal with sending and handling of LogEntries.
*/
class LogChannel : public LoggerSinkSet
{
public:
LogChannel(LogClient *lc, const std::string &channel);
LogChannel(LogClient *lc, const std::string &channel,
const std::string &facility, const std::string &prio);
// Each severity accessor returns a temporary ostream whose contents
// are fed to do_log() on destruction.
OstreamTemp debug() {
return OstreamTemp(CLOG_DEBUG, this);
}
void debug(std::stringstream &s) final {
do_log(CLOG_DEBUG, s);
}
/**
* Convenience function mapping health status to
* the appropriate cluster log severity.
*/
OstreamTemp health(health_status_t health) {
switch(health) {
case HEALTH_OK:
return info();
case HEALTH_WARN:
return warn();
case HEALTH_ERR:
return error();
default:
// Invalid health_status_t value
ceph_abort();
}
}
OstreamTemp info() final {
return OstreamTemp(CLOG_INFO, this);
}
void info(std::stringstream &s) final {
do_log(CLOG_INFO, s);
}
OstreamTemp warn() final {
return OstreamTemp(CLOG_WARN, this);
}
void warn(std::stringstream &s) final {
do_log(CLOG_WARN, s);
}
OstreamTemp error() final {
return OstreamTemp(CLOG_ERROR, this);
}
void error(std::stringstream &s) final {
do_log(CLOG_ERROR, s);
}
OstreamTemp sec() final {
return OstreamTemp(CLOG_SEC, this);
}
void sec(std::stringstream &s) final {
do_log(CLOG_SEC, s);
}
// Sink configuration setters; normally driven by update_config().
void set_log_to_monitors(bool v);
void set_log_to_syslog(bool v) {
log_to_syslog = v;
}
void set_log_channel(const std::string& v) {
log_channel = v;
}
void set_log_prio(const std::string& v) {
log_prio = v;
}
void set_syslog_facility(const std::string& v) {
syslog_facility = v;
}
const std::string& get_log_prio() const { return log_prio; }
const std::string& get_log_channel() const { return log_channel; }
const std::string& get_syslog_facility() const { return syslog_facility; }
bool must_log_to_syslog() const { return log_to_syslog; }
/**
* Do we want to log to syslog?
*
* @return true if log_to_syslog is true and both channel and prio
* are not empty; false otherwise.
*/
bool do_log_to_syslog() {
return must_log_to_syslog() &&
!log_prio.empty() && !log_channel.empty();
}
bool must_log_to_monitors() { return log_to_monitors; }
// Graylog forwarding is active exactly while a Graylog sink exists.
bool do_log_to_graylog() {
return (graylog != nullptr);
}
using Ref = seastar::lw_shared_ptr<LogChannel>;
/**
* update config values from parsed k/v std::map for each config option
*
* Pick out the relevant value based on our channel.
*/
void update_config(std::map<std::string,std::string> &log_to_monitors,
std::map<std::string,std::string> &log_to_syslog,
std::map<std::string,std::string> &log_channels,
std::map<std::string,std::string> &log_prios,
std::map<std::string,std::string> &log_to_graylog,
std::map<std::string,std::string> &log_to_graylog_host,
std::map<std::string,std::string> &log_to_graylog_port,
uuid_d &fsid,
std::string &host);
// Fan a message out to the configured sinks (monitors/syslog/graylog).
void do_log(clog_type prio, std::stringstream& ss) final;
void do_log(clog_type prio, const std::string& s) final;
private:
LogClient *parent; // owning client; never null
std::string log_channel; // channel name, e.g. "cluster"
std::string log_prio; // minimum syslog priority
std::string syslog_facility;
bool log_to_syslog;
bool log_to_monitors;
seastar::shared_ptr<ceph::logging::Graylog> graylog; // non-null iff graylog enabled
};
using LogChannelRef = LogChannel::Ref;
// Coordinates all LogChannels for one daemon: assigns sequence numbers,
// queues entries destined for the monitors, packages them into MLog
// messages, and trims the queue on MLogAck.
class LogClient
{
public:
enum logclient_flag_t {
NO_FLAGS = 0,
FLAG_MON = 0x1, // client lives inside a monitor
};
LogClient(crimson::net::Messenger *m, logclient_flag_t flags);
virtual ~LogClient() = default;
// Trim acknowledged entries from the queue.
seastar::future<> handle_log_ack(Ref<MLogAck> m);
// Build the next MLog for the monitor (may be empty).
MessageURef get_mon_log_message(log_flushing_t flush_flag);
// True when there are entries not yet sent to the monitor.
bool are_pending() const;
LogChannelRef create_channel() {
return create_channel(CLOG_CHANNEL_DEFAULT);
}
// Look up or lazily create the named channel.
LogChannelRef create_channel(const std::string& name);
void destroy_channel(const std::string& name) {
channels.erase(name);
}
void shutdown() {
channels.clear();
}
uint64_t get_next_seq();
entity_addrvec_t get_myaddrs() const;
const EntityName& get_myname() const;
entity_name_t get_myrank();
version_t queue(LogEntry &entry);
void reset();
seastar::future<> set_fsid(const uuid_d& fsid);
private:
MessageURef _get_mon_log_message();
crimson::net::Messenger *messenger;
bool is_mon;
version_t last_log_sent; // highest seq handed to the monitor
version_t last_log; // highest seq assigned
std::deque<LogEntry> log_queue; // entries awaiting ack, seq-ordered
std::map<std::string, LogChannelRef> channels;
uuid_d m_fsid;
};
#endif
| 6,177 | 25.515021 | 76 | h |
null | ceph-main/src/crimson/common/operation.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "operation.h"
namespace crimson {
// Emit the common fields (type, id), then delegate the type-specific
// payload to dump_detail() inside a nested "detail" section.
void Operation::dump(ceph::Formatter* f) const
{
  f->open_object_section("operation");
  f->dump_string("type", get_type_name());
  f->dump_unsigned("id", id);
  f->open_object_section("detail");
  dump_detail(f);
  f->close_section();
  f->close_section();
}
// Like dump(), but without the implementation-specific detail section.
void Operation::dump_brief(ceph::Formatter* f) const
{
f->open_object_section("operation");
f->dump_string("type", get_type_name());
f->dump_unsigned("id", id);
f->close_section();
}
// Render an Operation as "TypeName(id=N, detail=<impl-specific>)".
std::ostream &operator<<(std::ostream &lhs, const Operation &rhs) {
  lhs << rhs.get_type_name() << "(id=" << rhs.get_id() << ", detail=";
  rhs.print(lhs);
  return lhs << ")";
}
// Emit the blocker's type name plus implementation-provided detail.
void Blocker::dump(ceph::Formatter* f) const
{
f->open_object_section("blocker");
f->dump_string("op_type", get_type_name());
{
f->open_object_section("detail");
dump_detail(f);
f->close_section();
}
f->close_section();
}
namespace detail {
// Format a plain time event (name + timestamp) for ops-in-flight dumps.
void dump_time_event(const char* name,
const utime_t& timestamp,
ceph::Formatter* f)
{
assert(f);
f->open_object_section("time_event");
f->dump_string("name", name);
f->dump_stream("initiated_at") << timestamp;
f->close_section();
}
// Format a blocking event; the blocker may be null if the op is no
// longer blocked, in which case only name/timestamp are emitted.
void dump_blocking_event(const char* name,
const utime_t& timestamp,
const Blocker* const blocker,
ceph::Formatter* f)
{
assert(f);
f->open_object_section("blocking_event");
f->dump_string("name", name);
f->dump_stream("initiated_at") << timestamp;
if (blocker) {
blocker->dump(f);
}
f->close_section();
}
} // namespace detail
} // namespace crimson
| 1,703 | 21.421053 | 70 | cc |
null | ceph-main/src/crimson/common/operation.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <algorithm>
#include <array>
#include <set>
#include <vector>
#include <boost/core/demangle.hpp>
#include <boost/intrusive/list.hpp>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/shared_mutex.hh>
#include <seastar/core/future.hh>
#include <seastar/core/timer.hh>
#include <seastar/core/lowres_clock.hh>
#include <seastar/core/future-util.hh>
#include "include/ceph_assert.h"
#include "include/utime.h"
#include "common/Clock.h"
#include "common/Formatter.h"
#include "crimson/common/interruptible_future.h"
#include "crimson/common/smp_helpers.h"
#include "crimson/common/log.h"
namespace ceph {
class Formatter;
}
namespace crimson {
using registry_hook_t = boost::intrusive::list_member_hook<
boost::intrusive::link_mode<boost::intrusive::auto_unlink>>;
class Operation;
class Blocker;
namespace detail {
void dump_time_event(const char* name,
const utime_t& timestamp,
ceph::Formatter* f);
void dump_blocking_event(const char* name,
const utime_t& timestamp,
const Blocker* blocker,
ceph::Formatter* f);
} // namespace detail
/**
* Provides an interface for dumping diagnostic information about
* why a particular op is not making progress.
*/
class Blocker {
public:
// Emits type name plus implementation-provided detail (operation.cc).
void dump(ceph::Formatter *f) const;
virtual ~Blocker() = default;
private:
virtual void dump_detail(ceph::Formatter *f) const = 0;
virtual const char *get_type_name() const = 0;
};
// the main template. by default an operation has no external
// event handler (the empty tuple). specializing the template
// allows to define backends on a per-operation-type manner.
// NOTE: basically this could be a function but C++ disallows
// differentiating return type among specializations.
template <class T>
struct EventBackendRegistry {
// dependent false: the static_assert only fires if this primary
// template is actually instantiated (i.e. no specialization exists)
template <typename...> static constexpr bool always_false = false;
static std::tuple<> get_backends() {
static_assert(always_false<T>, "Registry specialization not found");
return {};
}
};
// CRTP base for events. trigger() first notifies the event's own
// internal backend, then every backend registered for the operation
// type via EventBackendRegistry.
template <class T>
struct Event {
T* that() {
return static_cast<T*>(this);
}
const T* that() const {
return static_cast<const T*>(this);
}
template <class OpT, class... Args>
void trigger(OpT&& op, Args&&... args) {
that()->internal_backend.handle(*that(),
std::forward<OpT>(op),
std::forward<Args>(args)...);
// let's call `handle()` for concrete event type from each single
// of our backends. the order in the registry matters.
std::apply([&, //args=std::forward_as_tuple(std::forward<Args>(args)...),
this] (auto... backend) {
(..., backend.handle(*that(),
std::forward<OpT>(op),
std::forward<Args>(args)...));
}, EventBackendRegistry<std::decay_t<OpT>>::get_backends());
}
};
// simplest event type for recording things like beginning or end
// of TrackableOperation's life.
// simplest event type for recording things like beginning or end
// of TrackableOperation's life.
template <class T>
struct TimeEvent : Event<T> {
struct Backend {
// `T` is passed solely to let implementations to discriminate
// basing on the type-of-event.
virtual void handle(T&, const Operation&) = 0;
};
// for the sake of dumping ops-in-flight.
struct InternalBackend final : Backend {
void handle(T&, const Operation&) override {
// record when the event fired
timestamp = ceph_clock_now();
}
utime_t timestamp;
} internal_backend;
void dump(ceph::Formatter *f) const {
auto demangled_name = boost::core::demangle(typeid(T).name());
detail::dump_time_event(
demangled_name.c_str(),
internal_backend.timestamp, f);
}
auto get_timestamp() const {
return internal_backend.timestamp;
}
};
// CRTP blocker base. Provides the nested BlockingEvent type plus the
// Trigger machinery that records, per operation, when a future started
// and stopped blocking on this blocker.
template <typename T>
class BlockerT : public Blocker {
public:
struct BlockingEvent : Event<typename T::BlockingEvent> {
using Blocker = std::decay_t<T>;
struct Backend {
// `T` is passed solely to let implementations discriminate
// basing on the type-of-event.
virtual void handle(typename T::BlockingEvent&, const Operation&, const T&) = 0;
};
struct InternalBackend : Backend {
void handle(typename T::BlockingEvent&,
const Operation&,
const T& blocker) override {
// remember when and on what the op started blocking
this->timestamp = ceph_clock_now();
this->blocker = &blocker;
}
utime_t timestamp;
const T* blocker;
} internal_backend;
// we don't want to make any BlockerT to be aware and coupled with
// an operation. to not templatize an entire path from an op to
// a blocker, type erasure is used.
struct TriggerI {
TriggerI(BlockingEvent& event) : event(event) {}
template <class FutureT>
auto maybe_record_blocking(FutureT&& fut, const T& blocker) {
if (!fut.available()) {
// a full blown call via vtable. that's the cost for templatization
// avoidance. anyway, most of the things actually have the type
// knowledge.
record_blocking(blocker);
return std::forward<FutureT>(fut).finally(
[&event=this->event, &blocker] () mutable {
// beware trigger instance may be already dead when this
// is executed!
record_unblocking(event, blocker);
});
}
return std::forward<FutureT>(fut);
}
virtual ~TriggerI() = default;
protected:
// it's for the sake of erasing the OpT type
virtual void record_blocking(const T& blocker) = 0;
static void record_unblocking(BlockingEvent& event, const T& blocker) {
assert(event.internal_backend.blocker == &blocker);
event.internal_backend.blocker = nullptr;
}
BlockingEvent& event;
};
// typed variant: statically dispatched, avoids the vtable call above
template <class OpT>
struct Trigger : TriggerI {
Trigger(BlockingEvent& event, const OpT& op) : TriggerI(event), op(op) {}
template <class FutureT>
auto maybe_record_blocking(FutureT&& fut, const T& blocker) {
if (!fut.available()) {
// no need for the dynamic dispatch! if we're lucky, a compiler
// should collapse all these abstractions into a bunch of movs.
this->Trigger::record_blocking(blocker);
return std::forward<FutureT>(fut).finally(
[&event=this->event, &blocker] () mutable {
Trigger::record_unblocking(event, blocker);
});
}
return std::forward<FutureT>(fut);
}
const OpT &get_op() { return op; }
protected:
void record_blocking(const T& blocker) override {
this->event.trigger(op, blocker);
}
const OpT& op;
};
void dump(ceph::Formatter *f) const {
auto demangled_name = boost::core::demangle(typeid(T).name());
detail::dump_blocking_event(
demangled_name.c_str(),
internal_backend.timestamp,
internal_backend.blocker,
f);
}
};
virtual ~BlockerT() = default;
template <class TriggerT, class... Args>
decltype(auto) track_blocking(TriggerT&& trigger, Args&&... args) {
return std::forward<TriggerT>(trigger).maybe_record_blocking(
std::forward<Args>(args)..., static_cast<const T&>(*this));
}
private:
const char *get_type_name() const final {
return static_cast<const T*>(this)->type_name;
}
};
// Fans a single logical blocking event out over a dynamic collection of
// per-part events of type T (e.g. one per recovery target), each with
// its own trigger lifetime.
template <class T>
struct AggregateBlockingEvent {
struct TriggerI {
protected:
struct TriggerContainerI {
virtual typename T::TriggerI& get_trigger() = 0;
virtual ~TriggerContainerI() = default;
};
using TriggerContainerIRef = std::unique_ptr<TriggerContainerI>;
virtual TriggerContainerIRef create_part_trigger() = 0;
public:
template <class FutureT>
auto maybe_record_blocking(FutureT&& fut,
const typename T::Blocker& blocker) {
// AggregateBlockingEvent is supposed to be used on relatively cold
// paths (recovery), so we don't need to worry about the dynamic
// polymorphism / dynamic memory overhead.
auto tcont = create_part_trigger();
return tcont->get_trigger().maybe_record_blocking(
std::move(fut), blocker
).finally([tcont=std::move(tcont)] {});
}
virtual ~TriggerI() = default;
};
template <class OpT>
struct Trigger final : TriggerI {
Trigger(AggregateBlockingEvent& event, const OpT& op)
: event(event), op(op) {}
// owns one entry in the aggregate's event list; erases it on
// destruction so the list only reflects live part-triggers
class TriggerContainer final : public TriggerI::TriggerContainerI {
AggregateBlockingEvent& event;
typename decltype(event.events)::iterator iter;
typename T::template Trigger<OpT> trigger;
typename T::TriggerI &get_trigger() final {
return trigger;
}
public:
TriggerContainer(AggregateBlockingEvent& _event, const OpT& op) :
event(_event),
iter(event.events.emplace(event.events.end())),
trigger(*iter, op) {}
~TriggerContainer() final {
event.events.erase(iter);
}
};
protected:
typename TriggerI::TriggerContainerIRef create_part_trigger() final {
return std::make_unique<TriggerContainer>(event, op);
}
private:
AggregateBlockingEvent& event;
const OpT& op;
};
private:
std::list<T> events;
template <class OpT>
friend class Trigger;
};
/**
* Common base for all crimson-osd operations. Mainly provides
* an interface for registering ops in flight and dumping
* diagnostic information.
*/
class Operation : public boost::intrusive_ref_counter<
Operation, boost::thread_unsafe_counter> {
public:
using id_t = uint64_t;
static constexpr id_t NULL_ID = std::numeric_limits<uint64_t>::max();
id_t get_id() const {
return id;
}
static constexpr bool is_trackable = false;
virtual unsigned get_type() const = 0;
virtual const char *get_type_name() const = 0;
virtual void print(std::ostream &) const = 0;
void dump(ceph::Formatter *f) const;
void dump_brief(ceph::Formatter *f) const;
virtual ~Operation() = default;
private:
virtual void dump_detail(ceph::Formatter *f) const = 0;
// auto-unlink hook: the op drops out of its registry list on destruction
registry_hook_t registry_hook;
id_t id = 0;
void set_id(id_t in_id) {
id = in_id;
}
friend class OperationRegistryI;
template <size_t>
friend class OperationRegistryT;
};
using OperationRef = boost::intrusive_ptr<Operation>;
std::ostream &operator<<(std::ostream &, const Operation &op);
/**
* Maintains a set of lists of all active ops.
*/
class OperationRegistryI {
using op_list_member_option = boost::intrusive::member_hook<
Operation,
registry_hook_t,
&Operation::registry_hook
>;
friend class Operation;
// polls until all registries drain after do_stop()
seastar::timer<seastar::lowres_clock> shutdown_timer;
seastar::promise<> shutdown;
protected:
virtual void do_register(Operation *op) = 0;
virtual bool registries_empty() const = 0;
virtual void do_stop() = 0;
public:
using op_list = boost::intrusive::list<
Operation,
op_list_member_option,
boost::intrusive::constant_time_size<false>>;
// Construct an op of type T, register it, and return an intrusive ptr.
template <typename T, typename... Args>
auto create_operation(Args&&... args) {
boost::intrusive_ptr<T> op = new T(std::forward<Args>(args)...);
do_register(&*op);
return op;
}
// Resolves once every registered operation has completed.
seastar::future<> stop() {
crimson::get_logger(ceph_subsys_osd).info("OperationRegistryI::{}", __func__);
do_stop();
shutdown_timer.set_callback([this] {
if (registries_empty()) {
shutdown.set_value();
shutdown_timer.cancel();
}
});
shutdown_timer.arm_periodic(
std::chrono::milliseconds(100/*TODO: use option instead*/));
return shutdown.get_future();
}
};
// Concrete registry: one intrusive list per operation type, with ids
// partitioned per core.
template <size_t NUM_REGISTRIES>
class OperationRegistryT : public OperationRegistryI {
Operation::id_t next_id = 0;
std::array<
op_list,
NUM_REGISTRIES
> registries;
protected:
void do_register(Operation *op) final {
const auto op_type = op->get_type();
registries[op_type].push_back(*op);
op->set_id(++next_id);
}
bool registries_empty() const final {
return std::all_of(registries.begin(),
registries.end(),
[](auto& opl) {
return opl.empty();
});
}
protected:
OperationRegistryT(core_id_t core)
// Use core to initialize upper 8 bits of counters to ensure that
// ids generated by different cores are disjoint
: next_id(static_cast<id_t>(core) <<
(std::numeric_limits<id_t>::digits - 8))
{}
template <size_t REGISTRY_INDEX>
const op_list& get_registry() const {
static_assert(
REGISTRY_INDEX < std::tuple_size<decltype(registries)>::value);
return registries[REGISTRY_INDEX];
}
template <size_t REGISTRY_INDEX>
op_list& get_registry() {
static_assert(
REGISTRY_INDEX < std::tuple_size<decltype(registries)>::value);
return registries[REGISTRY_INDEX];
}
public:
/// Iterate over live ops
template <typename F>
void for_each_op(F &&f) const {
for (const auto ®istry: registries) {
for (const auto &op: registry) {
std::invoke(f, op);
}
}
}
/// Removes op from registry
void remove_from_registry(Operation &op) {
const auto op_type = op.get_type();
registries[op_type].erase(op_list::s_iterator_to(op));
}
/// Adds op to registry
void add_to_registry(Operation &op) {
const auto op_type = op.get_type();
registries[op_type].push_back(op);
}
};
// Token handed out by a pipeline stage's enter(); releasing it (exit or
// cancel) frees the stage for the next op.
class PipelineExitBarrierI {
public:
using Ref = std::unique_ptr<PipelineExitBarrierI>;
/// Waits for exit barrier
virtual std::optional<seastar::future<>> wait() = 0;
/// Releases pipeline stage, can only be called after wait
virtual void exit() = 0;
/// Releases pipeline resources without waiting on barrier
virtual void cancel() = 0;
/// Must ensure that resources are released, likely by calling cancel()
virtual ~PipelineExitBarrierI() {}
};
// CRTP base for pipeline stages; pins the stage to the shard it was
// constructed on and forwards enter() to the concrete stage type.
template <class T>
class PipelineStageIT : public BlockerT<T> {
const core_id_t core = seastar::this_shard_id();
public:
core_id_t get_core() const { return core; }
template <class... Args>
decltype(auto) enter(Args&&... args) {
return static_cast<T*>(this)->enter(std::forward<Args>(args)...);
}
};
// Per-op handle tracking which pipeline stage the op currently holds,
// via the stage's exit barrier. Move-only.
class PipelineHandle {
PipelineExitBarrierI::Ref barrier;
std::optional<seastar::future<>> wait_barrier() {
return barrier ? barrier->wait() : std::nullopt;
}
public:
PipelineHandle() = default;
PipelineHandle(const PipelineHandle&) = delete;
PipelineHandle(PipelineHandle&&) = default;
PipelineHandle &operator=(const PipelineHandle&) = delete;
PipelineHandle &operator=(PipelineHandle&&) = default;
/**
* Returns a future which unblocks when the handle has entered the passed
* OrderedPipelinePhase. If already in a phase, enter will also release
* that phase after placing itself in the queue for the next one to preserve
* ordering.
*/
template <typename OpT, typename T>
seastar::future<>
enter(T &stage, typename T::BlockingEvent::template Trigger<OpT>&& t) {
ceph_assert(stage.get_core() == seastar::this_shard_id());
auto wait_fut = wait_barrier();
if (wait_fut.has_value()) {
// must finish leaving the previous stage before queueing for the next
return wait_fut.value().then([this, &stage, t=std::move(t)] () mutable {
auto fut = t.maybe_record_blocking(stage.enter(t), stage);
exit();
return std::move(fut).then(
[this, t=std::move(t)](auto &&barrier_ref) mutable {
barrier = std::move(barrier_ref);
return seastar::now();
});
});
} else {
auto fut = t.maybe_record_blocking(stage.enter(t), stage);
exit();
return std::move(fut).then(
[this, t=std::move(t)](auto &&barrier_ref) mutable {
barrier = std::move(barrier_ref);
return seastar::now();
});
}
}
/**
* Completes pending exit barrier without entering a new one.
*/
seastar::future<> complete() {
auto ret = wait_barrier();
barrier.reset();
return ret ? std::move(ret.value()) : seastar::now();
}
/**
* Exits current phase, skips exit barrier, should only be used for op
* failure. Permitting the handle to be destructed as the same effect.
*/
void exit() {
barrier.reset();
}
};
/**
* Ensures that at most one op may consider itself in the phase at a time.
* Ops will see enter() unblock in the order in which they tried to enter
* the phase. entering (though not necessarily waiting for the future to
* resolve) a new phase prior to exiting the previous one will ensure that
* the op ordering is preserved.
*/
// One op at a time holds the phase (a shared_mutex); waiters are served
// in arrival order. held_by/waiting exist only for diagnostics dumps.
template <class T>
class OrderedExclusivePhaseT : public PipelineStageIT<T> {
void dump_detail(ceph::Formatter *f) const final {
f->dump_unsigned("waiting", waiting);
if (held_by != Operation::NULL_ID) {
f->dump_unsigned("held_by_operation_id", held_by);
}
}
class ExitBarrier final : public PipelineExitBarrierI {
OrderedExclusivePhaseT *phase;
Operation::id_t op_id;
public:
ExitBarrier(OrderedExclusivePhaseT *phase, Operation::id_t id)
: phase(phase), op_id(id) {}
std::optional<seastar::future<>> wait() final {
// lock was already acquired in enter(); nothing further to wait on
return std::nullopt;
}
void exit() final {
if (phase) {
auto *p = phase;
auto id = op_id;
phase = nullptr;
// unlock on the phase's home shard
std::ignore = seastar::smp::submit_to(
p->get_core(),
[p, id] {
p->exit(id);
});
}
}
void cancel() final {
exit();
}
~ExitBarrier() final {
cancel();
}
};
void exit(Operation::id_t op_id) {
clear_held_by(op_id);
mutex.unlock();
}
public:
template <class TriggerT>
seastar::future<PipelineExitBarrierI::Ref> enter(TriggerT& t) {
waiting++;
return mutex.lock().then([this, op_id=t.get_op().get_id()] {
ceph_assert_always(waiting > 0);
--waiting;
set_held_by(op_id);
return PipelineExitBarrierI::Ref(new ExitBarrier{this, op_id});
});
}
private:
void set_held_by(Operation::id_t id) {
ceph_assert_always(held_by == Operation::NULL_ID);
held_by = id;
}
void clear_held_by(Operation::id_t id) {
ceph_assert_always(held_by == id);
held_by = Operation::NULL_ID;
}
unsigned waiting = 0;
seastar::shared_mutex mutex;
Operation::id_t held_by = Operation::NULL_ID;
};
/**
* Permits multiple ops to inhabit the stage concurrently, but ensures that
* they will proceed to the next stage in the order in which they called
* enter.
*/
// Many ops may occupy the stage concurrently; the per-op lock future
// (waited on at exit) enforces that ops leave in the order they entered.
template <class T>
class OrderedConcurrentPhaseT : public PipelineStageIT<T> {
using base_t = PipelineStageIT<T>;
public:
struct BlockingEvent : base_t::BlockingEvent {
using base_t::BlockingEvent::BlockingEvent;
struct ExitBarrierEvent : TimeEvent<ExitBarrierEvent> {};
template <class OpT>
struct Trigger : base_t::BlockingEvent::template Trigger<OpT> {
using base_t::BlockingEvent::template Trigger<OpT>::Trigger;
template <class FutureT>
decltype(auto) maybe_record_exit_barrier(FutureT&& fut) {
if (!fut.available()) {
exit_barrier_event.trigger(this->op);
}
return std::forward<FutureT>(fut);
}
ExitBarrierEvent exit_barrier_event;
};
};
private:
void dump_detail(ceph::Formatter *f) const final {}
template <class TriggerT>
class ExitBarrier final : public PipelineExitBarrierI {
OrderedConcurrentPhaseT *phase;
std::optional<seastar::future<>> barrier;
TriggerT trigger;
public:
ExitBarrier(
OrderedConcurrentPhaseT *phase,
seastar::future<> &&barrier,
TriggerT& trigger) : phase(phase), barrier(std::move(barrier)), trigger(trigger) {}
std::optional<seastar::future<>> wait() final {
assert(phase);
assert(barrier);
auto ret = std::move(*barrier);
barrier = std::nullopt;
return trigger.maybe_record_exit_barrier(std::move(ret));
}
void exit() final {
if (barrier) {
// never waited: chain the unlock onto the pending lock future
static_cast<void>(
std::move(*barrier).then([phase=this->phase] { phase->mutex.unlock(); }));
barrier = std::nullopt;
phase = nullptr;
}
if (phase) {
// already waited: just unlock on the phase's home shard
std::ignore = seastar::smp::submit_to(
phase->get_core(),
[this] {
phase->mutex.unlock();
phase = nullptr;
});
}
}
void cancel() final {
exit();
}
~ExitBarrier() final {
cancel();
}
};
public:
template <class TriggerT>
seastar::future<PipelineExitBarrierI::Ref> enter(TriggerT& t) {
// entry itself never blocks; ordering is enforced by the lock future
return seastar::make_ready_future<PipelineExitBarrierI::Ref>(
new ExitBarrier<TriggerT>{this, mutex.lock(), t});
}
private:
seastar::shared_mutex mutex;
};
/**
* Imposes no ordering or exclusivity at all. Ops enter without constraint and
* may exit in any order. Useful mainly for informational purposes between
* stages with constraints.
*/
// No ordering or exclusivity: enter() always succeeds immediately and
// the barrier is a no-op. Useful as an informational marker stage.
template <class T>
class UnorderedStageT : public PipelineStageIT<T> {
void dump_detail(ceph::Formatter *f) const final {}
class ExitBarrier final : public PipelineExitBarrierI {
public:
ExitBarrier() = default;
std::optional<seastar::future<>> wait() final {
return std::nullopt;
}
void exit() final {}
void cancel() final {}
~ExitBarrier() final {}
};
public:
template <class... IgnoreArgs>
seastar::future<PipelineExitBarrierI::Ref> enter(IgnoreArgs&&...) {
return seastar::make_ready_future<PipelineExitBarrierI::Ref>(
new ExitBarrier);
}
};
}
#if FMT_VERSION >= 90000
// fmt v9 dropped the implicit operator<< fallback; opt Operation back in.
template <> struct fmt::formatter<crimson::Operation> : fmt::ostream_formatter {};
#endif
| 21,287 | 26.397683 | 89 | h |
null | ceph-main/src/crimson/common/perf_counters_collection.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/ceph_context.h"
#include "perf_counters_collection.h"
namespace crimson::common {
// Construct the per-shard collection with an empty impl.
// Improvement: initialize perf_collection in the member initializer list
// instead of assigning inside the constructor body.
PerfCountersCollection::PerfCountersCollection()
  : perf_collection(std::make_unique<PerfCountersCollectionImpl>())
{}
// Drop any counters still registered with the impl before it is destroyed.
PerfCountersCollection::~PerfCountersCollection()
{
  perf_collection->clear();
}
// Expose the underlying impl; ownership stays with this collection.
PerfCountersCollectionImpl* PerfCountersCollection::get_perf_collection()
{
  return perf_collection.get();
}
// Forward a formatted dump request straight to the impl; empty logger /
// counter strings mean "dump everything".
void PerfCountersCollection::dump_formatted(ceph::Formatter *f, bool schema,
                                            bool dump_labeled,
                                            const std::string &logger,
                                            const std::string &counter)
{
  perf_collection->dump_formatted(f, schema, dump_labeled, logger, counter);
}
PerfCountersCollection::ShardedPerfCountersCollection PerfCountersCollection::sharded_perf_coll;
// Deleter used by PerfCountersRef: deregister the counters from the owning
// context (when one was supplied at construction) before freeing them.
void PerfCountersDeleter::operator()(PerfCounters* p) noexcept
{
  if (cct != nullptr) {
    cct->get_perfcounters_collection()->remove(p);
  }
  delete p;
}
}
| 1,129 | 25.904762 | 96 | cc |
null | ceph-main/src/crimson/common/perf_counters_collection.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "common/perf_counters.h"
#include "include/common_fwd.h"
#include <seastar/core/sharded.hh>
using crimson::common::PerfCountersCollectionImpl;
namespace crimson::common {
// Seastar-sharded wrapper around PerfCountersCollectionImpl: each shard owns
// its own impl; access goes through the sharded_perf_coll()/local_perf_coll()
// free functions declared below.
class PerfCountersCollection: public seastar::sharded<PerfCountersCollection>
{
  using ShardedPerfCountersCollection = seastar::sharded<PerfCountersCollection>;

private:
  std::unique_ptr<PerfCountersCollectionImpl> perf_collection;
  // the process-wide sharded instance (defined in the .cc)
  static ShardedPerfCountersCollection sharded_perf_coll;
  friend PerfCountersCollection& local_perf_coll();
  friend ShardedPerfCountersCollection& sharded_perf_coll();

public:
  PerfCountersCollection();
  ~PerfCountersCollection();
  // Borrow the impl; ownership stays with the collection.
  PerfCountersCollectionImpl* get_perf_collection();
  // Dump counters via Formatter; empty logger/counter mean "everything".
  void dump_formatted(ceph::Formatter *f, bool schema, bool dump_labeled,
                      const std::string &logger = "",
                      const std::string &counter = "");
};
// Accessor for the process-wide sharded collection.
inline PerfCountersCollection::ShardedPerfCountersCollection& sharded_perf_coll(){
  return PerfCountersCollection::sharded_perf_coll;
}

// Accessor for the current shard's collection.
inline PerfCountersCollection& local_perf_coll() {
  return PerfCountersCollection::sharded_perf_coll.local();
}
// unique_ptr deleter that deregisters the counters from their CephContext
// (if any) before deleting them; see PerfCountersRef below.
class PerfCountersDeleter {
  CephContext* cct;

public:
  PerfCountersDeleter() noexcept : cct(nullptr) {}
  PerfCountersDeleter(CephContext* cct) noexcept : cct(cct) {}
  void operator()(PerfCounters* p) noexcept;
};
}
using PerfCountersRef = std::unique_ptr<crimson::common::PerfCounters, crimson::common::PerfCountersDeleter>;
| 1,591 | 30.84 | 109 | h |
null | ceph-main/src/crimson/common/shared_lru.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <memory>
#include <optional>
#include <boost/smart_ptr/local_shared_ptr.hpp>
#include <boost/smart_ptr/weak_ptr.hpp>
#include "simple_lru.h"
/// SharedLRU does its best to cache objects. It not only tracks the objects
/// in its LRU cache with strong references, it also tracks objects with
/// weak_ptr even if the cache does not hold any strong references to them. so
/// that it can return the objects after they are evicted, as long as they've
/// ever been cached and have not been destroyed yet.
template<class K, class V>
class SharedLRU {
  using shared_ptr_t = boost::local_shared_ptr<V>;
  using weak_ptr_t = boost::weak_ptr<V>;
  using value_type = std::pair<K, shared_ptr_t>;

  // weak_refs is already ordered, and we don't use accessors like
  // LRUCache::lower_bound(), so unordered LRUCache would suffice.
  SimpleLRU<K, shared_ptr_t, false> cache;
  // tracks every object ever handed out and not yet destroyed, even after
  // it has been evicted from `cache`; the raw V* is kept alongside the
  // weak_ptr for identity purposes
  std::map<K, std::pair<weak_ptr_t, V*>> weak_refs;

  // attached to every shared_ptr we create: removes the weak_refs entry
  // when the last strong reference drops, then frees the object
  struct Deleter {
    SharedLRU<K,V>* cache;
    const K key;
    void operator()(V* ptr) {
      cache->_erase_weak(key);
      delete ptr;
    }
  };
  void _erase_weak(const K& key) {
    weak_refs.erase(key);
  }
public:
  SharedLRU(size_t max_size = 20)
    : cache{max_size}
  {}
  ~SharedLRU() {
    cache.clear();
    // initially, we were assuming that no pointer obtained from SharedLRU
    // can outlive the lru itself. However, since going with the interruption
    // concept for handling shutdowns, this is no longer valid.
    weak_refs.clear();
  }
  /**
   * Returns a reference to the given key, and perform an insertion if such
   * key does not already exist
   */
  shared_ptr_t operator[](const K& key);
  /**
   * Returns true iff there are no live references left to anything that has been
   * in the cache.
   */
  bool empty() const {
    return weak_refs.empty();
  }
  size_t size() const {
    return cache.size();
  }
  size_t capacity() const {
    return cache.capacity();
  }
  /***
   * Inserts a key if not present, or bumps it to the front of the LRU if
   * it is, and then gives you a reference to the value. If the key already
   * existed, you are responsible for deleting the new value you tried to
   * insert.
   *
   * @param key The key to insert
   * @param value The value that goes with the key
   * @param existed Set to true if the value was already in the
   * map, false otherwise
   * @return A reference to the map's value for the given key
   */
  shared_ptr_t insert(const K& key, std::unique_ptr<V> value);
  // clear all strong reference from the lru.
  void clear() {
    cache.clear();
  }
  shared_ptr_t find(const K& key);
  // return the last element that is not greater than key
  shared_ptr_t lower_bound(const K& key);
  // return the first element that is greater than key
  std::optional<value_type> upper_bound(const K& key);

  void erase(const K& key) {
    cache.erase(key);
    _erase_weak(key);
  }
};
// Insert `value` under `key` unless a live object for `key` already exists;
// in that case the existing object wins and the caller's unique_ptr frees
// the new value on return. Either way the winning pointer is (re)cached.
template<class K, class V>
typename SharedLRU<K,V>::shared_ptr_t
SharedLRU<K,V>::insert(const K& key, std::unique_ptr<V> value)
{
  shared_ptr_t val;
  if (auto found = weak_refs.find(key); found != weak_refs.end()) {
    // an object for this key is still alive somewhere; reuse it
    val = found->second.first.lock();
  }
  if (!val) {
    // take ownership; Deleter will drop the weak_refs entry on destruction
    val.reset(value.release(), Deleter{this, key});
    weak_refs.emplace(key, std::make_pair(val, val.get()));
  }
  cache.insert(key, val);
  return val;
}

// Return the object for `key`, default-constructing (and registering) one
// if no live object exists; always refreshes the LRU cache entry.
template<class K, class V>
typename SharedLRU<K,V>::shared_ptr_t
SharedLRU<K,V>::operator[](const K& key)
{
  if (auto found = cache.find(key); found) {
    return *found;
  }
  shared_ptr_t val;
  if (auto found = weak_refs.find(key); found != weak_refs.end()) {
    val = found->second.first.lock();
  }
  if (!val) {
    val.reset(new V{}, Deleter{this, key});
    weak_refs.emplace(key, std::make_pair(val, val.get()));
  }
  cache.insert(key, val);
  return val;
}
// Look `key` up, first in the strong LRU cache and then among the weak
// references; a hit found only via weak_refs is promoted back into the
// cache. Returns an empty pointer when the object is unknown or has
// already been destroyed.
template<class K, class V>
typename SharedLRU<K,V>::shared_ptr_t
SharedLRU<K,V>::find(const K& key)
{
  if (auto cached = cache.find(key); cached) {
    return *cached;
  }
  shared_ptr_t live;
  auto weak_it = weak_refs.find(key);
  if (weak_it != weak_refs.end()) {
    live = weak_it->second.first.lock();
  }
  if (live) {
    cache.insert(key, live);
  }
  return live;
}
// Find an entry "near" `key` among the weak references and, if it is still
// alive, cache and return it.
// NOTE(review): despite the class-level comment ("last element not greater
// than key"), weak_refs.lower_bound(key) yields the FIRST element with a
// key >= key (only stepping back when past the end), and the result is
// cached under the *requested* key even when found->first differs —
// confirm both are intended.
template<class K, class V>
typename SharedLRU<K,V>::shared_ptr_t
SharedLRU<K,V>::lower_bound(const K& key)
{
  if (weak_refs.empty()) {
    return {};
  }
  auto found = weak_refs.lower_bound(key);
  if (found == weak_refs.end()) {
    --found;
  }
  if (auto val = found->second.first.lock(); val) {
    cache.insert(key, val);
    return val;
  } else {
    return {};
  }
}
// Return the first entry with key strictly greater than `key` whose object
// is still alive, skipping expired entries; does not touch the LRU cache.
template<class K, class V>
std::optional<typename SharedLRU<K,V>::value_type>
SharedLRU<K,V>::upper_bound(const K& key)
{
  auto it = weak_refs.upper_bound(key);
  while (it != weak_refs.end()) {
    if (auto live = it->second.first.lock(); live) {
      return std::make_pair(it->first, live);
    }
    ++it;
  }
  return std::nullopt;
}
| 5,024 | 26.762431 | 81 | h |
null | ceph-main/src/crimson/common/simple_lru.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <list>
#include <map>
#include <optional>
#include <type_traits>
#include <unordered_map>
/// A fixed-capacity LRU cache mapping Key -> Value.
///
/// When Ordered is true the index is a std::map (enabling lower_bound());
/// otherwise it is a std::unordered_map. Fixes over the previous revision:
/// - the constructor forwarded `size` to the map constructor, which is
///   ill-formed for std::map (no size_t constructor); the hash table is now
///   pre-sized explicitly and only when unordered.
/// - lower_bound() declared an unusable return type
///   (std::optional<std::enable_if<Ordered, Value>>); it now returns
///   std::optional<Value> and statically rejects unordered instantiations.
template <class Key, class Value, bool Ordered>
class SimpleLRU {
  static_assert(std::is_default_constructible_v<Value>);
  using list_type = std::list<Key>;
  template<class K, class V>
  using map_t = std::conditional_t<Ordered,
                                   std::map<K, V>,
                                   std::unordered_map<K, V>>;
  // each index entry also remembers its position in the LRU list so a hit
  // can be bumped to the front in O(1)
  using map_type = map_t<Key, std::pair<Value, typename list_type::iterator>>;
  list_type lru;    // keys, most recently used first
  map_type cache;
  const size_t max_size;

public:
  SimpleLRU(size_t size = 20)
    : max_size(size) {
    if constexpr (!Ordered) {
      // only the hash-based index supports pre-sizing
      cache.reserve(size);
    }
  }
  size_t size() const {
    return cache.size();
  }
  size_t capacity() const {
    return max_size;
  }
  using insert_return_type = std::pair<Value, bool>;
  // Insert key/value; the bool is true iff the key already existed, in
  // which case the existing value is returned and `value` is discarded.
  insert_return_type insert(const Key& key, Value value);
  // Return the value for key (bumping it in the LRU), or nullopt.
  std::optional<Value> find(const Key& key);
  // Return the value of the first element with key >= the given key
  // (bumping it in the LRU), or nullopt. Ordered instantiations only.
  std::optional<Value> lower_bound(const Key& key);
  void erase(const Key& key);
  void clear();
private:
  // bump the item to the front of the lru list
  Value _lru_add(typename map_type::iterator found);
  // evict the last element of most recently used list
  void _evict();
};

template <class Key, class Value, bool Ordered>
typename SimpleLRU<Key,Value,Ordered>::insert_return_type
SimpleLRU<Key,Value,Ordered>::insert(const Key& key, Value value)
{
  if constexpr(Ordered) {
    auto found = cache.lower_bound(key);
    if (found != cache.end() && found->first == key) {
      // already exists
      return {found->second.first, true};
    } else {
      if (size() >= capacity()) {
        _evict();
      }
      lru.push_front(key);
      // use lower_bound as hint to save the lookup
      cache.emplace_hint(found, key, std::make_pair(value, lru.begin()));
      return {std::move(value), false};
    }
  } else {
    // cache is not ordered
    auto found = cache.find(key);
    if (found != cache.end()) {
      // already exists
      return {found->second.first, true};
    } else {
      if (size() >= capacity()) {
        _evict();
      }
      lru.push_front(key);
      cache.emplace(key, std::make_pair(value, lru.begin()));
      return {std::move(value), false};
    }
  }
}

template <class Key, class Value, bool Ordered>
std::optional<Value> SimpleLRU<Key,Value,Ordered>::find(const Key& key)
{
  if (auto found = cache.find(key); found != cache.end()){
    return _lru_add(found);
  } else {
    return {};
  }
}

template <class Key, class Value, bool Ordered>
std::optional<Value>
SimpleLRU<Key,Value,Ordered>::lower_bound(const Key& key)
{
  static_assert(Ordered, "lower_bound() requires an ordered SimpleLRU");
  if (auto found = cache.lower_bound(key); found != cache.end()) {
    return _lru_add(found);
  } else {
    return {};
  }
}

template <class Key, class Value, bool Ordered>
void SimpleLRU<Key,Value,Ordered>::clear()
{
  lru.clear();
  cache.clear();
}

template <class Key, class Value, bool Ordered>
void SimpleLRU<Key,Value,Ordered>::erase(const Key& key)
{
  if (auto found = cache.find(key); found != cache.end()) {
    lru.erase(found->second.second);
    cache.erase(found);
  }
}

template <class Key, class Value, bool Ordered>
Value SimpleLRU<Key,Value,Ordered>::_lru_add(
  typename SimpleLRU<Key,Value,Ordered>::map_type::iterator found)
{
  auto& [value, in_lru] = found->second;
  if (in_lru != lru.begin()){
    // move item to the front
    lru.splice(lru.begin(), lru, in_lru);
  }
  // the item is already at the front
  return value;
}

template <class Key, class Value, bool Ordered>
void SimpleLRU<Key,Value,Ordered>::_evict()
{
  // evict the last element of most recently used list
  auto last = --lru.end();
  cache.erase(*last);
  lru.erase(last);
}
| 3,782 | 25.640845 | 78 | h |
null | ceph-main/src/crimson/common/smp_helpers.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <limits>
#include <seastar/core/smp.hh>
#include "crimson/common/errorator.h"
#include "crimson/common/utility.h"
namespace crimson {
using core_id_t = seastar::shard_id;
static constexpr core_id_t NULL_CORE = std::numeric_limits<core_id_t>::max();
// Run f on the given core via seastar::smp::submit_to. Errorated futures
// cannot cross submit_to directly, so they are lowered to their base
// seastar future for the hop and re-wrapped in ret_type on return.
auto submit_to(core_id_t core, auto &&f) {
  using ret_type = decltype(f());
  if constexpr (is_errorated_future_v<ret_type>) {
    auto ret = seastar::smp::submit_to(
      core,
      [f=std::move(f)]() mutable {
        return f().to_base();
      });
    return ret_type(std::move(ret));
  } else {
    return seastar::smp::submit_to(core, std::move(f));
  }
}
// Invoke obj.*method(args...) on the given core. The arguments are packed
// into a tuple captured by the cross-core lambda; `obj` is captured by
// reference, so it must outlive the remote invocation.
template <typename Obj, typename Method, typename... Args>
auto proxy_method_on_core(
  core_id_t core, Obj &obj, Method method, Args&&... args) {
  return crimson::submit_to(
    core,
    [&obj, method,
     arg_tuple=std::make_tuple(std::forward<Args>(args)...)]() mutable {
      return apply_method_to_tuple(obj, method, std::move(arg_tuple));
    });
}
/**
* reactor_map_seq
*
* Invokes f on each reactor sequentially, Caller may assume that
* f will not be invoked concurrently on multiple cores.
*/
/**
 * reactor_map_seq
 *
 * Invokes f on each reactor sequentially, Caller may assume that
 * f will not be invoked concurrently on multiple cores.
 */
template <typename F>
auto reactor_map_seq(F &&f) {
  using ret_type = decltype(f());
  // errorated futures need crimson::do_for_each; plain futures use seastar's
  if constexpr (is_errorated_future_v<ret_type>) {
    auto ret = crimson::do_for_each(
      seastar::smp::all_cpus().begin(),
      seastar::smp::all_cpus().end(),
      [f=std::move(f)](auto core) mutable {
        return seastar::smp::submit_to(
          core,
          [&f] {
            return std::invoke(f);
          });
      });
    return ret_type(ret);
  } else {
    return seastar::do_for_each(
      seastar::smp::all_cpus().begin(),
      seastar::smp::all_cpus().end(),
      [f=std::move(f)](auto core) mutable {
        return seastar::smp::submit_to(
          core,
          [&f] {
            return std::invoke(f);
          });
      });
  }
}
/**
* sharded_map_seq
*
* Invokes f on each shard of t sequentially. Caller may assume that
* f will not be invoked concurrently on multiple cores.
*/
/**
 * sharded_map_seq
 *
 * Invokes f on each shard of t sequentially (f receives t.local() on each
 * core in turn). Caller may assume that f will not be invoked concurrently
 * on multiple cores.
 */
template <typename T, typename F>
auto sharded_map_seq(T &t, F &&f) {
  return reactor_map_seq(
    [&t, f=std::forward<F>(f)]() mutable {
      return std::invoke(f, t.local());
    });
}
}
| 2,286 | 23.591398 | 77 | h |
null | ceph-main/src/crimson/common/throttle.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include "throttle.h"
namespace crimson::common {
// Unconditionally consume c slots (no blocking); returns the new count.
// max == 0 means throttling is disabled and take() is a no-op.
int64_t Throttle::take(int64_t c)
{
  if (max == 0u) {
    return 0;
  }
  count += c;
  return count;
}

// Return c slots and wake a waiter; returns the new count. Signaling before
// decrementing is safe here: the waiter's predicate is re-evaluated in its
// own continuation, after this function has updated `count`.
int64_t Throttle::put(int64_t c)
{
  if (max == 0u) {
    return 0;
  }
  if (!c) {
    return count;
  }
  on_free_slots.signal();
  count -= c;
  return count;
}

// Wait until c slots fit under `max`, then consume them. `pending` counts
// callers currently blocked in (or entering) the wait.
seastar::future<> Throttle::get(size_t c)
{
  if (max == 0u) {
    return seastar::make_ready_future<>();
  }
  pending++;
  return on_free_slots.wait([this, c] {
    return !_should_wait(c);
  }).then([this, c] {
    pending--;
    count += c;
    return seastar::make_ready_future<>();
  });
}

// Change the limit; raising it wakes a waiter so it can re-check its
// predicate against the new max.
void Throttle::reset_max(size_t m) {
  if (max == m) {
    return;
  }

  if (m > max) {
    on_free_slots.signal();
  }
  max = m;
}
// A request of size c must wait when granting it would push `count` past
// `max`; oversized requests (c >= max) are admitted one at a time, waiting
// only while `count` already exceeds `max`. max == 0 disables throttling.
bool Throttle::_should_wait(size_t c) const {
  if (!max) {
    return false;
  }
  const bool normal_overflow = (c <= max && count + c > max);
  const bool oversized_busy = (c >= max && count > max);
  return normal_overflow || oversized_busy;
}
} // namespace crimson::common
| 1,107 | 16.046154 | 72 | cc |
null | ceph-main/src/crimson/common/throttle.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/condition-variable.hh>
// pull seastar::timer<...>::timer definitions. FIX SEASTAR or reactor.hh
// is obligatory and should be included everywhere?
#include <seastar/core/reactor.hh>
#include "common/ThrottleInterface.h"
namespace crimson::common {
// Simple slot-count throttle for seastar. A max of 0 disables throttling.
class Throttle final : public ThrottleInterface {
  size_t max = 0;     // slot limit; 0 == unlimited
  size_t count = 0;   // slots currently taken
  size_t pending = 0; // callers blocked in get()
  // we cannot change the "count" of seastar::semaphore after it is created,
  // so use condition_variable instead.
  seastar::condition_variable on_free_slots;
public:
  explicit Throttle(size_t m)
    : max(m)
  {}
  // non-blocking take/put of c slots
  int64_t take(int64_t c = 1) override;
  int64_t put(int64_t c = 1) override;
  // block until c slots are available, then consume them
  seastar::future<> get(size_t c);
  size_t get_current() const {
    return count;
  }
  size_t get_max() const {
    return max;
  }
  size_t get_pending() const {
    return pending;
  }
  void reset_max(size_t m);
private:
  // predicate used by get(): true while a request of size c must wait
  bool _should_wait(size_t c) const;
};
} // namespace crimson::common
| 1,097 | 23.954545 | 76 | h |
null | ceph-main/src/crimson/common/tmap_helpers.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "crimson/common/tmap_helpers.h"
#include "include/buffer.h"
#include "include/encoding.h"
#include "include/rados.h"
namespace detail {
#define decode_or_return(v, bp) \
try { \
::decode(v, bp); \
} catch (...) { \
return -EINVAL; \
}
class TMapContents {
std::map<std::string, bufferlist> keys;
bufferlist header;
public:
TMapContents() = default;
int decode(bufferlist::const_iterator &bliter) {
keys.clear();
header.clear();
if (bliter.end()) {
return 0;
}
decode_or_return(header, bliter);
__u32 num_keys;
decode_or_return(num_keys, bliter);
for (; num_keys > 0; --num_keys) {
std::string key;
decode_or_return(key, bliter);
decode_or_return(keys[key], bliter);
}
return 0;
}
bufferlist encode() {
bufferlist bl;
::encode(header, bl);
::encode(static_cast<__u32>(keys.size()), bl);
for (auto &[k, v]: keys) {
::encode(k, bl);
::encode(v, bl);
}
return bl;
}
int update(bufferlist::const_iterator in) {
while (!in.end()) {
__u8 op;
decode_or_return(op, in);
if (op == CEPH_OSD_TMAP_HDR) {
decode_or_return(header, in);
continue;
}
std::string key;
decode_or_return(key, in);
switch (op) {
case CEPH_OSD_TMAP_SET: {
decode_or_return(keys[key], in);
break;
}
case CEPH_OSD_TMAP_CREATE: {
if (keys.contains(key)) {
return -EEXIST;
}
decode_or_return(keys[key], in);
break;
}
case CEPH_OSD_TMAP_RM: {
auto kiter = keys.find(key);
if (kiter == keys.end()) {
return -ENOENT;
}
keys.erase(kiter);
break;
}
case CEPH_OSD_TMAP_RMSLOPPY: {
keys.erase(key);
break;
}
}
}
return 0;
}
int put(bufferlist::const_iterator in) {
return 0;
}
};
}
namespace crimson::common {
using do_tmap_up_ret = tl::expected<bufferlist, int>;
do_tmap_up_ret do_tmap_up(bufferlist::const_iterator in, bufferlist contents)
{
detail::TMapContents tmap;
auto bliter = contents.cbegin();
int r = tmap.decode(bliter);
if (r < 0) {
return tl::unexpected(r);
}
r = tmap.update(in);
if (r < 0) {
return tl::unexpected(r);
}
return tmap.encode();
}
// Validate the tmap encoding at `in` and return its canonical re-encoding,
// or -EINVAL if it cannot be decoded.
// (The redundant redeclaration of the do_tmap_up_ret alias that preceded
// this function has been dropped; it is already declared above and in the
// header.)
do_tmap_up_ret do_tmap_put(bufferlist::const_iterator in)
{
  detail::TMapContents tmap;
  int r = tmap.decode(in);
  if (r < 0) {
    return tl::unexpected(r);
  }
  return tmap.encode();
}
}
| 2,583 | 18.575758 | 77 | cc |
null | ceph-main/src/crimson/common/tmap_helpers.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "include/expected.hpp"
#include "include/buffer.h"
#include "include/encoding.h"
namespace crimson::common {
/**
* do_tmap_up
*
* Performs tmap update instructions encoded in buffer referenced by in.
*
* @param [in] in iterator to buffer containing encoded tmap update operations
* @param [in] contents current contents of object
* @return buffer containing new object contents,
* -EINVAL for decoding errors,
* -EEXIST for CEPH_OSD_TMAP_CREATE on a key that exists
* -ENOENT for CEPH_OSD_TMAP_RM on a key that does not exist
*/
using do_tmap_up_ret = tl::expected<bufferlist, int>;
do_tmap_up_ret do_tmap_up(bufferlist::const_iterator in, bufferlist contents);
/**
* do_tmap_put
*
* Validates passed buffer pointed to by in and returns resulting object buffer.
*
* @param [in] in iterator to buffer containing tmap encoding
* @return buffer containing validated tmap encoded by in
* -EINVAL for decoding errors,
*/
using do_tmap_up_ret = tl::expected<bufferlist, int>;
do_tmap_up_ret do_tmap_put(bufferlist::const_iterator in);
}
| 1,186 | 27.95122 | 80 | h |
null | ceph-main/src/crimson/common/tri_mutex.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include "tri_mutex.h"
// The lock-mode adapter classes carry no state of their own: tri_mutex
// privately derives from all of them, so each method downcasts `this` back
// to the enclosing tri_mutex and forwards to the mode-specific operation.

seastar::future<> read_lock::lock()
{
  return static_cast<tri_mutex*>(this)->lock_for_read();
}

void read_lock::unlock()
{
  static_cast<tri_mutex*>(this)->unlock_for_read();
}

seastar::future<> write_lock::lock()
{
  // write_lock is non-greedy: it queues behind existing waiters
  return static_cast<tri_mutex*>(this)->lock_for_write(false);
}

void write_lock::unlock()
{
  static_cast<tri_mutex*>(this)->unlock_for_write();
}

seastar::future<> excl_lock::lock()
{
  return static_cast<tri_mutex*>(this)->lock_for_excl();
}

void excl_lock::unlock()
{
  static_cast<tri_mutex*>(this)->unlock_for_excl();
}

// promotions/demotions never block, so lock() completes immediately

seastar::future<> excl_lock_from_read::lock()
{
  static_cast<tri_mutex*>(this)->promote_from_read();
  return seastar::make_ready_future<>();
}

void excl_lock_from_read::unlock()
{
  static_cast<tri_mutex*>(this)->demote_to_read();
}

seastar::future<> excl_lock_from_write::lock()
{
  static_cast<tri_mutex*>(this)->promote_from_write();
  return seastar::make_ready_future<>();
}

void excl_lock_from_write::unlock()
{
  static_cast<tri_mutex*>(this)->demote_to_write();
}

// excl -> excl is the identity: nothing to acquire or release

seastar::future<> excl_lock_from_excl::lock()
{
  return seastar::make_ready_future<>();
}

void excl_lock_from_excl::unlock()
{
}
// A tri_mutex must be fully released before destruction.
tri_mutex::~tri_mutex()
{
  assert(!is_acquired());
}

// Acquire in read mode, queueing a promise if the fast path fails.
seastar::future<> tri_mutex::lock_for_read()
{
  if (try_lock_for_read()) {
    return seastar::make_ready_future<>();
  }
  waiters.emplace_back(seastar::promise<>(), type_t::read);
  return waiters.back().pr.get_future();
}

// Readers may share, but never overtake queued waiters (no starvation of
// writers/exclusive users already waiting).
bool tri_mutex::try_lock_for_read() noexcept
{
  if (!writers && !exclusively_used && waiters.empty()) {
    ++readers;
    return true;
  } else {
    return false;
  }
}

// Drop one reader; the last reader out wakes the next batch of waiters.
void tri_mutex::unlock_for_read()
{
  assert(readers > 0);
  if (--readers == 0) {
    wake();
  }
}

// Convert a sole reader into the exclusive user (never blocks; caller must
// be the only reader).
void tri_mutex::promote_from_read()
{
  assert(readers == 1);
  --readers;
  exclusively_used = true;
}

// Convert the exclusive user back into a reader.
void tri_mutex::demote_to_read()
{
  assert(exclusively_used);
  exclusively_used = false;
  ++readers;
}

// Acquire in write mode; `greedy` writers may jump ahead of queued waiters.
seastar::future<> tri_mutex::lock_for_write(bool greedy)
{
  if (try_lock_for_write(greedy)) {
    return seastar::make_ready_future<>();
  }
  waiters.emplace_back(seastar::promise<>(), type_t::write);
  return waiters.back().pr.get_future();
}
// Writers may share with other writers but not with readers or an
// exclusive user. A non-greedy writer also refuses to overtake queued
// waiters; a greedy one may jump the queue.
bool tri_mutex::try_lock_for_write(bool greedy) noexcept
{
  if (readers || exclusively_used) {
    return false;
  }
  if (!greedy && !waiters.empty()) {
    return false;
  }
  ++writers;
  return true;
}
// Drop one writer; the last writer out wakes the next batch of waiters.
void tri_mutex::unlock_for_write()
{
  assert(writers > 0);
  if (--writers == 0) {
    wake();
  }
}

// Convert a sole writer into the exclusive user (never blocks; caller must
// be the only writer).
void tri_mutex::promote_from_write()
{
  assert(writers == 1);
  --writers;
  exclusively_used = true;
}

// Convert the exclusive user back into a writer.
void tri_mutex::demote_to_write()
{
  assert(exclusively_used);
  exclusively_used = false;
  ++writers;
}
// for exclusive users
// Acquire exclusively, queueing a promise if the fast path fails.
seastar::future<> tri_mutex::lock_for_excl()
{
  if (try_lock_for_excl()) {
    return seastar::make_ready_future<>();
  }
  waiters.emplace_back(seastar::promise<>(), type_t::exclusive);
  return waiters.back().pr.get_future();
}

// Exclusive access requires the mutex to be completely free.
bool tri_mutex::try_lock_for_excl() noexcept
{
  if (readers == 0u && writers == 0u && !exclusively_used) {
    exclusively_used = true;
    return true;
  } else {
    return false;
  }
}

// Release exclusive access and wake the next batch of waiters.
void tri_mutex::unlock_for_excl()
{
  assert(exclusively_used);
  exclusively_used = false;
  wake();
}
// True while any reader, writer, or exclusive user currently holds the
// mutex in any mode.
bool tri_mutex::is_acquired() const
{
  return readers != 0u || writers != 0u || exclusively_used;
}
// Called once the mutex is completely free: admit the longest prefix of
// `waiters` that share one mode. Readers (or writers) are woken as a
// batch; an exclusive waiter is always woken alone.
void tri_mutex::wake()
{
  assert(!readers && !writers && !exclusively_used);
  type_t type = type_t::none;
  while (!waiters.empty()) {
    auto& waiter = waiters.front();
    if (type == type_t::exclusive) {
      // an exclusive grant never shares; stop after the first one
      break;
    } if (type == type_t::none) {
      // first waiter determines the mode of this batch
      type = waiter.type;
    } else if (type != waiter.type) {
      // to be woken in the next batch
      break;
    }
    switch (type) {
    case type_t::read:
      ++readers;
      break;
    case type_t::write:
      ++writers;
      break;
    case type_t::exclusive:
      exclusively_used = true;
      break;
    default:
      assert(0);
    }
    waiter.pr.set_value();
    waiters.pop_front();
  }
}
| 4,250 | 17.809735 | 72 | cc |
null | ceph-main/src/crimson/common/tri_mutex.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/future.hh>
#include <seastar/core/circular_buffer.hh>
// Stateless lock-mode facades. tri_mutex privately inherits from each of
// these; their methods (defined in tri_mutex.cc) downcast `this` back to
// the tri_mutex and forward to the corresponding mode's operations.

class read_lock {
public:
  seastar::future<> lock();
  void unlock();
};

class write_lock {
public:
  seastar::future<> lock();
  void unlock();
};

class excl_lock {
public:
  seastar::future<> lock();
  void unlock();
};

// promote from read to excl
class excl_lock_from_read {
public:
  seastar::future<> lock();
  void unlock();
};

// promote from write to excl
class excl_lock_from_write {
public:
  seastar::future<> lock();
  void unlock();
};

// promote from excl to excl
class excl_lock_from_excl {
public:
  seastar::future<> lock();
  void unlock();
};
/// shared/exclusive mutual exclusion
///
/// this lock design uses reader and writer is entirely and completely
/// independent of the conventional reader/writer lock usage. Here, what we
/// mean is that we can pipeline reads, and we can pipeline writes, but we
/// cannot allow a read while writes are in progress or a write while reads are
/// in progress. Any rmw operation is therefore exclusive.
///
/// tri_mutex is based on seastar::shared_mutex, but instead of two kinds of
/// waiters, tri_mutex keeps track of three kinds of lock users:
/// - readers
/// - writers
/// - exclusive users
class tri_mutex : private read_lock,
                  write_lock,
                  excl_lock,
                  excl_lock_from_read,
                  excl_lock_from_write,
                  excl_lock_from_excl
{
public:
  tri_mutex() = default;
  ~tri_mutex();

  // Mode-facade accessors: each returns *this upcast to the private base,
  // exposing only that mode's lock()/unlock() pair.
  read_lock& for_read() {
    return *this;
  }
  write_lock& for_write() {
    return *this;
  }
  excl_lock& for_excl() {
    return *this;
  }
  excl_lock_from_read& excl_from_read() {
    return *this;
  }
  excl_lock_from_write& excl_from_write() {
    return *this;
  }
  excl_lock_from_excl& excl_from_excl() {
    return *this;
  }

  // for shared readers
  seastar::future<> lock_for_read();
  bool try_lock_for_read() noexcept;
  void unlock_for_read();
  void promote_from_read();
  void demote_to_read();
  unsigned get_readers() const {
    return readers;
  }

  // for shared writers
  seastar::future<> lock_for_write(bool greedy);
  bool try_lock_for_write(bool greedy) noexcept;
  void unlock_for_write();
  void promote_from_write();
  void demote_to_write();
  unsigned get_writers() const {
    return writers;
  }

  // for exclusive users
  seastar::future<> lock_for_excl();
  bool try_lock_for_excl() noexcept;
  void unlock_for_excl();
  bool is_excl_acquired() const {
    return exclusively_used;
  }

  bool is_acquired() const;

  /// pass the provided exception to any waiting waiters
  template<typename Exception>
  void abort(Exception ex) {
    while (!waiters.empty()) {
      auto& waiter = waiters.front();
      waiter.pr.set_exception(std::make_exception_ptr(ex));
      waiters.pop_front();
    }
  }

private:
  // wake the next same-mode batch of waiters once the mutex is free
  void wake();
  unsigned readers = 0;
  unsigned writers = 0;
  bool exclusively_used = false;
  enum class type_t : uint8_t {
    read,
    write,
    exclusive,
    none,
  };
  // one queued acquisition: the promise to resolve plus the requested mode
  struct waiter_t {
    waiter_t(seastar::promise<>&& pr, type_t type)
      : pr(std::move(pr)), type(type)
    {}
    seastar::promise<> pr;
    type_t type;
  };
  seastar::circular_buffer<waiter_t> waiters;

  friend class read_lock;
  friend class write_lock;
  friend class excl_lock;
  friend class excl_lock_from_read;
  friend class excl_lock_from_write;
  friend class excl_lock_from_excl;
};
| 3,601 | 21.942675 | 79 | h |
null | ceph-main/src/crimson/common/type_helpers.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "boost/intrusive_ptr.hpp"
template<typename T> using Ref = boost::intrusive_ptr<T>;
| 207 | 22.111111 | 70 | h |
null | ceph-main/src/crimson/common/utility.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <type_traits>
namespace _impl {
template <class T> struct always_false : std::false_type {};
};
// Compile-time guard that an expression can be moved out from: overload
// resolution picks the const& overload for const arguments, and its
// static_assert (dependent on T, so it only fires on instantiation)
// rejects the call.
template <class T>
void assert_moveable(T& t) {
  // It's fine
}
template <class T>
void assert_moveable(const T& t) {
  static_assert(_impl::always_false<T>::value, "unable to move-out from T");
}
namespace internal {
// Helper: the index pack I... unpacks the tuple into the member-call
// argument list.
template <typename Obj, typename Method, typename ArgTuple, size_t... I>
static auto _apply_method_to_tuple(
  Obj &obj, Method method, ArgTuple &&args,
  std::index_sequence<I...>) {
  return (obj.*method)(std::get<I>(std::forward<ArgTuple>(args))...);
}
}

// std::apply, but for pointers-to-member-functions: invokes obj.*method
// with the elements of the tuple as arguments and returns the result.
template <typename Obj, typename Method, typename ArgTuple>
auto apply_method_to_tuple(Obj &obj, Method method, ArgTuple &&args) {
  using indices_t = std::make_index_sequence<std::tuple_size_v<ArgTuple>>;
  return internal::_apply_method_to_tuple(
    obj, method, std::forward<ArgTuple>(args), indices_t{});
}
| 1,050 | 25.948718 | 78 | h |
null | ceph-main/src/crimson/crush/CrushLocation.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "CrushLocation.h"
#include <vector>
#include <boost/algorithm/string/trim.hpp>
#include <seastar/util/process.hh>
#include <seastar/util/later.hh>
#include "crush/CrushWrapper.h"
#include "crimson/common/log.h"
#include "crimson/common/config_proxy.h"
static seastar::logger& logger() {
return crimson::get_logger(ceph_subsys_crush);
}
using namespace crimson::common;
namespace crimson::crush {
// Refresh `loc` from the "crush_location" config value, if one is set.
seastar::future<> CrushLocation::update_from_conf()
{
  if (const auto loc_str = local_conf().get_val<std::string>("crush_location");
      !loc_str.empty()) {
    _parse(loc_str);
  }
  return seastar::now();
}
void CrushLocation::_parse(const std::string& s)
{
std::multimap<std::string, std::string> new_crush_location;
std::vector<std::string> lvec;
get_str_vec(s, ";, \t", lvec);
int r = CrushWrapper::parse_loc_multimap(lvec, &new_crush_location);
if (r < 0) {
logger().error("CrushWrapper::parse_loc_multimap error, keeping original\
crush_location {}", *this);
return;
}
loc.swap(new_crush_location);
logger().info("{}: crush_location is {}", __func__, *this);
return;
}
// Refresh `loc` by running the user-configured "crush_location_hook"
// executable: verify it exists and is executable, spawn it with
// --cluster/--id/--type arguments, read its stdout, and on a clean exit
// parse the trimmed output as the new location. Any failure (missing hook,
// not executable, killed by signal, non-zero exit) is logged and rethrown
// as std::runtime_error.
seastar::future<> CrushLocation::update_from_hook()
{
  auto crush_location_hook = local_conf().get_val<std::string>("crush_location_hook");
  if (crush_location_hook.length() == 0)
    return seastar::now();

  return seastar::file_exists(
    crush_location_hook
  ).then([this] (bool result) {
    if (!result) {
      std::stringstream errmsg;
      errmsg << "the user define crush location hook: "
             << local_conf().get_val<std::string>("crush_location_hook")
             << " is not exists.";
      logger().error("{}", errmsg.str());
      throw std::runtime_error(errmsg.str());
    }

    return seastar::file_accessible(
      local_conf().get_val<std::string>("crush_location_hook"),
      seastar::access_flags::execute
    ).then([this] (bool result) {
      if (!result) {
        std::stringstream errmsg;
        errmsg << "the user define crush location hook: "
               << local_conf().get_val<std::string>("crush_location_hook")
               << " is not executable.";
        logger().error("{}", errmsg.str());
        throw std::runtime_error(errmsg.str());
      }

      seastar::experimental::spawn_parameters params = {
        .argv = {
          local_conf().get_val<std::string>("crush_location_hook"),
          "--cluster",
          local_conf()->cluster,
          "--id",
          local_conf()->name.get_id(),
          "--type",
          local_conf()->name.get_type_str()
        }
      };
      return seastar::experimental::spawn_process(
        local_conf().get_val<std::string>("crush_location_hook"),
        std::move(params)
      ).then([this] (auto process) {
        auto stdout = process.stdout();
        // keep the process and its stdout stream alive across the chain
        return do_with(
          std::move(process),
          std::move(stdout),
          [this](auto& process, auto& stdout)
        {
          return stdout.read().then([] (seastar::temporary_buffer<char> buf) {
            auto out = std::string(buf.get(), buf.size());
            boost::algorithm::trim_if(out, boost::algorithm::is_any_of(" \n\r\t"));
            return seastar::make_ready_future<std::string>(std::move(out));
          }).then([&process, this] (auto out) {
            return process.wait(
            ).then([out = std::move(out), this] (auto wstatus) {
              auto* exit_signal = std::get_if<seastar::experimental::process::wait_signaled>(&wstatus);
              if (exit_signal != nullptr) {
                std::stringstream errmsg;
                errmsg << "the user define crush location hook: "
                       << local_conf().get_val<std::string>("crush_location_hook")
                       << " terminated, terminated signal is "
                       << exit_signal->terminating_signal;
                logger().error("{}", errmsg.str());
                throw std::runtime_error(errmsg.str());
              }

              // NOTE(review): wstatus is assumed to hold wait_exited
              // whenever it is not wait_signaled; exit_status is
              // dereferenced without a null check — confirm the variant
              // has no other alternatives.
              auto* exit_status = std::get_if<seastar::experimental::process::wait_exited>(&wstatus);
              if (exit_status->exit_code != 0) {
                std::stringstream errmsg;
                errmsg << "the user define crush location hook: "
                       << local_conf().get_val<std::string>("crush_location_hook")
                       << " execute failed, exit_code is " << exit_status->exit_code;
                logger().error("{}", errmsg.str());
                throw std::runtime_error(errmsg.str());
              } else {
                _parse(out);
              }
              return seastar::now();
            });
          });
        });
      });
    });
  });
}
// Establish the initial CRUSH location: an explicit "crush_location"
// config value wins, then the user-supplied hook; otherwise fall back to
// host=<short hostname>, root=default.
// Fix: POSIX gethostname() does not guarantee null-termination when the
// name is truncated, so the buffer is now terminated explicitly before use.
seastar::future<> CrushLocation::init_on_startup()
{
  if (local_conf().get_val<std::string>("crush_location").length()) {
    return update_from_conf();
  }
  if (local_conf().get_val<std::string>("crush_location_hook").length()) {
    return update_from_hook();
  }

  // start with a sane default
  char hostname[HOST_NAME_MAX + 1];
  int r = gethostname(hostname, sizeof(hostname));
  if (r < 0)
    strcpy(hostname, "unknown_host");
  // gethostname() may leave the buffer unterminated on truncation
  hostname[sizeof(hostname) - 1] = '\0';

  // use short hostname
  for (unsigned i = 0; hostname[i]; ++i) {
    if (hostname[i] == '.') {
      hostname[i] = '\0';
      break;
    }
  }

  loc.clear();
  loc.insert(std::make_pair<std::string, std::string>("host", hostname));
  loc.insert(std::make_pair<std::string, std::string>("root", "default"));
  return seastar::now();
}
// Return a copy of the current type -> position multimap.
std::multimap<std::string,std::string> CrushLocation::get_location() const
{
  return loc;
}
// Print the location as a comma-separated list of quoted "type=pos" pairs.
std::ostream& operator<<(std::ostream& os, const CrushLocation& loc)
{
  const char* sep = "";
  for (const auto& [type, pos] : loc.get_location()) {
    os << sep << '"' << type << '=' << pos << '"';
    sep = ", ";
  }
  return os;
}
}
| 5,935 | 30.743316 | 103 | cc |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.