Search is not available for this dataset
repo
stringlengths 2
152
⌀ | file
stringlengths 15
239
| code
stringlengths 0
58.4M
| file_length
int64 0
58.4M
| avg_line_length
float64 0
1.81M
| max_line_length
int64 0
12.7M
| extension_type
stringclasses 364
values |
---|---|---|---|---|---|---|
null |
ceph-main/src/mon/MonCap.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_MONCAP_H
#define CEPH_MONCAP_H
#include <ostream>
#include "include/common_fwd.h"
#include "include/types.h"
#include "common/entity_name.h"
#include "mds/mdstypes.h"
// Capability bits stored in mon_rwxa_t::val (bit 0 is unused).
static const __u8 MON_CAP_R = (1 << 1);      // read
static const __u8 MON_CAP_W = (1 << 2);      // write
static const __u8 MON_CAP_X = (1 << 3);      // execute
static const __u8 MON_CAP_ALL = MON_CAP_R | MON_CAP_W | MON_CAP_X;
static const __u8 MON_CAP_ANY = 0xff;        // *
/**
 * Thin wrapper around a __u8 bitmask of MON_CAP_* flags.
 *
 * Implicit conversion to and from __u8 is intentional, so values of this
 * type can be combined directly with the MON_CAP_* constants above.
 */
struct mon_rwxa_t {
  __u8 val;   // bitwise OR of MON_CAP_* flags

  // implicit by design; see suppression below
  // cppcheck-suppress noExplicitConstructor
  mon_rwxa_t(__u8 v = 0) : val(v) {}

  mon_rwxa_t& operator=(__u8 v) {
    val = v;
    return *this;
  }

  // allow direct use in bitwise/boolean expressions
  operator __u8() const {
    return val;
  }
};
std::ostream& operator<<(std::ostream& out, const mon_rwxa_t& p);
/**
 * A constraint on a single command argument value.
 *
 * Used by MonCapGrant::command_args: an argument satisfies the constraint
 * if it equals, starts with, or regex-matches @c value, according to
 * @c match_type.
 */
struct StringConstraint {
  enum MatchType {
    MATCH_TYPE_NONE,    // unconstrained
    MATCH_TYPE_EQUAL,   // exact equality with value
    MATCH_TYPE_PREFIX,  // value is a required prefix
    MATCH_TYPE_REGEX    // value is a regular expression
  };

  MatchType match_type = MATCH_TYPE_NONE;
  std::string value;

  StringConstraint() {}
  // value is taken by value and moved to avoid a needless copy
  StringConstraint(MatchType match_type, std::string value)
    : match_type(match_type), value(std::move(value)) {
  }
};
std::ostream& operator<<(std::ostream& out, const StringConstraint& c);
struct MonCapGrant {
/*
* A grant can come in one of five forms:
*
* - a blanket allow ('allow rw', 'allow *')
* - this will match against any service and the read/write/exec flags
* in the mon code. semantics of what X means are somewhat ad hoc.
*
* - a service allow ('allow service mds rw')
* - this will match against a specific service and the r/w/x flags.
*
* - a profile ('allow profile osd')
* - this will match against specific monitor-enforced semantics of what
* this type of user should need to do. examples include 'osd', 'mds',
* 'bootstrap-osd'.
*
* - a command ('allow command foo', 'allow command bar with arg1=val1 arg2 prefix val2')
* this includes the command name (the prefix string), and a set
* of key/value pairs that constrain use of that command. if no pairs
* are specified, any arguments are allowed; if a pair is specified, that
* argument must be present and equal or match a prefix.
*
* - an fs name ('allow fsname foo')
* - this will restrict access to MDSMaps in the FSMap to the provided
* fs name.
*/
std::string service;
std::string profile;
std::string command;
std::map<std::string, StringConstraint> command_args;
std::string fs_name;
// restrict by network
std::string network;
// these are filled in by parse_network(), called by MonCap::parse()
entity_addr_t network_parsed;
unsigned network_prefix = 0;
bool network_valid = true;
void parse_network();
mon_rwxa_t allow;
// explicit grants that a profile grant expands to; populated as
// needed by expand_profile() (via is_match()) and cached here.
mutable std::list<MonCapGrant> profile_grants;
void expand_profile(const EntityName& name) const;
MonCapGrant() : allow(0) {}
// cppcheck-suppress noExplicitConstructor
MonCapGrant(mon_rwxa_t a) : allow(a) {}
MonCapGrant(std::string s, mon_rwxa_t a) : service(std::move(s)), allow(a) {}
// cppcheck-suppress noExplicitConstructor
MonCapGrant(std::string c) : command(std::move(c)) {}
MonCapGrant(std::string c, std::string a, StringConstraint co) : command(std::move(c)) {
command_args[a] = co;
}
MonCapGrant(mon_rwxa_t a, std::string fsname) : fs_name(fsname), allow(a) {}
/**
* check if given request parameters match our constraints
*
* @param cct context
* @param name entity name
* @param service service (if any)
* @param command command (if any)
* @param command_args command args (if any)
* @return bits we allow
*/
mon_rwxa_t get_allowed(CephContext *cct,
EntityName name,
const std::string& service,
const std::string& command,
const std::map<std::string, std::string>& command_args) const;
bool is_allow_all() const {
return
allow == MON_CAP_ANY &&
service.length() == 0 &&
profile.length() == 0 &&
command.length() == 0 &&
fs_name.empty();
}
};
std::ostream& operator<<(std::ostream& out, const MonCapGrant& g);
/**
 * A monitor capability: the ordered list of MonCapGrants parsed from a
 * cap string, plus the original string itself.
 */
struct MonCap {
  std::string text;                  // unparsed cap string (set by parse())
  std::vector<MonCapGrant> grants;

  MonCap() {}
  explicit MonCap(const std::vector<MonCapGrant> &g) : grants(g) {}

  // return the raw cap string this object was parsed from
  std::string get_str() const {
    return text;
  }

  bool is_allow_all() const;
  void set_allow_all();
  // parse a cap string into grants; presumably returns false on a parse
  // failure, with a description written to *err if non-null -- the
  // implementation lives elsewhere
  bool parse(const std::string& str, std::ostream *err=NULL);

  /**
   * check if we are capable of something
   *
   * This method actually checks a description of a particular operation against
   * what the capability has specified.
   *
   * @param cct context
   * @param name entity name
   * @param service service name
   * @param command command id
   * @param command_args
   * @param op_may_read whether the operation may need to read
   * @param op_may_write whether the operation may need to write
   * @param op_may_exec whether the operation may exec
   * @param addr peer address (presumably matched against per-grant network
   *             restrictions; see MonCapGrant::network)
   * @return true if the operation is allowed, false otherwise
   */
  bool is_capable(CephContext *cct,
		  EntityName name,
		  const std::string& service,
		  const std::string& command,
		  const std::map<std::string, std::string>& command_args,
		  bool op_may_read, bool op_may_write, bool op_may_exec,
		  const entity_addr_t& addr) const;

  void encode(ceph::buffer::list& bl) const;
  void decode(ceph::buffer::list::const_iterator& bl);
  void dump(ceph::Formatter *f) const;
  static void generate_test_instances(std::list<MonCap*>& ls);

  // List the fs names this cap is restricted to.  An empty result means
  // "unrestricted": as soon as any grant carries no fs_name we return {}.
  std::vector<std::string> allowed_fs_names() const {
    std::vector<std::string> ret;
    for (auto& g : grants) {
      if (not g.fs_name.empty()) {
	ret.push_back(g.fs_name);
      } else {
	return {};
      }
    }
    return ret;
  }

  // Check whether ename may access fs_name with the requested rwx mask,
  // either via a direct grant or via one of the grants a profile expands
  // to (service "fs" or "mds").
  bool fs_name_capable(const EntityName& ename, std::string_view fs_name,
		       __u8 mask) {
    for (auto& g : grants) {
      // blanket 'allow *' matches everything
      if (g.is_allow_all()) {
	return true;
      }

      // direct grant: unrestricted fs_name or an exact match
      if ((g.fs_name.empty() || g.fs_name == fs_name) && (mask & g.allow)) {
	return true;
      }

      // profile grants are expanded lazily and cached on the grant
      g.expand_profile(ename);
      for (auto& pg : g.profile_grants) {
	if ((pg.service == "fs" || pg.service == "mds") &&
	    (pg.fs_name.empty() || pg.fs_name == fs_name) &&
	    (pg.allow & mask)) {
	  return true;
	}
      }
    }

    return false;
  }
};
WRITE_CLASS_ENCODER(MonCap)
std::ostream& operator<<(std::ostream& out, const MonCap& cap);
#endif
| 6,560 | 27.776316 | 92 |
h
|
null |
ceph-main/src/mon/MonClient.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <algorithm>
#include <iterator>
#include <random>
#include <boost/range/adaptor/map.hpp>
#include <boost/range/adaptor/filtered.hpp>
#include <boost/range/algorithm/copy.hpp>
#include <boost/range/algorithm_ext/copy_n.hpp>
#include "common/weighted_shuffle.h"
#include "include/random.h"
#include "include/scope_guard.h"
#include "include/stringify.h"
#include "messages/MMonGetMap.h"
#include "messages/MMonGetVersion.h"
#include "messages/MMonGetMap.h"
#include "messages/MMonGetVersionReply.h"
#include "messages/MMonMap.h"
#include "messages/MConfig.h"
#include "messages/MAuth.h"
#include "messages/MLogAck.h"
#include "messages/MAuthReply.h"
#include "messages/MMonCommand.h"
#include "messages/MMonCommandAck.h"
#include "messages/MCommand.h"
#include "messages/MCommandReply.h"
#include "messages/MPing.h"
#include "messages/MMonSubscribe.h"
#include "messages/MMonSubscribeAck.h"
#include "common/errno.h"
#include "common/hostname.h"
#include "common/LogClient.h"
#include "MonClient.h"
#include "error_code.h"
#include "MonMap.h"
#include "auth/Auth.h"
#include "auth/KeyRing.h"
#include "auth/AuthClientHandler.h"
#include "auth/AuthRegistry.h"
#include "auth/RotatingKeyRing.h"
#define dout_subsys ceph_subsys_monc
#undef dout_prefix
#define dout_prefix *_dout << "monclient" << (_hunting() ? "(hunting)":"") << ": "
namespace bs = boost::system;
using std::string;
using namespace std::literals;
// Construct an idle MonClient.  No messenger is attached and no session
// exists yet; init()/authenticate() set those up later.  The hunt backoff
// multiplier starts at the configured minimum and is adjusted by
// _start_hunting()/_un_backoff().
MonClient::MonClient(CephContext *cct_, boost::asio::io_context& service) :
  Dispatcher(cct_),
  AuthServer(cct_),
  messenger(NULL),
  timer(cct_, monc_lock),
  service(service),
  initialized(false),
  log_client(NULL),
  more_log_pending(false),
  want_monmap(true),
  had_a_connection(false),
  reopen_interval_multiplier(
    cct_->_conf.get_val<double>("mon_client_hunt_interval_min_multiple")),
  last_mon_command_tid(0),
  version_req_id(0)
{}
// All members clean themselves up (RAII); the empty out-of-line body is
// expressed as an explicitly defaulted destructor.
MonClient::~MonClient() = default;
// Build the initial monmap via MonMap::build_initial() and dump it to the
// debug log.  Returns build_initial()'s result.
int MonClient::build_initial_monmap()
{
  ldout(cct, 10) << __func__ << dendl;
  int r = monmap.build_initial(cct, false, std::cerr);
  // print() streams directly into the open dout buffer started by the
  // ldout below; the trailing *_dout << dendl terminates that entry
  ldout(cct,10) << "monmap:\n";
  monmap.print(*_dout);
  *_dout << dendl;
  return r;
}
// Subscribe to the monmap and block until one has been received
// (handle_monmap() clears want_monmap and signals map_cond).
int MonClient::get_monmap()
{
  ldout(cct, 10) << __func__ << dendl;
  std::unique_lock lock(monc_lock);

  sub.want("monmap", 0, 0);
  if (!_opened()) {
    _reopen_session();
  }

  // wait until handle_monmap() has delivered a map
  map_cond.wait(lock, [this] { return !want_monmap; });

  ldout(cct, 10) << __func__ << " done" << dendl;
  return 0;
}
// Bootstrap helper: build an initial monmap, bring up a temporary
// messenger, authenticate, and wait until we have both a monmap and (on
// mimic+ monitors) the initial MConfig, which is applied before all the
// temporary state is torn down again.  Returns 0 or a negative errno.
int MonClient::get_monmap_and_config()
{
  ldout(cct, 10) << __func__ << dendl;
  ceph_assert(!messenger);   // we create and own a temporary messenger here

  int tries = 10;   // retry budget for timed-out attempts

  cct->init_crypto();
  auto shutdown_crypto = make_scope_guard([this] {
    cct->shutdown_crypto();
  });

  int r = build_initial_monmap();
  if (r < 0) {
    lderr(cct) << __func__ << " cannot identify monitors to contact" << dendl;
    return r;
  }

  messenger = Messenger::create_client_messenger(
    cct, "temp_mon_client");
  ceph_assert(messenger);
  messenger->add_dispatcher_head(this);
  messenger->start();
  auto shutdown_msgr = make_scope_guard([this] {
    messenger->shutdown();
    messenger->wait();
    delete messenger;
    messenger = nullptr;
    // preserve the cluster fsid we learned during bootstrap
    if (!monmap.fsid.is_zero()) {
      cct->_conf.set_val("fsid", stringify(monmap.fsid));
    }
  });

  // tell handle_config() to stash the incoming MConfig in
  // bootstrap_config for us instead of applying it asynchronously
  want_bootstrap_config = true;
  auto shutdown_config = make_scope_guard([this] {
    std::unique_lock l(monc_lock);
    want_bootstrap_config = false;
    bootstrap_config.reset();
  });

  ceph::ref_t<MConfig> config;
  while (tries-- > 0) {
    r = init();
    if (r < 0) {
      return r;
    }
    r = authenticate(std::chrono::duration<double>(cct->_conf.get_val<std::chrono::seconds>("client_mount_timeout")).count());
    if (r == -ETIMEDOUT) {
      // retry with the remaining budget
      shutdown();
      continue;
    }
    if (r < 0) {
      break;
    }
    {
      std::unique_lock l(monc_lock);
      if (monmap.get_epoch() &&
	  !monmap.persistent_features.contains_all(
	    ceph::features::mon::FEATURE_MIMIC)) {
	// pre-mimic monitors never send MConfig; monmap alone is success
	ldout(cct,10) << __func__ << " pre-mimic monitor, no config to fetch"
		      << dendl;
	r = 0;
	break;
      }
      // wait (bounded per iteration) for both a non-zero monmap epoch and
      // the stashed bootstrap config
      while ((!bootstrap_config || monmap.get_epoch() == 0) && r == 0) {
	ldout(cct,20) << __func__ << " waiting for monmap|config" << dendl;
	auto status = map_cond.wait_for(l, ceph::make_timespan(
	    cct->_conf->mon_client_hunt_interval));
	if (status == std::cv_status::timeout) {
	  r = -ETIMEDOUT;
	}
      }

      if (bootstrap_config) {
	ldout(cct,10) << __func__ << " success" << dendl;
	config = std::move(bootstrap_config);
	r = 0;
	break;
      }
    }
    lderr(cct) << __func__ << " failed to get config" << dendl;
    shutdown();
    continue;
  }

  if (config) {
    // apply the bootstrap config to ensure its applied prior to completing
    // the bootstrap
    cct->_conf.set_mon_vals(cct, config->config, config_cb);
  }

  shutdown();
  return r;
}
/**
* Ping the monitor with id @p mon_id and set the resulting reply in
* the provided @p result_reply, if this last parameter is not NULL.
*
* So that we don't rely on the MonClient's default messenger, set up
 * during connect(), we create our own messenger to communicate with the
* specified monitor. This is advantageous in the following ways:
*
* - Isolate the ping procedure from the rest of the MonClient's operations,
* allowing us to not acquire or manage the big monc_lock, thus not
* having to block waiting for some other operation to finish before we
* can proceed.
* * for instance, we can ping mon.FOO even if we are currently hunting
* or blocked waiting for auth to complete with mon.BAR.
*
* - Ping a monitor prior to establishing a connection (using connect())
* and properly establish the MonClient's messenger. This frees us
* from dealing with the complex foo that happens in connect().
*
* We also don't rely on MonClient as a dispatcher for this messenger,
* unlike what happens with the MonClient's default messenger. This allows
* us to sandbox the whole ping, having it much as a separate entity in
* the MonClient class, considerably simplifying the handling and dispatching
* of messages without needing to consider monc_lock.
*
* Current drawback is that we will establish a messenger for each ping
* we want to issue, instead of keeping a single messenger instance that
* would be used for all pings.
*/
// Ping mon.<mon_id> over a dedicated throwaway messenger (see the comment
// block above).  On success the reply (if requested) is stored in
// *result_reply.  Returns 0, -EINVAL, -ENOENT, or a negated wait error.
int MonClient::ping_monitor(const string &mon_id, string *result_reply)
{
  ldout(cct, 10) << __func__ << dendl;

  string new_mon_id;
  // accept both "FOO" and the "noname-FOO" form present in the monmap
  if (monmap.contains("noname-"+mon_id)) {
    new_mon_id = "noname-"+mon_id;
  } else {
    new_mon_id = mon_id;
  }

  if (new_mon_id.empty()) {
    ldout(cct, 10) << __func__ << " specified mon id is empty!" << dendl;
    return -EINVAL;
  } else if (!monmap.contains(new_mon_id)) {
    ldout(cct, 10) << __func__ << " no such monitor 'mon." << new_mon_id << "'"
		   << dendl;
    return -ENOENT;
  }

  // N.B. monc isn't initialized

  auth_registry.refresh_config();

  KeyRing keyring;
  keyring.from_ceph_context(cct);
  RotatingKeyRing rkeyring(cct, cct->get_module_type(), &keyring);

  // NOTE(review): pinger and smsgr are raw-owned; an exception thrown
  // between here and the deletes below would leak them -- consider
  // unique_ptr with a custom teardown.
  MonClientPinger *pinger = new MonClientPinger(cct,
						&rkeyring,
						result_reply);

  Messenger *smsgr = Messenger::create_client_messenger(cct, "temp_ping_client");
  smsgr->add_dispatcher_head(pinger);
  smsgr->set_auth_client(pinger);
  smsgr->start();

  ConnectionRef con = smsgr->connect_to_mon(monmap.get_addrs(new_mon_id));
  ldout(cct, 10) << __func__ << " ping mon." << new_mon_id
		 << " " << con->get_peer_addr() << dendl;

  pinger->mc.reset(new MonConnection(cct, con, 0, &auth_registry));
  pinger->mc->start(monmap.get_epoch(), entity_name);
  con->send_message(new MPing);

  int ret = pinger->wait_for_reply(cct->_conf->mon_client_ping_timeout);
  if (ret == 0) {
    ldout(cct,10) << __func__ << " got ping reply" << dendl;
  } else {
    // presumably wait_for_reply returns a positive error code; convert to
    // the usual negative-errno convention -- TODO confirm
    ret = -ret;
  }

  // tear down in order: connection, MonConnection, messenger, pinger
  con->mark_down();
  pinger->mc.reset();
  smsgr->shutdown();
  smsgr->wait();
  delete smsgr;
  delete pinger;
  return ret;
}
// Dispatch a message from the messenger.  Returns true when the message
// was consumed (its reference dropped), false to leave it for another
// dispatcher (e.g. Objecter).
bool MonClient::ms_dispatch(Message *m)
{
  // we only care about these message types
  switch (m->get_type()) {
  case CEPH_MSG_MON_MAP:
  case CEPH_MSG_AUTH_REPLY:
  case CEPH_MSG_MON_SUBSCRIBE_ACK:
  case CEPH_MSG_MON_GET_VERSION_REPLY:
  case MSG_MON_COMMAND_ACK:
  case MSG_COMMAND_REPLY:
  case MSG_LOGACK:
  case MSG_CONFIG:
    break;
  case CEPH_MSG_PING:
    // consume pings silently
    m->put();
    return true;
  default:
    return false;
  }

  std::lock_guard lock(monc_lock);

  // drop messages from mons we no longer have a session (or hunting
  // attempt) with; anon connections are handled per-type below
  if (!m->get_connection()->is_anon() &&
      m->get_source().type() == CEPH_ENTITY_TYPE_MON) {
    if (_hunting()) {
      auto p = _find_pending_con(m->get_connection());
      if (p == pending_cons.end()) {
	// ignore any messages outside hunting sessions
	ldout(cct, 10) << "discarding stray monitor message " << *m << dendl;
	m->put();
	return true;
      }
    } else if (!active_con || active_con->get_con() != m->get_connection()) {
      // ignore any messages outside our session(s)
      ldout(cct, 10) << "discarding stray monitor message " << *m << dendl;
      m->put();
      return true;
    }
  }

  switch (m->get_type()) {
  case CEPH_MSG_MON_MAP:
    handle_monmap(static_cast<MMonMap*>(m));
    if (passthrough_monmap) {
      // leave the reference for the next dispatcher
      return false;
    } else {
      m->put();
    }
    break;
  case CEPH_MSG_AUTH_REPLY:
    handle_auth(static_cast<MAuthReply*>(m));
    break;
  case CEPH_MSG_MON_SUBSCRIBE_ACK:
    handle_subscribe_ack(static_cast<MMonSubscribeAck*>(m));
    break;
  case CEPH_MSG_MON_GET_VERSION_REPLY:
    handle_get_version_reply(static_cast<MMonGetVersionReply*>(m));
    break;
  case MSG_MON_COMMAND_ACK:
    handle_mon_command_ack(static_cast<MMonCommandAck*>(m));
    break;
  case MSG_COMMAND_REPLY:
    if (m->get_connection()->is_anon() &&
	m->get_source().type() == CEPH_ENTITY_TYPE_MON) {
      // this connection is from 'tell'... ignore everything except our command
      // reply.  (we'll get misc other message because we authenticated, but we
      // don't need them.)
      handle_command_reply(static_cast<MCommandReply*>(m));
      return true;
    }
    // leave the message for another dispatch handler (e.g., Objecter)
    return false;
  case MSG_LOGACK:
    if (log_client) {
      log_client->handle_log_ack(static_cast<MLogAck*>(m));
      m->put();
      if (more_log_pending) {
	send_log();
      }
    } else {
      m->put();
    }
    break;
  case MSG_CONFIG:
    handle_config(static_cast<MConfig*>(m));
    break;
  }
  return true;
}
// Push the next batch of log entries (if any) to the monitor and remember
// whether more remain queued.
void MonClient::send_log(bool flush)
{
  if (!log_client) {
    return;
  }
  auto entries = log_client->get_mon_log_message(flush);
  if (entries) {
    _send_mon_message(std::move(entries));
  }
  more_log_pending = log_client->are_pending();
}
// Public entry point: grab the client lock and push out pending log
// entries.
void MonClient::flush_log()
{
  std::lock_guard lock{monc_lock};
  send_log();
}
/* Unlike all the other message-handling functions, we don't put away a reference
 * because we want to support MMonMap passthrough to other Dispatchers. */
// Decode a new monmap and reconcile our session with it: if the mon we
// were talking to vanished (or we should switch to its msgr2 address),
// reopen the session.  Also wakes waiters and clears want_monmap.
void MonClient::handle_monmap(MMonMap *m)
{
  ldout(cct, 10) << __func__ << " " << *m << dendl;
  auto con_addrs = m->get_source_addrs();
  string old_name = monmap.get_name(con_addrs);
  const auto old_epoch = monmap.get_epoch();

  auto p = m->monmapbl.cbegin();
  decode(monmap, p);

  // note: log the epoch we held *before* decoding; monmap.get_epoch()
  // would now return the new epoch, which is already printed above
  ldout(cct, 10) << " got monmap " << monmap.epoch
		 << " from mon." << old_name
		 << " (according to old e" << old_epoch << ")"
		 << dendl;
  ldout(cct, 10) << "dump:\n";
  monmap.print(*_dout);
  *_dout << dendl;

  if (old_epoch != monmap.get_epoch()) {
    // new map: every monitor is a fresh hunt candidate again
    tried.clear();
  }
  if (old_name.size() == 0) {
    ldout(cct,10) << " can't identify which mon we were connected to" << dendl;
    _reopen_session();
  } else {
    auto new_name = monmap.get_name(con_addrs);
    if (new_name.empty()) {
      ldout(cct, 10) << "mon." << old_name << " at " << con_addrs
		     << " went away" << dendl;
      // can't find the mon we were talking to (above)
      _reopen_session();
    } else if (messenger->should_use_msgr2() &&
	       monmap.get_addrs(new_name).has_msgr2() &&
	       !con_addrs.has_msgr2()) {
      ldout(cct,1) << " mon." << new_name << " has (v2) addrs "
		   << monmap.get_addrs(new_name) << " but i'm connected to "
		   << con_addrs << ", reconnecting" << dendl;
      _reopen_session();
    }
  }

  cct->set_mon_addrs(monmap);

  sub.got("monmap", monmap.get_epoch());
  map_cond.notify_all();
  want_monmap = false;

  // authenticate_err == 1 means auth is still "in progress"; a monmap is
  // the last piece needed, so finish it successfully now
  if (authenticate_err == 1) {
    _finish_auth(0);
  }
}
// Handle an incoming MConfig: either stash it for get_monmap_and_config()
// (bootstrap path) or apply it asynchronously on the finish strand.
void MonClient::handle_config(MConfig *m)
{
  ldout(cct,10) << __func__ << " " << *m << dendl;

  if (want_bootstrap_config) {
    // get_monmap_and_config is waiting for config which it will apply
    // synchronously
    bootstrap_config = ceph::ref_t<MConfig>(m, false);  // adopt m's ref
    map_cond.notify_all();
    return;
  }

  // Take the sledgehammer approach to ensuring we don't depend on
  // anything in MonClient.
  boost::asio::post(finish_strand,
		    [m, cct = boost::intrusive_ptr<CephContext>(cct),
		     config_notify_cb = config_notify_cb,
		     config_cb = config_cb]() {
    cct->_conf.set_mon_vals(cct.get(), m->config, config_cb);
    if (config_notify_cb) {
      config_notify_cb();
    }
    m->put();   // drop the message ref only after the values are applied
  });
}
// ----------------------
// One-time setup: load the keyring (when cephx is supported), build the
// rotating keyring, hook this client into the messenger, start the tick
// timer, and register the "rotate-key" admin socket command.
// Returns 0, or a negative error if no keyring / no auth method is usable.
int MonClient::init()
{
  ldout(cct, 10) << __func__ << dendl;

  entity_name = cct->_conf->name;

  auth_registry.refresh_config();

  std::lock_guard l(monc_lock);
  keyring.reset(new KeyRing);
  if (auth_registry.is_supported_method(messenger->get_mytype(),
					CEPH_AUTH_CEPHX)) {
    // this should succeed, because auth_registry just checked!
    int r = keyring->from_ceph_context(cct);
    if (r != 0) {
      // but be somewhat graceful in case there was a race condition
      lderr(cct) << "keyring not found" << dendl;
      return r;
    }
  }
  if (!auth_registry.any_supported_methods(messenger->get_mytype())) {
    return -ENOENT;
  }

  rotating_secrets.reset(
    new RotatingKeyRing(cct, cct->get_module_type(), keyring.get()));

  initialized = true;

  messenger->set_auth_client(this);
  messenger->add_dispatcher_head(this);

  timer.init();
  schedule_tick();

  cct->get_admin_socket()->register_command(
    "rotate-key",
    this,
    "rotate live authentication key");

  return 0;
}
// Tear down session state: fail outstanding version requests, cancel
// in-flight mon commands, drop all connections and auth state, and shut
// down the tick timer.
void MonClient::shutdown()
{
  ldout(cct, 10) << __func__ << dendl;

  cct->get_admin_socket()->unregister_commands(this);

  monc_lock.lock();
  stopping = true;
  // complete all pending get_version() requests with "shutting down"
  while (!version_requests.empty()) {
    ceph::async::post(std::move(version_requests.begin()->second),
		      monc_errc::shutting_down, 0, 0);
    ldout(cct, 20) << __func__ << " canceling and discarding version request "
		   << version_requests.begin()->first << dendl;
    version_requests.erase(version_requests.begin());
  }
  // _cancel_mon_command() erases from mon_commands, hence the while loop
  while (!mon_commands.empty()) {
    auto tid = mon_commands.begin()->first;
    _cancel_mon_command(tid);
  }
  ldout(cct, 20) << __func__ << " discarding " << waiting_for_session.size()
		 << " pending message(s)" << dendl;
  waiting_for_session.clear();

  active_con.reset();
  pending_cons.clear();

  auth.reset();
  global_id = 0;
  authenticate_err = 0;
  authenticated = false;

  monc_lock.unlock();

  // NOTE(review): initialized is tested/cleared outside monc_lock; the
  // unlock/relock around it suggests an ordering constraint with the
  // timer -- confirm before changing.
  if (initialized) {
    initialized = false;
  }
  monc_lock.lock();
  timer.shutdown();
  stopping = false;
  monc_lock.unlock();
}
// Block until authentication completes or fails.
// @param timeout seconds to wait; <= 0 means wait indefinitely.
// @return 0 on success, otherwise the (negative) authenticate_err.
int MonClient::authenticate(double timeout)
{
  std::unique_lock lock{monc_lock};

  if (active_con) {
    ldout(cct, 5) << "already authenticated" << dendl;
    return 0;
  }
  // subscribe to monmap updates (and config) as part of session setup
  sub.want("monmap", monmap.get_epoch() ? monmap.get_epoch() + 1 : 0, 0);
  sub.want("config", 0, 0);
  if (!_opened())
    _reopen_session();

  auto until = ceph::mono_clock::now();
  until += ceph::make_timespan(timeout);
  if (timeout > 0.0)
    ldout(cct, 10) << "authenticate will time out at " << until << dendl;
  // authenticate_err == 1 means "in progress" (set by _reopen_session)
  while (!active_con && authenticate_err >= 0) {
    if (timeout > 0.0) {
      auto r = auth_cond.wait_until(lock, until);
      if (r == std::cv_status::timeout && !active_con) {
	ldout(cct, 0) << "authenticate timed out after " << timeout << dendl;
	authenticate_err = -ETIMEDOUT;
      }
    } else {
      auth_cond.wait(lock);
    }
  }

  if (active_con) {
    ldout(cct, 5) << __func__ << " success, global_id "
		  << active_con->get_global_id() << dendl;
    // active_con should not have been set if there was an error
    ceph_assert(authenticate_err >= 0);
    authenticated = true;
  }

  if (authenticate_err < 0 && auth_registry.no_keyring_disabled_cephx()) {
    lderr(cct) << __func__ << " NOTE: no keyring found; disabled cephx authentication" << dendl;
  }

  return authenticate_err;
}
// Admin socket handler; the only command registered is "rotate-key",
// which decodes a base64 key from the input buffer and installs it as the
// live key for this entity.
int MonClient::call(
  std::string_view command,
  const cmdmap_t& cmdmap,
  const ceph::buffer::list &inbl,
  ceph::Formatter *f,
  std::ostream& errss,
  ceph::buffer::list& out)
{
  if (command != "rotate-key") {
    return 0;
  }

  CryptoKey key;
  try {
    key.decode_base64(inbl.to_str());
  } catch (buffer::error& e) {
    errss << "error decoding key: " << e.what();
    return -EINVAL;
  }

  if (!keyring) {
    errss << "cephx not enabled; no key to rotate";
    return -EINVAL;
  }

  ldout(cct, 1) << "rotate live key for " << entity_name << dendl;
  keyring->add(entity_name, key);
  return 0;
}
// Process an auth reply.  Three cases: an anon 'tell' connection, the
// established session, or one of the connections we are hunting over.
void MonClient::handle_auth(MAuthReply *m)
{
  ceph_assert(ceph_mutex_is_locked(monc_lock));
  if (m->get_connection()->is_anon()) {
    // anon connection, used for mon tell commands
    for (auto& p : mon_commands) {
      if (p.second->target_con == m->get_connection()) {
	auto& mc = p.second->target_session;
	int ret = mc->handle_auth(m, entity_name,
				  CEPH_ENTITY_TYPE_MON,
				  rotating_secrets.get());
	(void)ret; // we don't care
	break;
      }
    }
    m->put();
    return;
  }

  if (!_hunting()) {
    // established session: lend our auth handler to the connection for
    // the exchange, then take it back afterwards
    std::swap(active_con->get_auth(), auth);
    int ret = active_con->authenticate(m);
    m->put();
    std::swap(auth, active_con->get_auth());
    if (global_id != active_con->get_global_id()) {
      lderr(cct) << __func__ << " peer assigned me a different global_id: "
		 << active_con->get_global_id() << dendl;
    }
    if (ret != -EAGAIN) {
      _finish_auth(ret);
    }
    return;
  }

  // hunting
  auto found = _find_pending_con(m->get_connection());
  ceph_assert(found != pending_cons.end());
  int auth_err = found->second.handle_auth(m, entity_name, want_keys,
					   rotating_secrets.get());
  m->put();
  if (auth_err == -EAGAIN) {
    // this connection needs more auth round trips; stay hunting
    return;
  }
  if (auth_err) {
    pending_cons.erase(found);
    if (!pending_cons.empty()) {
      // keep trying with pending connections
      return;
    }
    // the last try just failed, give up.
  } else {
    // first connection to authenticate wins: promote it to active
    auto& mc = found->second;
    ceph_assert(mc.have_session());
    active_con.reset(new MonConnection(std::move(mc)));
    pending_cons.clear();
  }
  _finish_hunting(auth_err);
  _finish_auth(auth_err);
}
// Record the outcome of an auth attempt and wake anyone blocked in
// authenticate().  An -EAGAIN result with no active session triggers a
// fresh session/hunt.
void MonClient::_finish_auth(int auth_err)
{
  ldout(cct,10) << __func__ << " " << auth_err << dendl;
  authenticate_err = auth_err;
  // _resend_mon_commands() could _reopen_session() if the connected mon is not
  // the one the MonCommand is targeting.
  if (!auth_err && active_con) {
    ceph_assert(auth);
    _check_auth_tickets();
  } else if (auth_err == -EAGAIN && !active_con) {
    ldout(cct,10) << __func__
		  << " auth returned EAGAIN, reopening the session to try again"
		  << dendl;
    _reopen_session();
  }
  auth_cond.notify_all();
}
// ---------
// Public, locking wrapper around _send_mon_message().
void MonClient::send_mon_message(MessageRef m)
{
  std::lock_guard lock{monc_lock};
  _send_mon_message(std::move(m));
}
// Send a message to the active monitor, or queue it until a session is
// established.  Caller must hold monc_lock.
void MonClient::_send_mon_message(MessageRef m)
{
  ceph_assert(ceph_mutex_is_locked(monc_lock));
  if (!active_con) {
    // no session yet; _finish_hunting() will flush this queue
    waiting_for_session.push_back(std::move(m));
    return;
  }
  auto con = active_con->get_con();
  ldout(cct, 10) << "_send_mon_message to mon."
		 << monmap.get_name(con->get_peer_addr())
		 << " at " << con->get_peer_addr() << dendl;
  con->send_message2(std::move(m));
}
// Drop the current/pending sessions and start hunting for a monitor.
// @param rank if >= 0, connect only to that monitor; otherwise pick a
//             batch of candidates via _add_conns().
void MonClient::_reopen_session(int rank)
{
  ceph_assert(ceph_mutex_is_locked(monc_lock));
  ldout(cct, 10) << __func__ << " rank " << rank << dendl;

  active_con.reset();
  pending_cons.clear();

  authenticate_err = 1;  // == in progress

  _start_hunting();

  if (rank >= 0) {
    _add_conn(rank);
  } else {
    _add_conns();
  }

  // throw out old queued messages
  waiting_for_session.clear();

  // throw out version check requests
  while (!version_requests.empty()) {
    ceph::async::post(std::move(version_requests.begin()->second),
		      monc_errc::session_reset, 0, 0);
    version_requests.erase(version_requests.begin());
  }

  // kick off the auth handshake on every new pending connection
  for (auto& c : pending_cons) {
    c.second.start(monmap.get_epoch(), entity_name);
  }

  if (sub.reload()) {
    _renew_subs();
  }
}
// Open a connection to the monitor at the given rank and register it as a
// pending (hunting) connection.
void MonClient::_add_conn(unsigned rank)
{
  auto addrs = monmap.get_addrs(rank);
  auto con = messenger->connect_to_mon(addrs);
  MonConnection mc(cct, con, global_id, &auth_registry);
  if (auth) {
    // seed the new connection with a clone of our current auth handler
    mc.get_auth().reset(auth->clone());
  }
  pending_cons.emplace(addrs, std::move(mc));
  ldout(cct, 10) << "picked mon." << monmap.get_name(rank)
		 << " con " << con
		 << " addr " << addrs
		 << dendl;
}
// Choose and connect to the next batch of hunt candidates: among monitors
// not yet tried, take those sharing the lowest priority value, shuffle
// them (weighted when any weights are set), and connect to up to
// mon_client_hunt_parallel of them.
void MonClient::_add_conns()
{
  // collect the next batch of candidates who are listed right next to the ones
  // already tried
  auto get_next_batch = [this]() -> std::vector<unsigned> {
    std::multimap<uint16_t, unsigned> ranks_by_priority;
    boost::copy(
      monmap.mon_info | boost::adaptors::filtered(
	[this](auto& info) {
	  auto rank = monmap.get_rank(info.first);
	  return tried.count(rank) == 0;
	}) | boost::adaptors::transformed(
	  [this](auto& info) {
	    auto rank = monmap.get_rank(info.first);
	    return std::make_pair(info.second.priority, rank);
	  }), std::inserter(ranks_by_priority, end(ranks_by_priority)));
    if (ranks_by_priority.empty()) {
      return {};
    }
    // only choose the monitors with lowest priority
    auto cands = boost::make_iterator_range(
      ranks_by_priority.equal_range(ranks_by_priority.begin()->first));
    std::vector<unsigned> ranks;
    boost::range::copy(cands | boost::adaptors::map_values,
		       std::back_inserter(ranks));
    return ranks;
  };
  auto ranks = get_next_batch();
  if (ranks.empty()) {
    tried.clear(); // start over
    ranks = get_next_batch();
  }
  ceph_assert(!ranks.empty());
  if (ranks.size() > 1) {
    std::vector<uint16_t> weights;
    for (auto i : ranks) {
      auto rank_name = monmap.get_name(i);
      weights.push_back(monmap.get_weight(rank_name));
    }
    random_device_t rd;
    if (std::accumulate(begin(weights), end(weights), 0u) == 0) {
      // no weights configured: plain uniform shuffle
      std::shuffle(begin(ranks), end(ranks), std::mt19937{rd()});
    } else {
      weighted_shuffle(begin(ranks), end(ranks), begin(weights), end(weights),
		       std::mt19937{rd()});
    }
  }
  ldout(cct, 10) << __func__ << " ranks=" << ranks << dendl;
  // mon_client_hunt_parallel == 0 means "connect to all candidates"
  unsigned n = cct->_conf->mon_client_hunt_parallel;
  if (n == 0 || n > ranks.size()) {
    n = ranks.size();
  }
  for (unsigned i = 0; i < n; i++) {
    _add_conn(ranks[i]);
    tried.insert(ranks[i]);
  }
}
// Messenger callback: a connection was reset.  Anon ('tell') connections
// get their bound command resent; the active session is reopened; stray
// and hunting connections are merely logged.  Returns false only for the
// active-session case, presumably so other dispatchers also see the reset.
bool MonClient::ms_handle_reset(Connection *con)
{
  std::lock_guard lock(monc_lock);

  if (con->get_peer_type() != CEPH_ENTITY_TYPE_MON)
    return false;

  if (con->is_anon()) {
    // find the 'tell' command bound to this connection and resend it
    auto p = mon_commands.begin();
    while (p != mon_commands.end()) {
      auto cmd = p->second;
      ++p;  // advance before _send_command(), which may mutate the map
      if (cmd->target_con == con) {
	_send_command(cmd); // may retry or fail
	break;
      }
    }
    return true;
  }

  if (_hunting()) {
    if (pending_cons.count(con->get_peer_addrs())) {
      ldout(cct, 10) << __func__ << " hunted mon " << con->get_peer_addrs()
		     << dendl;
    } else {
      ldout(cct, 10) << __func__ << " stray mon " << con->get_peer_addrs()
		     << dendl;
    }
    return true;
  } else {
    if (active_con && con == active_con->get_con()) {
      ldout(cct, 10) << __func__ << " current mon " << con->get_peer_addrs()
		     << dendl;
      _reopen_session();
      return false;
    } else {
      ldout(cct, 10) << "ms_handle_reset stray mon " << con->get_peer_addrs()
		     << dendl;
      return true;
    }
  }
}
// A session is "open" when we either hold an active connection or are
// still hunting for one.  Caller must hold monc_lock.
bool MonClient::_opened() const
{
  ceph_assert(ceph_mutex_is_locked(monc_lock));
  if (active_con) {
    return true;
  }
  return _hunting();
}
// We are hunting exactly when connection attempts are still in flight.
bool MonClient::_hunting() const
{
  return pending_cons.size() != 0;
}
// Called as a new hunt begins: back off the reconnect interval, clamped
// to the configured maximum.  The very first hunt (before any successful
// connection) keeps the initial interval.
void MonClient::_start_hunting()
{
  ceph_assert(!_hunting());
  if (!had_a_connection) {
    return;
  }
  reopen_interval_multiplier *= cct->_conf->mon_client_hunt_interval_backoff;
  const double cap = cct->_conf->mon_client_hunt_interval_max_multiple;
  if (reopen_interval_multiplier > cap) {
    reopen_interval_multiplier = cap;
  }
}
// Called when hunting ends (pending_cons already emptied) with the auth
// result.  On success, flush queued messages and commands, and adopt the
// winning connection's auth handler and global_id.
void MonClient::_finish_hunting(int auth_err)
{
  ldout(cct,10) << __func__ << " " << auth_err << dendl;
  ceph_assert(ceph_mutex_is_locked(monc_lock));
  // the pending conns have been cleaned.
  ceph_assert(!_hunting());
  if (active_con) {
    auto con = active_con->get_con();
    ldout(cct, 1) << "found mon."
		  << monmap.get_name(con->get_peer_addr())
		  << dendl;
  } else {
    ldout(cct, 1) << "no mon sessions established" << dendl;
  }

  had_a_connection = true;
  _un_backoff();

  if (!auth_err) {
    last_rotating_renew_sent = utime_t();
    // flush everything queued while we had no session
    while (!waiting_for_session.empty()) {
      _send_mon_message(std::move(waiting_for_session.front()));
      waiting_for_session.pop_front();
    }
    _resend_mon_commands();
    send_log(true);
    if (active_con) {
      // take over the connection's auth handler; warn if the mon assigned
      // us a different global_id than we previously had
      auth = std::move(active_con->get_auth());
      if (global_id && global_id != active_con->get_global_id()) {
	lderr(cct) << __func__ << " global_id changed from " << global_id
		   << " to " << active_con->get_global_id() << dendl;
      }
      global_id = active_con->get_global_id();
    }
  }
}
// Periodic housekeeping: renew auth tickets, check 'tell' commands, keep
// hunting if needed, send subscriptions/keepalives/log, and always
// reschedule itself (via the scope guard).
void MonClient::tick()
{
  ldout(cct, 10) << __func__ << dendl;

  utime_t now = ceph_clock_now();

  auto reschedule_tick = make_scope_guard([this] {
    schedule_tick();
  });

  _check_auth_tickets();
  _check_tell_commands();

  if (_hunting()) {
    ldout(cct, 1) << "continuing hunt" << dendl;
    return _reopen_session();
  } else if (active_con) {
    // just renew as needed
    auto cur_con = active_con->get_con();
    if (!cur_con->has_feature(CEPH_FEATURE_MON_STATEFUL_SUB)) {
      const bool maybe_renew = sub.need_renew();
      ldout(cct, 10) << "renew subs? -- " << (maybe_renew ? "yes" : "no")
		     << dendl;
      if (maybe_renew) {
	_renew_subs();
      }
    }

    if (now > last_keepalive + cct->_conf->mon_client_ping_interval) {
      cur_con->send_keepalive();
      last_keepalive = now;

      if (cct->_conf->mon_client_ping_timeout > 0 &&
	  cur_con->has_feature(CEPH_FEATURE_MSGR_KEEPALIVE2)) {
	// reconnect if the mon has stopped acking our keepalives
	utime_t lk = cur_con->get_last_keepalive_ack();
	utime_t interval = now - lk;
	if (interval > cct->_conf->mon_client_ping_timeout) {
	  ldout(cct, 1) << "no keepalive since " << lk << " (" << interval
			<< " seconds), reconnecting" << dendl;
	  return _reopen_session();
	}
      }

      // a healthy connection lets us relax the hunt backoff
      _un_backoff();
    }

    if (now > last_send_log + cct->_conf->mon_client_log_interval) {
      send_log();
      last_send_log = now;
    }
  }
}
// Relax the reconnect backoff one step, never dropping below the
// configured minimum multiple.
void MonClient::_un_backoff()
{
  // un-backoff our reconnect interval
  reopen_interval_multiplier = std::max(
    cct->_conf.get_val<double>("mon_client_hunt_interval_min_multiple"),
    reopen_interval_multiplier /
    cct->_conf.get_val<double>("mon_client_hunt_interval_backoff"));
  // (fixed typo: "multipler" -> "multiplier")
  ldout(cct, 20) << __func__ << " reopen_interval_multiplier now "
		 << reopen_interval_multiplier << dendl;
}
// Arm the next tick(): while disconnected, after the (backed-off) hunt
// interval; while connected, at the shorter of the ping and log intervals.
void MonClient::schedule_tick()
{
  auto do_tick = make_lambda_context([this](int) { tick(); });
  if (is_connected()) {
    // keep in touch
    timer.add_event_after(std::min(cct->_conf->mon_client_ping_interval,
				   cct->_conf->mon_client_log_interval),
			  do_tick);
  } else {
    // start another round of hunting
    const auto hunt_interval = cct->_conf->mon_client_hunt_interval *
      reopen_interval_multiplier;
    timer.add_event_after(hunt_interval, do_tick);
  }
}
// ---------
// Send any pending (new or renewing) subscriptions to the mon.  If no
// session is open yet, kick off a reopen instead; the subs will be sent
// once the session is established.  Caller must hold monc_lock.
void MonClient::_renew_subs()
{
  ceph_assert(ceph_mutex_is_locked(monc_lock));
  if (!sub.have_new()) {
    ldout(cct, 10) << __func__ << " - empty" << dendl;
    return;
  }
  ldout(cct, 10) << __func__ << dendl;
  if (!_opened())
    _reopen_session();
  else {
    auto m = ceph::make_message<MMonSubscribe>();
    m->what = sub.get_subs();
    m->hostname = ceph_get_short_hostname();
    _send_mon_message(std::move(m));
    // mark the pending subs as sent so we don't resend until needed
    sub.renewed();
  }
}
// Record the renewal interval the mon granted for our subscriptions,
// then drop the message reference.
void MonClient::handle_subscribe_ack(MMonSubscribeAck *m)
{
  sub.acked(m->interval);
  m->put();
}
// If we have an authenticated session and our service tickets need
// refreshing, send a new MAuth request; also drive rotating-key renewal.
// Caller must hold monc_lock.  Always returns 0.
int MonClient::_check_auth_tickets()
{
  ldout(cct, 10) << __func__ << dendl;
  ceph_assert(ceph_mutex_is_locked(monc_lock));
  if (active_con && auth) {
    if (auth->need_tickets()) {
      ldout(cct, 10) << __func__ << " getting new tickets!" << dendl;
      auto m = ceph::make_message<MAuth>();
      m->protocol = auth->get_protocol();
      auth->prepare_build_request();
      auth->build_request(m->auth_payload);
      _send_mon_message(m);
    }
    _check_auth_rotating();
  }
  return 0;
}
// Refresh the rotating service keys if they are close to expiring
// (within min(30s, ttl/4) of the cutoff).  Rate-limited to at most one
// renewal request per second.  Caller must hold monc_lock.  Always
// returns 0.
int MonClient::_check_auth_rotating()
{
  ceph_assert(ceph_mutex_is_locked(monc_lock));
  if (!rotating_secrets ||
      !auth_principal_needs_rotating_keys(entity_name)) {
    ldout(cct, 20) << "_check_auth_rotating not needed by " << entity_name << dendl;
    return 0;
  }
  if (!active_con || !auth) {
    ldout(cct, 10) << "_check_auth_rotating waiting for auth session" << dendl;
    return 0;
  }
  utime_t now = ceph_clock_now();
  utime_t cutoff = now;
  cutoff -= std::min(30.0, cct->_conf->auth_service_ticket_ttl / 4.0);
  utime_t issued_at_lower_bound = now;
  issued_at_lower_bound -= cct->_conf->auth_service_ticket_ttl;
  if (!rotating_secrets->need_new_secrets(cutoff)) {
    ldout(cct, 10) << "_check_auth_rotating have uptodate secrets (they expire after " << cutoff << ")" << dendl;
    rotating_secrets->dump_rotating();
    return 0;
  }
  ldout(cct, 10) << "_check_auth_rotating renewing rotating keys (they expired before " << cutoff << ")" << dendl;
  if (!rotating_secrets->need_new_secrets() &&
      rotating_secrets->need_new_secrets(issued_at_lower_bound)) {
    // the key has expired before it has been issued?
    lderr(cct) << __func__ << " possible clock skew, rotating keys expired way too early"
               << " (before " << issued_at_lower_bound << ")" << dendl;
  }
  // rate-limit: skip if we already sent a renewal less than 1s ago
  if ((now > last_rotating_renew_sent) &&
      double(now - last_rotating_renew_sent) < 1) {
    ldout(cct, 10) << __func__ << " called too often (last: "
                   << last_rotating_renew_sent << "), skipping refresh" << dendl;
    return 0;
  }
  auto m = ceph::make_message<MAuth>();
  m->protocol = auth->get_protocol();
  if (auth->build_rotating_request(m->auth_payload)) {
    last_rotating_renew_sent = now;
    _send_mon_message(std::move(m));
  }
  return 0;
}
// Block (up to `timeout` seconds) until usable rotating service keys are
// available, i.e. keys that do not need renewal before the cutoff.
// Returns 0 on success (or when rotating keys are not applicable),
// -ETIMEDOUT on timeout.
int MonClient::wait_auth_rotating(double timeout)
{
  std::unique_lock l(monc_lock);
  // Must be initialized
  ceph_assert(auth != nullptr);
  if (auth->get_protocol() == CEPH_AUTH_NONE)
    return 0;
  if (!rotating_secrets)
    return 0;
  ldout(cct, 10) << __func__ << " waiting for " << timeout << dendl;
  utime_t cutoff = ceph_clock_now();
  cutoff -= std::min(30.0, cct->_conf->auth_service_ticket_ttl / 4.0);
  // auth_cond is notified when new rotating keys arrive
  if (auth_cond.wait_for(l, ceph::make_timespan(timeout), [this, cutoff] {
    return (!auth_principal_needs_rotating_keys(entity_name) ||
            !rotating_secrets->need_new_secrets(cutoff));
  })) {
    ldout(cct, 10) << __func__ << " done" << dendl;
    return 0;
  } else {
    ldout(cct, 0) << __func__ << " timed out after " << timeout << dendl;
    return -ETIMEDOUT;
  }
}
// ---------
// Transmit (or retransmit) a command.  Tell-style commands on octopus+
// clusters get their own anonymous connection to the target mon; on
// pre-octopus clusters they are emulated by re-opening the main session
// against the requested rank/name.  Plain CLI commands go out over the
// active session.  May fail/complete the command via _finish_command().
void MonClient::_send_command(MonCommand *r)
{
  if (r->is_tell()) {
    ++r->send_attempts;
    if (r->send_attempts > cct->_conf->mon_client_directed_command_retry) {
      _finish_command(r, monc_errc::mon_unavailable, "mon unavailable", {});
      return;
    }
    // tell-style command
    if (monmap.min_mon_release >= ceph_release_t::octopus) {
      // drop any previous per-command connection before reconnecting
      if (r->target_con) {
	r->target_con->mark_down();
      }
      if (r->target_rank >= 0) {
	if (r->target_rank >= (int)monmap.size()) {
	  ldout(cct, 10) << " target " << r->target_rank
			 << " >= max mon " << monmap.size() << dendl;
	  _finish_command(r, monc_errc::rank_dne, "mon rank dne"sv, {});
	  return;
	}
	r->target_con = messenger->connect_to_mon(
	  monmap.get_addrs(r->target_rank), true /* anon */);
      } else {
	if (!monmap.contains(r->target_name)) {
	  ldout(cct, 10) << " target " << r->target_name
			 << " not present in monmap" << dendl;
	  _finish_command(r, monc_errc::mon_dne, "mon dne"sv, {});
	  return;
	}
	r->target_con = messenger->connect_to_mon(
	  monmap.get_addrs(r->target_name), true /* anon */);
      }
      // per-command session; the MCommand is queued and sent once the
      // session authenticates
      r->target_session.reset(new MonConnection(cct, r->target_con, 0,
						&auth_registry));
      r->target_session->start(monmap.get_epoch(), entity_name);
      r->last_send_attempt = ceph_clock_now();
      MCommand *m = new MCommand(monmap.fsid);
      m->set_tid(r->tid);
      m->cmd = r->cmd;
      m->set_data(r->inbl);
      r->target_session->queue_command(m);
      return;
    }
    // ugly legacy handling of pre-octopus mons
    entity_addr_t peer;
    if (active_con) {
      peer = active_con->get_con()->get_peer_addr();
    }
    if (r->target_rank >= 0 &&
	r->target_rank != monmap.get_rank(peer)) {
      // wrong mon: reconnect the main session to the requested rank; the
      // command is resent after the session re-establishes
      ldout(cct, 10) << __func__ << " " << r->tid << " " << r->cmd
		     << " wants rank " << r->target_rank
		     << ", reopening session"
		     << dendl;
      if (r->target_rank >= (int)monmap.size()) {
	ldout(cct, 10) << " target " << r->target_rank
		       << " >= max mon " << monmap.size() << dendl;
	_finish_command(r, monc_errc::rank_dne, "mon rank dne"sv, {});
	return;
      }
      _reopen_session(r->target_rank);
      return;
    }
    if (r->target_name.length() &&
	r->target_name != monmap.get_name(peer)) {
      ldout(cct, 10) << __func__ << " " << r->tid << " " << r->cmd
		     << " wants mon " << r->target_name
		     << ", reopening session"
		     << dendl;
      if (!monmap.contains(r->target_name)) {
	ldout(cct, 10) << " target " << r->target_name
		       << " not present in monmap" << dendl;
	_finish_command(r, monc_errc::mon_dne, "mon dne"sv, {});
	return;
      }
      _reopen_session(monmap.get_rank(r->target_name));
      return;
    }
    // fall-thru to send 'normal' CLI command
  }
  // normal CLI command
  ldout(cct, 10) << __func__ << " " << r->tid << " " << r->cmd << dendl;
  auto m = ceph::make_message<MMonCommand>(monmap.fsid);
  m->set_tid(r->tid);
  m->cmd = r->cmd;
  m->set_data(r->inbl);
  _send_mon_message(std::move(m));
  return;
}
// Resend tell commands whose last attempt is older than the hunt
// interval.  Called from tick().
void MonClient::_check_tell_commands()
{
  // resend any requests
  auto now = ceph_clock_now();
  auto p = mon_commands.begin();
  while (p != mon_commands.end()) {
    auto cmd = p->second;
    // advance before sending: _send_command may erase cmd from mon_commands
    ++p;
    if (cmd->is_tell() &&
	cmd->last_send_attempt != utime_t() &&
	now - cmd->last_send_attempt > cct->_conf->mon_client_hunt_interval) {
      ldout(cct,5) << __func__ << " timeout tell command " << cmd->tid << dendl;
      _send_command(cmd); // might remove cmd from mon_commands
    }
  }
}
// Resend outstanding commands after a session change (e.g. when hunting
// finishes).  Octopus+ tell commands are excluded: they ride their own
// anonymous connections and need no resend here.
void MonClient::_resend_mon_commands()
{
  // resend any requests
  auto p = mon_commands.begin();
  while (p != mon_commands.end()) {
    auto cmd = p->second;
    // advance before sending: _send_command may erase cmd from mon_commands
    ++p;
    if (cmd->is_tell() && monmap.min_mon_release >= ceph_release_t::octopus) {
      // starting with octopus, tell commands use their own connection and
      // need no special resend when we finish hunting.
    } else {
      _send_command(cmd); // might remove cmd from mon_commands
    }
  }
}
// Complete an outstanding MMonCommand on receipt of its ack.  A tid of 0
// (sent by some older mons) is matched against the oldest outstanding
// command.  Positive ack->r values are treated as success.
void MonClient::handle_mon_command_ack(MMonCommandAck *ack)
{
  MonCommand *r = NULL;
  uint64_t tid = ack->get_tid();
  if (tid == 0 && !mon_commands.empty()) {
    r = mon_commands.begin()->second;
    ldout(cct, 10) << __func__ << " has tid 0, assuming it is " << r->tid << dendl;
  } else {
    auto p = mon_commands.find(tid);
    if (p == mon_commands.end()) {
      // e.g. the command was already cancelled or finished
      ldout(cct, 10) << __func__ << " " << ack->get_tid() << " not found" << dendl;
      ack->put();
      return;
    }
    r = p->second;
  }
  ldout(cct, 10) << __func__ << " " << r->tid << " " << r->cmd << dendl;
  auto ec = ack->r < 0 ? bs::error_code(-ack->r, mon_category())
    : bs::error_code();
  _finish_command(r, ec, ack->rs,
		  std::move(ack->get_data()));
  ack->put();
}
// Complete an outstanding tell command on receipt of its MCommandReply.
// Mirrors handle_mon_command_ack(), including the tid==0 back-compat
// matching against the oldest outstanding command.
void MonClient::handle_command_reply(MCommandReply *reply)
{
  MonCommand *r = NULL;
  uint64_t tid = reply->get_tid();
  if (tid == 0 && !mon_commands.empty()) {
    r = mon_commands.begin()->second;
    ldout(cct, 10) << __func__ << " has tid 0, assuming it is " << r->tid
		   << dendl;
  } else {
    auto p = mon_commands.find(tid);
    if (p == mon_commands.end()) {
      // e.g. the command was already cancelled or finished
      ldout(cct, 10) << __func__ << " " << reply->get_tid() << " not found"
		     << dendl;
      reply->put();
      return;
    }
    r = p->second;
  }
  ldout(cct, 10) << __func__ << " " << r->tid << " " << r->cmd << dendl;
  auto ec = reply->r < 0 ? bs::error_code(-reply->r, mon_category())
    : bs::error_code();
  _finish_command(r, ec, reply->rs, std::move(reply->get_data()));
  reply->put();
}
// Cancel an outstanding command by tid, completing it with a timed_out
// error.  Returns 0 on success, -ENOENT if no such command is pending.
// Caller must hold monc_lock.
int MonClient::_cancel_mon_command(uint64_t tid)
{
  ceph_assert(ceph_mutex_is_locked(monc_lock));
  auto found = mon_commands.find(tid);
  if (found == mon_commands.end()) {
    ldout(cct, 10) << __func__ << " tid " << tid << " dne" << dendl;
    return -ENOENT;
  }
  ldout(cct, 10) << __func__ << " tid " << tid << dendl;
  _finish_command(found->second, monc_errc::timed_out, "timed out"sv, {});
  return 0;
}
// Complete a command: post its completion handler with (ec, rs, bl), tear
// down any per-command connection, remove it from the outstanding map and
// free it.  `r` is invalid after this returns.
void MonClient::_finish_command(MonCommand *r, bs::error_code ret,
				std::string_view rs, ceph::buffer::list&& bl)
{
  ldout(cct, 10) << __func__ << " " << r->tid << " = " << ret << " " << rs
		 << dendl;
  ceph::async::post(std::move(r->onfinish), ret, std::string(rs),
		    std::move(bl));
  if (r->target_con) {
    r->target_con->mark_down();
  }
  mon_commands.erase(r->tid);
  delete r;
}
// ---------
// Complete an outstanding get_version request, posting (ec, newest,
// oldest) to its completion.  Unmatched handles are logged and dropped.
// Caller must hold monc_lock.
void MonClient::handle_get_version_reply(MMonGetVersionReply* m)
{
  ceph_assert(ceph_mutex_is_locked(monc_lock));
  auto iter = version_requests.find(m->handle);
  if (iter == version_requests.end()) {
    ldout(cct, 0) << __func__ << " version request with handle " << m->handle
		  << " not found" << dendl;
  } else {
    auto req = std::move(iter->second);
    ldout(cct, 10) << __func__ << " finishing " << iter->first << " version "
		   << m->version << dendl;
    version_requests.erase(iter);
    ceph::async::post(std::move(req), bs::error_code(),
		      m->version, m->oldest_version);
  }
  m->put();
}
// AuthClient hook: produce the initial auth request for a new connection.
// For mon connections the request is delegated to the matching
// MonConnection (anonymous tell-command sessions first, then pending
// hunting sessions).  For non-mon peers we build a ticket-based
// authorizer instead.  Returns 0 on success or a negative errno.
int MonClient::get_auth_request(
  Connection *con,
  AuthConnectionMeta *auth_meta,
  uint32_t *auth_method,
  std::vector<uint32_t> *preferred_modes,
  ceph::buffer::list *bl)
{
  std::lock_guard l(monc_lock);
  ldout(cct,10) << __func__ << " con " << con << " auth_method " << *auth_method
		<< dendl;
  // connection to mon?
  if (con->get_peer_type() == CEPH_ENTITY_TYPE_MON) {
    ceph_assert(!auth_meta->authorizer);
    if (con->is_anon()) {
      // per-command (tell) session
      for (auto& i : mon_commands) {
	if (i.second->target_con == con) {
	  return i.second->target_session->get_auth_request(
	    auth_method, preferred_modes, bl,
	    entity_name, want_keys, rotating_secrets.get());
	}
      }
    }
    for (auto& i : pending_cons) {
      if (i.second.is_con(con)) {
	return i.second.get_auth_request(
	  auth_method, preferred_modes, bl,
	  entity_name, want_keys, rotating_secrets.get());
      }
    }
    return -ENOENT;
  }
  // generate authorizer
  if (!auth) {
    lderr(cct) << __func__ << " but no auth handler is set up" << dendl;
    return -EACCES;
  }
  auth_meta->authorizer.reset(auth->build_authorizer(con->get_peer_type()));
  if (!auth_meta->authorizer) {
    lderr(cct) << __func__ << " failed to build_authorizer for type "
	       << ceph_entity_type_name(con->get_peer_type()) << dendl;
    return -EACCES;
  }
  auth_meta->auth_method = auth_meta->authorizer->protocol;
  auth_registry.get_supported_modes(con->get_peer_type(),
				    auth_meta->auth_method,
				    preferred_modes);
  *bl = auth_meta->authorizer->bl;
  return 0;
}
// AuthClient hook: continue a multi-round auth exchange.  Mon connections
// are delegated to the owning MonConnection; for non-mon peers this is an
// authorizer challenge round.  Returns 0 on success or negative on error.
int MonClient::handle_auth_reply_more(
  Connection *con,
  AuthConnectionMeta *auth_meta,
  const ceph::buffer::list& bl,
  ceph::buffer::list *reply)
{
  std::lock_guard l(monc_lock);
  if (con->get_peer_type() == CEPH_ENTITY_TYPE_MON) {
    if (con->is_anon()) {
      // per-command (tell) session
      for (auto& i : mon_commands) {
	if (i.second->target_con == con) {
	  return i.second->target_session->handle_auth_reply_more(
	    auth_meta, bl, reply);
	}
      }
    }
    for (auto& i : pending_cons) {
      if (i.second.is_con(con)) {
	return i.second.handle_auth_reply_more(auth_meta, bl, reply);
      }
    }
    return -ENOENT;
  }
  // authorizer challenges
  if (!auth || !auth_meta->authorizer) {
    lderr(cct) << __func__ << " no authorizer?" << dendl;
    return -1;
  }
  auth_meta->authorizer->add_challenge(cct, bl);
  *reply = auth_meta->authorizer->bl;
  return 0;
}
// AuthClient hook: final round of the auth handshake.  For a pending mon
// connection, success promotes it to the active session (ending the hunt);
// failure drops it and, if it was the last candidate, fails the hunt.
// For non-mon peers we verify the authorizer reply instead.
int MonClient::handle_auth_done(
  Connection *con,
  AuthConnectionMeta *auth_meta,
  uint64_t global_id,
  uint32_t con_mode,
  const ceph::buffer::list& bl,
  CryptoKey *session_key,
  std::string *connection_secret)
{
  if (con->get_peer_type() == CEPH_ENTITY_TYPE_MON) {
    std::lock_guard l(monc_lock);
    if (con->is_anon()) {
      // per-command (tell) session
      for (auto& i : mon_commands) {
	if (i.second->target_con == con) {
	  return i.second->target_session->handle_auth_done(
	    auth_meta, global_id, bl,
	    session_key, connection_secret);
	}
      }
    }
    for (auto& i : pending_cons) {
      if (i.second.is_con(con)) {
	int r = i.second.handle_auth_done(
	  auth_meta, global_id, bl,
	  session_key, connection_secret);
	if (r) {
	  // this candidate failed; others may still be hunting
	  // (erase invalidates the loop, but every path below returns)
	  pending_cons.erase(i.first);
	  if (!pending_cons.empty()) {
	    return r;
	  }
	} else {
	  // winner: promote to active session, drop the other candidates
	  active_con.reset(new MonConnection(std::move(i.second)));
	  pending_cons.clear();
	  ceph_assert(active_con->have_session());
	}
	_finish_hunting(r);
	if (r || monmap.get_epoch() > 0) {
	  _finish_auth(r);
	}
	return r;
      }
    }
    return -ENOENT;
  } else {
    // verify authorizer reply
    auto p = bl.begin();
    if (!auth_meta->authorizer->verify_reply(p, &auth_meta->connection_secret)) {
      ldout(cct, 0) << __func__ << " failed verifying authorizer reply"
		    << dendl;
      return -EACCES;
    }
    auth_meta->session_key = auth_meta->authorizer->session_key;
    return 0;
  }
}
// AuthClient hook: the peer rejected our auth method.  For mon
// connections, let the owning MonConnection pick another mutually
// supported method; if none remains, fail the command or the hunt.
// Non-mon peers get a straight -EACCES.
int MonClient::handle_auth_bad_method(
  Connection *con,
  AuthConnectionMeta *auth_meta,
  uint32_t old_auth_method,
  int result,
  const std::vector<uint32_t>& allowed_methods,
  const std::vector<uint32_t>& allowed_modes)
{
  auth_meta->allowed_methods = allowed_methods;
  std::lock_guard l(monc_lock);
  if (con->get_peer_type() == CEPH_ENTITY_TYPE_MON) {
    if (con->is_anon()) {
      // per-command (tell) session
      for (auto& i : mon_commands) {
	if (i.second->target_con == con) {
	  int r = i.second->target_session->handle_auth_bad_method(
	    old_auth_method,
	    result,
	    allowed_methods,
	    allowed_modes);
	  if (r < 0) {
	    auto ec = bs::error_code(-r, mon_category());
	    _finish_command(i.second, ec, "auth failed"sv, {});
	  }
	  return r;
	}
      }
    }
    for (auto& i : pending_cons) {
      if (i.second.is_con(con)) {
	int r = i.second.handle_auth_bad_method(old_auth_method,
						result,
						allowed_methods,
						allowed_modes);
	if (r == 0) {
	  return r; // try another method on this con
	}
	// (erase invalidates the loop, but every path below returns)
	pending_cons.erase(i.first);
	if (!pending_cons.empty()) {
	  return r; // fail this con, maybe another con will succeed
	}
	// fail hunt
	_finish_hunting(r);
	_finish_auth(r);
	return r;
      }
    }
    return -ENOENT;
  } else {
    // huh...
    ldout(cct,10) << __func__ << " hmm, they didn't like " << old_auth_method
		  << " result " << cpp_strerror(result)
		  << " and auth is " << (auth ? auth->get_protocol() : 0)
		  << dendl;
    return -EACCES;
  }
}
// AuthServer hook (for mgr/osd/mds embedding a MonClient): verify an
// incoming peer's authorizer.  Returns 1 on success (peer authenticated),
// 0 when a challenge was issued and another round is expected, negative
// errno on failure.
int MonClient::handle_auth_request(
  Connection *con,
  AuthConnectionMeta *auth_meta,
  bool more,
  uint32_t auth_method,
  const ceph::buffer::list& payload,
  ceph::buffer::list *reply)
{
  if (payload.length() == 0) {
    // for some channels prior to nautilus (osd heartbeat), we
    // tolerate the lack of an authorizer.
    if (!con->get_messenger()->require_authorizer) {
      handle_authentication_dispatcher->ms_handle_authentication(con);
      return 1;
    }
    return -EACCES;
  }
  // first payload byte encodes the authorizer mode
  auth_meta->auth_mode = payload[0];
  if (auth_meta->auth_mode < AUTH_MODE_AUTHORIZER ||
      auth_meta->auth_mode > AUTH_MODE_AUTHORIZER_MAX) {
    return -EACCES;
  }
  AuthAuthorizeHandler *ah = get_auth_authorize_handler(con->get_peer_type(),
							auth_method);
  if (!ah) {
    lderr(cct) << __func__ << " no AuthAuthorizeHandler found for auth method "
	       << auth_method << dendl;
    return -EOPNOTSUPP;
  }
  auto ac = &auth_meta->authorizer_challenge;
  if (auth_meta->skip_authorizer_challenge) {
    ldout(cct, 10) << __func__ << " skipping challenge on " << con << dendl;
    ac = nullptr;
  }
  bool was_challenge = (bool)auth_meta->authorizer_challenge;
  bool isvalid = ah->verify_authorizer(
    cct,
    *rotating_secrets,
    payload,
    auth_meta->get_connection_secret_length(),
    reply,
    &con->peer_name,
    &con->peer_global_id,
    &con->peer_caps_info,
    &auth_meta->session_key,
    &auth_meta->connection_secret,
    ac);
  if (isvalid) {
    handle_authentication_dispatcher->ms_handle_authentication(con);
    return 1;
  }
  if (!more && !was_challenge && auth_meta->authorizer_challenge) {
    // verification produced a fresh challenge; ask the peer to answer it
    ldout(cct,10) << __func__ << " added challenge on " << con << dendl;
    return 0;
  }
  ldout(cct,10) << __func__ << " bad authorizer on " << con << dendl;
  // discard old challenge
  auth_meta->authorizer_challenge.reset();
  return -EACCES;
}
// Build an authorizer for talking to the given service type, or return
// nullptr (with a log message) when no auth handler is available yet.
AuthAuthorizer* MonClient::build_authorizer(int service_id) const {
  std::lock_guard l(monc_lock);
  if (!auth) {
    ldout(cct, 0) << __func__ << " for " << ceph_entity_type_name(service_id)
		  << ", but no auth is available now" << dendl;
    return nullptr;
  }
  return auth->build_authorizer(service_id);
}
#define dout_subsys ceph_subsys_monc
#undef dout_prefix
#define dout_prefix *_dout << "monclient" << (have_session() ? ": " : "(hunting): ")
// Bind this session-state object to an existing Connection; global_id is
// 0 until the mon assigns one during authentication.
MonConnection::MonConnection(
  CephContext *cct, ConnectionRef con, uint64_t global_id,
  AuthRegistry *ar)
  : cct(cct), con(con), global_id(global_id), auth_registry(ar)
{}
// Tear down the underlying connection, if any (moved-from instances have
// a null con and do nothing).
MonConnection::~MonConnection()
{
  if (con) {
    con->mark_down();
    con.reset();
  }
}
// True once authentication has completed on this connection.
bool MonConnection::have_session() const
{
  return state == State::HAVE_SESSION;
}
// Begin the handshake on this connection.  msgr2 peers just get an
// MMonGetMap (auth happens via the AuthClient hooks); legacy v1 peers get
// the initial MAuth negotiation message listing our supported methods.
void MonConnection::start(epoch_t epoch,
			  const EntityName& entity_name)
{
  using ceph::encode;
  auth_start = ceph_clock_now();
  if (con->get_peer_addr().is_msgr2()) {
    ldout(cct, 10) << __func__ << " opening mon connection" << dendl;
    state = State::AUTHENTICATING;
    con->send_message(new MMonGetMap());
    return;
  }
  // restart authentication handshake
  state = State::NEGOTIATING;
  // send an initial keepalive to ensure our timestamp is valid by the
  // time we are in an OPENED state (by sequencing this before
  // authentication).
  con->send_keepalive();
  auto m = new MAuth;
  m->protocol = CEPH_AUTH_UNKNOWN;
  m->monmap_epoch = epoch;
  __u8 struct_v = 1;
  encode(struct_v, m->auth_payload);
  std::vector<uint32_t> auth_supported;
  auth_registry->get_supported_methods(con->get_peer_type(), &auth_supported);
  encode(auth_supported, m->auth_payload);
  encode(entity_name, m->auth_payload);
  encode(global_id, m->auth_payload);
  con->send_message(m);
}
// msgr2: build the initial auth request for this connection.  Picks an
// auth method (first supported one, unless a previous bad-method round
// already chose another), reports the preferred modes, and encodes the
// boilerplate plus the method-specific initial payload into *bl.
// Returns 0 on success or -EACCES if nothing is supported.
int MonConnection::get_auth_request(
  uint32_t *method,
  std::vector<uint32_t> *preferred_modes,
  ceph::buffer::list *bl,
  const EntityName& entity_name,
  uint32_t want_keys,
  RotatingKeyRing* keyring)
{
  using ceph::encode;
  // choose method
  if (auth_method < 0) {
    std::vector<uint32_t> as;
    auth_registry->get_supported_methods(con->get_peer_type(), &as);
    if (as.empty()) {
      return -EACCES;
    }
    auth_method = as.front();
  }
  *method = auth_method;
  auth_registry->get_supported_modes(con->get_peer_type(), auth_method,
				     preferred_modes);
  ldout(cct,10) << __func__ << " method " << *method
		<< " preferred_modes " << *preferred_modes << dendl;
  if (preferred_modes->empty()) {
    return -EACCES;
  }
  int r = _init_auth(*method, entity_name, want_keys, keyring, true);
  ceph_assert(r == 0);
  // initial request includes some boilerplate...
  encode((char)AUTH_MODE_MON, *bl);
  encode(entity_name, *bl);
  encode(global_id, *bl);
  // and (maybe) some method-specific initial payload
  auth->build_initial_request(bl);
  return 0;
}
// msgr2: process an intermediate auth reply.  -EAGAIN from the handler
// means "send another request" (built into *reply, result mapped to 0);
// any other success here is unexpected at this stage (see ceph_abort).
int MonConnection::handle_auth_reply_more(
  AuthConnectionMeta *auth_meta,
  const ceph::buffer::list& bl,
  ceph::buffer::list *reply)
{
  ldout(cct, 10) << __func__ << " payload " << bl.length() << dendl;
  ldout(cct, 30) << __func__ << " got\n";
  bl.hexdump(*_dout);
  *_dout << dendl;
  auto p = bl.cbegin();
  ldout(cct, 10) << __func__ << " payload_len " << bl.length() << dendl;
  int r = auth->handle_response(0, p, &auth_meta->session_key,
				&auth_meta->connection_secret);
  if (r == -EAGAIN) {
    auth->prepare_build_request();
    auth->build_request(*reply);
    ldout(cct, 10) << __func__ << " responding with " << reply->length()
		   << " bytes" << dendl;
    r = 0;
  } else if (r < 0) {
    lderr(cct) << __func__ << " handle_response returned " << r << dendl;
  } else {
    ldout(cct, 10) << __func__ << " authenticated!" << dendl;
    // FIXME
    ceph_abort(cct, "write me");
  }
  return r;
}
// msgr2: final auth round.  Adopts the mon-assigned global_id, lets the
// auth handler consume the final payload, and on success transitions to
// HAVE_SESSION and flushes any queued tell command.  Returns the auth
// handler's result (>= 0 on success).
int MonConnection::handle_auth_done(
  AuthConnectionMeta *auth_meta,
  uint64_t new_global_id,
  const ceph::buffer::list& bl,
  CryptoKey *session_key,
  std::string *connection_secret)
{
  ldout(cct,10) << __func__ << " global_id " << new_global_id
		<< " payload " << bl.length()
		<< dendl;
  global_id = new_global_id;
  auth->set_global_id(global_id);
  auto p = bl.begin();
  int auth_err = auth->handle_response(0, p, &auth_meta->session_key,
				       &auth_meta->connection_secret);
  if (auth_err >= 0) {
    state = State::HAVE_SESSION;
  }
  // seed the keepalive-ack timestamp from when the handshake started
  con->set_last_keepalive_ack(auth_start);
  if (pending_tell_command) {
    con->send_message2(std::move(pending_tell_command));
  }
  return auth_err;
}
int MonConnection::handle_auth_bad_method(
uint32_t old_auth_method,
int result,
const std::vector<uint32_t>& allowed_methods,
const std::vector<uint32_t>& allowed_modes)
{
ldout(cct,10) << __func__ << " old_auth_method " << old_auth_method
<< " result " << cpp_strerror(result)
<< " allowed_methods " << allowed_methods << dendl;
std::vector<uint32_t> auth_supported;
auth_registry->get_supported_methods(con->get_peer_type(), &auth_supported);
auto p = std::find(auth_supported.begin(), auth_supported.end(),
old_auth_method);
assert(p != auth_supported.end());
p = std::find_first_of(std::next(p), auth_supported.end(),
allowed_methods.begin(), allowed_methods.end());
if (p == auth_supported.end()) {
lderr(cct) << __func__ << " server allowed_methods " << allowed_methods
<< " but i only support " << auth_supported << dendl;
return -EACCES;
}
auth_method = *p;
ldout(cct,10) << __func__ << " will try " << auth_method << " next" << dendl;
return 0;
}
// Drive the legacy (v1) auth state machine with the mon's latest
// MAuthReply: finish method negotiation if still pending, then run the
// authentication round; 0 means the session is established.
int MonConnection::handle_auth(MAuthReply* m,
			       const EntityName& entity_name,
			       uint32_t want_keys,
			       RotatingKeyRing* keyring)
{
  if (state == State::NEGOTIATING) {
    // still agreeing on a protocol; bail out on negotiation failure
    if (int err = _negotiate(m, entity_name, want_keys, keyring); err != 0) {
      return err;
    }
    state = State::AUTHENTICATING;
  }
  const int ret = authenticate(m);
  if (ret == 0) {
    state = State::HAVE_SESSION;
  }
  return ret;
}
// v1: instantiate the auth handler for the protocol the mon selected.
// If we don't support it, propagate the mon's own result (which also
// carries ENOTSUP when no common protocol exists).
int MonConnection::_negotiate(MAuthReply *m,
			      const EntityName& entity_name,
			      uint32_t want_keys,
			      RotatingKeyRing* keyring)
{
  ldout(cct, 10) << __func__ << dendl;
  int r = _init_auth(m->protocol, entity_name, want_keys, keyring, false);
  if (r == -ENOTSUP) {
    if (m->result == -ENOTSUP) {
      ldout(cct, 10) << "none of our auth protocols are supported by the server"
		     << dendl;
    }
    return m->result;
  }
  return r;
}
// (Re)create the auth client handler for the given method.  If a handler
// for the same method already exists it is just reset.  Returns 0 on
// success, -ENOTSUP when no handler exists for the method.
int MonConnection::_init_auth(
  uint32_t method,
  const EntityName& entity_name,
  uint32_t want_keys,
  RotatingKeyRing* keyring,
  bool msgr2)
{
  ldout(cct, 10) << __func__ << " method " << method << dendl;
  if (auth && auth->get_protocol() == (int)method) {
    // fixed typo in the log message: "reseting" -> "resetting"
    ldout(cct, 10) << __func__ << " already have auth, resetting" << dendl;
    auth->reset();
    return 0;
  }
  ldout(cct, 10) << __func__ << " creating new auth" << dendl;
  auth.reset(AuthClientHandler::create(cct, method, keyring));
  if (!auth) {
    ldout(cct, 10) << " no handler for protocol " << method << dendl;
    return -ENOTSUP;
  }
  // do not request MGR key unless the mon has the SERVER_KRAKEN
  // feature. otherwise it will give us an auth error. note that
  // we have to use the FEATUREMASK because pre-jewel the kraken
  // feature bit was used for something else.
  if (!msgr2 &&
      (want_keys & CEPH_ENTITY_TYPE_MGR) &&
      !(con->has_features(CEPH_FEATUREMASK_SERVER_KRAKEN))) {
    ldout(cct, 1) << __func__
		  << " not requesting MGR keys from pre-kraken monitor"
		  << dendl;
    want_keys &= ~CEPH_ENTITY_TYPE_MGR;
  }
  auth->set_want_keys(want_keys);
  auth->init(entity_name);
  auth->set_global_id(global_id);
  return 0;
}
// v1: run one authentication round against the mon's reply.  Adopts a
// changed global_id (new session), sends the next MAuth request when the
// handler returns -EAGAIN, and flushes any queued tell command on
// success.  Returns the handler's result.
int MonConnection::authenticate(MAuthReply *m)
{
  ceph_assert(auth);
  if (!m->global_id) {
    ldout(cct, 1) << "peer sent an invalid global_id" << dendl;
  }
  if (m->global_id != global_id) {
    // it's a new session
    auth->reset();
    global_id = m->global_id;
    auth->set_global_id(global_id);
    ldout(cct, 10) << "my global_id is " << m->global_id << dendl;
  }
  auto p = m->result_bl.cbegin();
  int ret = auth->handle_response(m->result, p, nullptr, nullptr);
  if (ret == -EAGAIN) {
    // more rounds needed; send the next request
    auto ma = new MAuth;
    ma->protocol = auth->get_protocol();
    auth->prepare_build_request();
    auth->build_request(ma->auth_payload);
    con->send_message(ma);
  }
  if (ret == 0 && pending_tell_command) {
    con->send_message2(std::move(pending_tell_command));
  }
  return ret;
}
// Register the (single) callback invoked on config changes; registering
// twice is a bug (asserts).
void MonClient::register_config_callback(md_config_t::config_callback fn) {
  ceph_assert(!config_cb);
  config_cb = fn;
}
// Return the registered config-change callback (may be empty).
md_config_t::config_callback MonClient::get_config_callback() {
  return config_cb;
}
// boost::system error category for monc_errc values.  The
// non-virtual-dtor warning is suppressed locally: the (presumably
// non-virtually-destructed) base is only ever instantiated as a
// function-local static and never deleted through a base pointer --
// NOTE(review): confirm against ceph::converting_category.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wnon-virtual-dtor"
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
class monc_error_category : public ceph::converting_category {
public:
  monc_error_category(){}
  const char* name() const noexcept override;
  const char* message(int ev, char*, std::size_t) const noexcept override;
  std::string message(int ev) const override;
  bs::error_condition default_error_condition(int ev) const noexcept
    override;
  bool equivalent(int ev, const bs::error_condition& c) const
    noexcept override;
  using ceph::converting_category::equivalent;
  // map a monc error value back to a negative errno
  int from_code(int ev) const noexcept override;
};
#pragma GCC diagnostic pop
#pragma clang diagnostic pop
// Category name shown in error_code diagnostics.
const char* monc_error_category::name() const noexcept {
  return "monc";
}
// Static message text for a monc_errc value; the buffer parameters are
// unused since all messages are string literals.
const char* monc_error_category::message(int ev, char*, std::size_t) const noexcept {
  if (ev == 0)
    return "No error";
  switch (static_cast<monc_errc>(ev)) {
  case monc_errc::shutting_down: // Command failed due to MonClient shutting down
    return "Command failed due to MonClient shutting down";
  case monc_errc::session_reset:
    return "Monitor session was reset";
  case monc_errc::rank_dne:
    return "Requested monitor rank does not exist";
  case monc_errc::mon_dne:
    return "Requested monitor does not exist";
  case monc_errc::timed_out:
    return "Monitor operation timed out";
  case monc_errc::mon_unavailable:
    return "Monitor unavailable";
  }
  return "Unknown error";
}
// std::string overload; delegates to the buffer-free variant above.
std::string monc_error_category::message(int ev) const {
  return message(ev, nullptr, 0);
}
// Map each monc error to a generic (or ceph) error_condition so callers
// can compare against portable conditions.
bs::error_condition monc_error_category::default_error_condition(int ev) const noexcept {
  switch (static_cast<monc_errc>(ev)) {
  case monc_errc::shutting_down:
    return bs::errc::operation_canceled;
  case monc_errc::session_reset:
    return bs::errc::resource_unavailable_try_again;
  case monc_errc::rank_dne:
    [[fallthrough]];
  case monc_errc::mon_dne:
    return ceph::errc::not_in_map;
  case monc_errc::timed_out:
    return bs::errc::timed_out;
  case monc_errc::mon_unavailable:
    return bs::errc::no_such_device;
  }
  // unknown value: condition in this category
  return { ev, *this };
}
// Extra equivalences beyond default_error_condition(): missing
// rank/name additionally compare equal to ENOENT-style conditions.
bool monc_error_category::equivalent(int ev, const bs::error_condition& c) const noexcept {
  switch (static_cast<monc_errc>(ev)) {
  case monc_errc::rank_dne:
    [[fallthrough]];
  case monc_errc::mon_dne:
    return c == bs::errc::no_such_file_or_directory;
  default:
    return default_error_condition(ev) == c;
  }
}
// Convert a monc error value to the negative errno used by legacy
// integer-returning interfaces; -EDOM for unknown values.
int monc_error_category::from_code(int ev) const noexcept {
  if (ev == 0)
    return 0;
  switch (static_cast<monc_errc>(ev)) {
  case monc_errc::shutting_down:
    return -ECANCELED;
  case monc_errc::session_reset:
    return -EAGAIN;
  case monc_errc::rank_dne:
    [[fallthrough]];
  case monc_errc::mon_dne:
    return -ENOENT;
  case monc_errc::timed_out:
    return -ETIMEDOUT;
  case monc_errc::mon_unavailable:
    return -ENXIO;
  }
  return -EDOM;
}
// Singleton accessor for the monc error category (function-local static
// gives thread-safe lazy initialization).
const bs::error_category& monc_category() noexcept {
  static const monc_error_category c;
  return c;
}
| 58,289 | 27.20029 | 126 |
cc
|
null |
ceph-main/src/mon/MonClient.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MONCLIENT_H
#define CEPH_MONCLIENT_H
#include <functional>
#include <list>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <vector>
#include "msg/Messenger.h"
#include "MonMap.h"
#include "MonSub.h"
#include "common/admin_socket.h"
#include "common/async/completion.h"
#include "common/Timer.h"
#include "common/config.h"
#include "messages/MMonGetVersion.h"
#include "auth/AuthClient.h"
#include "auth/AuthServer.h"
class MMonMap;
class MConfig;
class MMonGetVersionReply;
class MMonCommandAck;
class LogClient;
class AuthClientHandler;
class AuthRegistry;
class KeyRing;
class RotatingKeyRing;
/**
 * Authentication/session state for a single client->monitor connection.
 *
 * Used both for the MonClient's hunted/active sessions and for the
 * anonymous per-command connections of tell-style commands.  Movable but
 * not copyable; the destructor tears down the underlying connection.
 */
class MonConnection {
public:
  MonConnection(CephContext *cct,
		ConnectionRef conn,
		uint64_t global_id,
		AuthRegistry *auth_registry);
  ~MonConnection();
  MonConnection(MonConnection&& rhs) = default;
  MonConnection& operator=(MonConnection&&) = default;
  MonConnection(const MonConnection& rhs) = delete;
  MonConnection& operator=(const MonConnection&) = delete;
  // legacy (v1, MAuthReply-based) handshake entry points
  int handle_auth(MAuthReply *m,
		  const EntityName& entity_name,
		  uint32_t want_keys,
		  RotatingKeyRing* keyring);
  int authenticate(MAuthReply *m);
  // kick off the handshake (msgr2 or v1, depending on the peer address)
  void start(epoch_t epoch,
	     const EntityName& entity_name);
  // true once authentication has completed
  bool have_session() const;
  uint64_t get_global_id() const {
    return global_id;
  }
  ConnectionRef get_con() {
    return con;
  }
  std::unique_ptr<AuthClientHandler>& get_auth() {
    return auth;
  }
  // msgr2 handshake entry points (called via MonClient's AuthClient hooks)
  int get_auth_request(
    uint32_t *method,
    std::vector<uint32_t> *preferred_modes,
    ceph::buffer::list *out,
    const EntityName& entity_name,
    uint32_t want_keys,
    RotatingKeyRing* keyring);
  int handle_auth_reply_more(
    AuthConnectionMeta *auth_meta,
    const ceph::buffer::list& bl,
    ceph::buffer::list *reply);
  int handle_auth_done(
    AuthConnectionMeta *auth_meta,
    uint64_t global_id,
    const ceph::buffer::list& bl,
    CryptoKey *session_key,
    std::string *connection_secret);
  int handle_auth_bad_method(
    uint32_t old_auth_method,
    int result,
    const std::vector<uint32_t>& allowed_methods,
    const std::vector<uint32_t>& allowed_modes);
  bool is_con(Connection *c) const {
    return con.get() == c;
  }
  // stash a tell command to be sent once authentication completes
  void queue_command(Message *m) {
    pending_tell_command = m;
  }
private:
  int _negotiate(MAuthReply *m,
		 const EntityName& entity_name,
		 uint32_t want_keys,
		 RotatingKeyRing* keyring);
  int _init_auth(uint32_t method,
		 const EntityName& entity_name,
		 uint32_t want_keys,
		 RotatingKeyRing* keyring,
		 bool msgr2);
private:
  CephContext *cct;
  enum class State {
    NONE,
    NEGOTIATING,     // v1 only
    AUTHENTICATING,  // v1 and v2
    HAVE_SESSION,
  };
  State state = State::NONE;
  ConnectionRef con;
  int auth_method = -1;  // chosen auth method, or -1 if not picked yet
  utime_t auth_start;    // when the handshake began
  std::unique_ptr<AuthClientHandler> auth;
  uint64_t global_id;
  MessageRef pending_tell_command;
  AuthRegistry *auth_registry;
};
/**
 * Minimal dispatcher used to ping a monitor: opens one MonConnection,
 * waits for the CEPH_MSG_PING reply and (optionally) decodes its payload
 * into *result.  Auth callbacks are forwarded to the MonConnection.
 */
struct MonClientPinger : public Dispatcher,
			 public AuthClient {
  ceph::mutex lock = ceph::make_mutex("MonClientPinger::lock");
  ceph::condition_variable ping_recvd_cond;
  std::string *result;   // optional out: decoded ping payload
  bool done;             // reply received or connection reset
  RotatingKeyRing *keyring;
  std::unique_ptr<MonConnection> mc;
  MonClientPinger(CephContext *cct_,
		  RotatingKeyRing *keyring,
		  std::string *res_) :
    Dispatcher(cct_),
    result(res_),
    done(false),
    keyring(keyring)
  { }
  // Wait for the ping reply.  timeout <= 0 falls back to
  // client_mount_timeout.  Note: returns positive ETIMEDOUT on timeout.
  int wait_for_reply(double timeout = 0.0) {
    std::unique_lock locker{lock};
    if (timeout <= 0) {
      timeout = std::chrono::duration<double>(cct->_conf.get_val<std::chrono::seconds>("client_mount_timeout")).count();
    }
    done = false;
    if (ping_recvd_cond.wait_for(locker,
				 ceph::make_timespan(timeout),
				 [this] { return done; })) {
      return 0;
    } else {
      return ETIMEDOUT;
    }
  }
  // Consume only CEPH_MSG_PING; decode its payload into *result if asked.
  bool ms_dispatch(Message *m) override {
    using ceph::decode;
    std::lock_guard l(lock);
    if (m->get_type() != CEPH_MSG_PING)
      return false;
    ceph::buffer::list &payload = m->get_payload();
    if (result && payload.length() > 0) {
      auto p = std::cbegin(payload);
      decode(*result, p);
    }
    done = true;
    ping_recvd_cond.notify_all();
    m->put();
    return true;
  }
  // A reset also wakes the waiter (with done=true, no payload).
  bool ms_handle_reset(Connection *con) override {
    std::lock_guard l(lock);
    done = true;
    ping_recvd_cond.notify_all();
    return true;
  }
  void ms_handle_remote_reset(Connection *con) override {}
  bool ms_handle_refused(Connection *con) override {
    return false;
  }
  // AuthClient: forward the whole handshake to the MonConnection
  int get_auth_request(
    Connection *con,
    AuthConnectionMeta *auth_meta,
    uint32_t *auth_method,
    std::vector<uint32_t> *preferred_modes,
    ceph::buffer::list *bl) override {
    return mc->get_auth_request(auth_method, preferred_modes, bl,
				cct->_conf->name, 0, keyring);
  }
  int handle_auth_reply_more(
    Connection *con,
    AuthConnectionMeta *auth_meta,
    const ceph::buffer::list& bl,
    ceph::buffer::list *reply) override {
    return mc->handle_auth_reply_more(auth_meta, bl, reply);
  }
  int handle_auth_done(
    Connection *con,
    AuthConnectionMeta *auth_meta,
    uint64_t global_id,
    uint32_t con_mode,
    const ceph::buffer::list& bl,
    CryptoKey *session_key,
    std::string *connection_secret) override {
    return mc->handle_auth_done(auth_meta, global_id, bl,
				session_key, connection_secret);
  }
  int handle_auth_bad_method(
    Connection *con,
    AuthConnectionMeta *auth_meta,
    uint32_t old_auth_method,
    int result,
    const std::vector<uint32_t>& allowed_methods,
    const std::vector<uint32_t>& allowed_modes) override {
    return mc->handle_auth_bad_method(old_auth_method, result,
				      allowed_methods, allowed_modes);
  }
};
const boost::system::error_category& monc_category() noexcept;
// MonClient-specific error conditions, surfaced as
// boost::system::error_code values in monc_category().
enum class monc_errc {
  shutting_down = 1, // Command failed due to MonClient shutting down
  session_reset,     // Monitor session was reset
  rank_dne,          // Requested monitor rank does not exist
  mon_dne,           // Requested monitor does not exist
  timed_out,         // Monitor operation timed out
  mon_unavailable    // Monitor unavailable
};
// Register monc_errc as an error-code enum so Boost.System will convert it
// to boost::system::error_code via the make_error_code() overload below.
namespace boost::system {
template<>
struct is_error_code_enum<::monc_errc> {
  static const bool value = true;
};
}
// Implicit conversion: with is_error_code_enum<monc_errc> set above, this
// overload lets a monc_errc be assigned or compared to an error_code directly.
inline boost::system::error_code make_error_code(monc_errc e) noexcept {
  boost::system::error_code ec{static_cast<int>(e), monc_category()};
  return ec;
}
// Explicit conversion: builds the matching error_condition for comparisons;
// not used for implicit error_code construction.
inline boost::system::error_condition make_error_condition(monc_errc e) noexcept {
  boost::system::error_condition cond{static_cast<int>(e), monc_category()};
  return cond;
}
const boost::system::error_category& monc_category() noexcept;
/**
 * Client-side handle on the monitor cluster.
 *
 * Maintains the MonMap, hunts for and keeps a single authenticated monitor
 * session, renews subscriptions and rotating keys, and provides async
 * interfaces for mon commands ("start_mon_command") and map-version queries
 * ("get_version").  Also acts as an AuthServer for mgr/osd/mds daemons that
 * authenticate their own clients.  All mutable state is guarded by monc_lock.
 */
class MonClient : public Dispatcher,
                  public AuthClient,
                  public AuthServer, /* for mgr, osd, mds */
                  public AdminSocketHook {
  static constexpr auto dout_subsys = ceph_subsys_monc;
public:
  // Error, Newest, Oldest
  using VersionSig = void(boost::system::error_code, version_t, version_t);
  using VersionCompletion = ceph::async::Completion<VersionSig>;

  // (error, outs, outbl) — completion signature for mon commands
  using CommandSig = void(boost::system::error_code, std::string,
                          ceph::buffer::list);
  using CommandCompletion = ceph::async::Completion<CommandSig>;

  MonMap monmap;
  // config received from the mon's config db (key -> value)
  std::map<std::string,std::string> config_mgr;

private:
  Messenger *messenger;

  // the established session, if any; while hunting, candidates live in
  // pending_cons and `tried` records ranks already attempted
  std::unique_ptr<MonConnection> active_con;
  std::map<entity_addrvec_t, MonConnection> pending_cons;
  std::set<unsigned> tried;

  EntityName entity_name;

  mutable ceph::mutex monc_lock = ceph::make_mutex("MonClient::monc_lock");
  SafeTimer timer;
  boost::asio::io_context& service;
  // serializes user completions posted back from the io_context
  boost::asio::io_context::strand finish_strand{service};

  bool initialized;
  bool stopping = false;

  LogClient *log_client;
  bool more_log_pending;

  void send_log(bool flush = false);

  bool ms_dispatch(Message *m) override;
  bool ms_handle_reset(Connection *con) override;
  void ms_handle_remote_reset(Connection *con) override {}
  bool ms_handle_refused(Connection *con) override { return false; }

  void handle_monmap(MMonMap *m);
  void handle_config(MConfig *m);

  void handle_auth(MAuthReply *m);

  // AdminSocketHook entry point
  int call(
    std::string_view command,
    const cmdmap_t& cmdmap,
    const ceph::buffer::list &inbl,
    ceph::Formatter *f,
    std::ostream& errss,
    ceph::buffer::list& out) override;

  // monitor session
  utime_t last_keepalive;
  utime_t last_send_log;

  void tick();
  void schedule_tick();

  // monclient
  bool want_monmap;
  ceph::condition_variable map_cond;
  bool passthrough_monmap = false;   // see set_passthrough_monmap()
  bool want_bootstrap_config = false;
  ceph::ref_t<MConfig> bootstrap_config;

  // authenticate
  std::unique_ptr<AuthClientHandler> auth;
  uint32_t want_keys = 0;
  uint64_t global_id = 0;
  ceph::condition_variable auth_cond;
  int authenticate_err = 0;
  bool authenticated = false;

  // messages queued until a session is established
  std::list<MessageRef> waiting_for_session;
  utime_t last_rotating_renew_sent;
  bool had_a_connection;
  // backoff factor for re-hunting; grown on failure, shrunk in _un_backoff()
  double reopen_interval_multiplier;

  Dispatcher *handle_authentication_dispatcher = nullptr;

  // _-prefixed helpers expect monc_lock to be held by the caller
  bool _opened() const;
  bool _hunting() const;
  void _start_hunting();
  void _finish_hunting(int auth_err);
  void _finish_auth(int auth_err);
  void _reopen_session(int rank = -1);
  void _add_conn(unsigned rank);
  void _add_conns();
  void _un_backoff();
  void _send_mon_message(MessageRef m);

  // linear scan of pending_cons for the entry owning `con`
  std::map<entity_addrvec_t, MonConnection>::iterator _find_pending_con(
    const ConnectionRef& con) {
    for (auto i = pending_cons.begin(); i != pending_cons.end(); ++i) {
      if (i->second.get_con() == con) {
        return i;
      }
    }
    return pending_cons.end();
  }

public:
  // AuthClient
  int get_auth_request(
    Connection *con,
    AuthConnectionMeta *auth_meta,
    uint32_t *method,
    std::vector<uint32_t> *preferred_modes,
    ceph::buffer::list *bl) override;
  int handle_auth_reply_more(
    Connection *con,
    AuthConnectionMeta *auth_meta,
    const ceph::buffer::list& bl,
    ceph::buffer::list *reply) override;
  int handle_auth_done(
    Connection *con,
    AuthConnectionMeta *auth_meta,
    uint64_t global_id,
    uint32_t con_mode,
    const ceph::buffer::list& bl,
    CryptoKey *session_key,
    std::string *connection_secret) override;
  int handle_auth_bad_method(
    Connection *con,
    AuthConnectionMeta *auth_meta,
    uint32_t old_auth_method,
    int result,
    const std::vector<uint32_t>& allowed_methods,
    const std::vector<uint32_t>& allowed_modes) override;
  // AuthServer
  int handle_auth_request(
    Connection *con,
    AuthConnectionMeta *auth_meta,
    bool more,
    uint32_t auth_method,
    const ceph::buffer::list& bl,
    ceph::buffer::list *reply) override;

  void set_entity_name(EntityName name) { entity_name = name; }
  void set_handle_authentication_dispatcher(Dispatcher *d) {
    handle_authentication_dispatcher = d;
  }
  // NOTE(review): public despite the underscore prefix — presumably callers
  // are expected to hold monc_lock, per the convention above; confirm at
  // call sites.
  int _check_auth_tickets();
  int _check_auth_rotating();
  int wait_auth_rotating(double timeout);

  int authenticate(double timeout=0.0);
  bool is_authenticated() const {return authenticated;}

  bool is_connected() const { return active_con != nullptr; }

  /**
   * Try to flush as many log messages as we can in a single
   * message.  Use this before shutting down to transmit your
   * last message.
   */
  void flush_log();

private:
  // mon subscriptions
  MonSub sub;
  void _renew_subs();
  void handle_subscribe_ack(MMonSubscribeAck* m);

public:
  // thin locked wrappers around the MonSub subscription tracker
  void renew_subs() {
    std::lock_guard l(monc_lock);
    _renew_subs();
  }
  bool sub_want(std::string what, version_t start, unsigned flags) {
    std::lock_guard l(monc_lock);
    return sub.want(what, start, flags);
  }
  void sub_got(std::string what, version_t have) {
    std::lock_guard l(monc_lock);
    sub.got(what, have);
  }
  void sub_unwant(std::string what) {
    std::lock_guard l(monc_lock);
    sub.unwant(what);
  }
  bool sub_want_increment(std::string what, version_t start, unsigned flags) {
    std::lock_guard l(monc_lock);
    return sub.inc_want(what, start, flags);
  }

  std::unique_ptr<KeyRing> keyring;
  std::unique_ptr<RotatingKeyRing> rotating_secrets;

public:
  MonClient(CephContext *cct_, boost::asio::io_context& service);
  MonClient(const MonClient &) = delete;
  MonClient& operator=(const MonClient &) = delete;
  ~MonClient() override;

  int init();
  void shutdown();

  void set_log_client(LogClient *clog) {
    log_client = clog;
  }
  LogClient *get_log_client() {
    return log_client;
  }

  int build_initial_monmap();
  int get_monmap();
  int get_monmap_and_config();
  /**
   * If you want to see MonMap messages, set this and
   * the MonClient will tell the Messenger it hasn't
   * dealt with it.
   * Note that if you do this, *you* are of course responsible for
   * putting the message reference!
   */
  void set_passthrough_monmap() {
    std::lock_guard l(monc_lock);
    passthrough_monmap = true;
  }
  void unset_passthrough_monmap() {
    std::lock_guard l(monc_lock);
    passthrough_monmap = false;
  }
  /**
   * Ping monitor with ID @p mon_id and record the resulting
   * reply in @p result_reply.
   *
   * @param[in]  mon_id       Target monitor's ID
   * @param[out] result_reply reply from mon.ID, if param != NULL
   * @returns    0 in case of success; < 0 in case of error,
   *             -ETIMEDOUT if monitor didn't reply before timeout
   *             expired (default: conf->client_mount_timeout).
   */
  int ping_monitor(const std::string &mon_id, std::string *result_reply);

  // wraps a raw Message* (taking over its ref) and forwards to the
  // MessageRef overload below
  void send_mon_message(Message *m) {
    send_mon_message(MessageRef{m, false});
  }
  void send_mon_message(MessageRef m);

  void reopen_session() {
    std::lock_guard l(monc_lock);
    _reopen_session();
  }

  const uuid_d& get_fsid() const {
    return monmap.fsid;
  }

  // empty addrvec if rank i is out of range
  entity_addrvec_t get_mon_addrs(unsigned i) const {
    std::lock_guard l(monc_lock);
    if (i < monmap.size())
      return monmap.get_addrs(i);
    return entity_addrvec_t();
  }
  int get_num_mon() const {
    std::lock_guard l(monc_lock);
    return monmap.size();
  }

  uint64_t get_global_id() const {
    std::lock_guard l(monc_lock);
    return global_id;
  }

  void set_messenger(Messenger *m) { messenger = m; }
  entity_addrvec_t get_myaddrs() const { return messenger->get_myaddrs(); }
  AuthAuthorizer* build_authorizer(int service_id) const;

  void set_want_keys(uint32_t want) {
    want_keys = want;
  }

  // admin commands
private:
  uint64_t last_mon_command_tid;

  /**
   * One in-flight mon command.  Plain "mon commands" go to the leader via
   * the active session; "tell" commands (target_name/target_rank set) get
   * their own target_con/target_session.  A rados_mon_op_timeout > 0 arms
   * cancel_timer, which cancels the command when it fires.
   */
  struct MonCommand {
    // for tell only
    std::string target_name;
    int target_rank = -1;
    ConnectionRef target_con;
    std::unique_ptr<MonConnection> target_session;
    unsigned send_attempts = 0;  ///< attempt count for legacy mons
    utime_t last_send_attempt;
    uint64_t tid;
    std::vector<std::string> cmd;
    ceph::buffer::list inbl;
    std::unique_ptr<CommandCompletion> onfinish;
    std::optional<boost::asio::steady_timer> cancel_timer;

    MonCommand(MonClient& monc, uint64_t t, std::unique_ptr<CommandCompletion> onfinish)
      : tid(t), onfinish(std::move(onfinish)) {
      auto timeout =
        monc.cct->_conf.get_val<std::chrono::seconds>("rados_mon_op_timeout");
      if (timeout.count() > 0) {
        cancel_timer.emplace(monc.service, timeout);
        cancel_timer->async_wait(
          [this, &monc](boost::system::error_code ec) {
            if (ec)  // timer cancelled (command completed first)
              return;
            std::scoped_lock l(monc.monc_lock);
            monc._cancel_mon_command(tid);
          });
      }
    }

    bool is_tell() const {
      return target_name.size() || target_rank >= 0;
    }
  };
  friend MonCommand;

  std::map<uint64_t,MonCommand*> mon_commands;  // tid -> command, owned raw
  void _send_command(MonCommand *r);
  void _check_tell_commands();
  void _resend_mon_commands();
  int _cancel_mon_command(uint64_t tid);
  void _finish_command(MonCommand *r, boost::system::error_code ret, std::string_view rs,
                       bufferlist&& bl);
  void _finish_auth();
  void handle_mon_command_ack(MMonCommandAck *ack);
  void handle_command_reply(MCommandReply *reply);

public:
  /// Issue a mon command (to whichever mon the session targets); completes
  /// with (error, outs, outbl).  Fails with monc_errc::shutting_down if the
  /// client is not initialized or is stopping.
  template<typename CompletionToken>
  auto start_mon_command(const std::vector<std::string>& cmd,
                         const ceph::buffer::list& inbl,
                         CompletionToken&& token) {
    ldout(cct,10) << __func__ << " cmd=" << cmd << dendl;
    boost::asio::async_completion<CompletionToken, CommandSig> init(token);
    {
      std::scoped_lock l(monc_lock);
      auto h = CommandCompletion::create(service.get_executor(),
                                         std::move(init.completion_handler));
      if (!initialized || stopping) {
        ceph::async::post(std::move(h), monc_errc::shutting_down, std::string{},
                          bufferlist{});
      } else {
        auto r = new MonCommand(*this, ++last_mon_command_tid, std::move(h));
        r->cmd = cmd;
        r->inbl = inbl;
        mon_commands.emplace(r->tid, r);
        _send_command(r);
      }
    }
    return init.result.get();
  }
  /// As above, but "tell" a specific monitor by rank.
  template<typename CompletionToken>
  auto start_mon_command(int mon_rank, const std::vector<std::string>& cmd,
                         const ceph::buffer::list& inbl, CompletionToken&& token) {
    ldout(cct,10) << __func__ << " cmd=" << cmd << dendl;
    boost::asio::async_completion<CompletionToken, CommandSig> init(token);
    {
      std::scoped_lock l(monc_lock);
      auto h = CommandCompletion::create(service.get_executor(),
                                         std::move(init.completion_handler));
      if (!initialized || stopping) {
        ceph::async::post(std::move(h), monc_errc::shutting_down, std::string{},
                          bufferlist{});
      } else {
        auto r = new MonCommand(*this, ++last_mon_command_tid, std::move(h));
        r->target_rank = mon_rank;
        r->cmd = cmd;
        r->inbl = inbl;
        mon_commands.emplace(r->tid, r);
        _send_command(r);
      }
    }
    return init.result.get();
  }
  /// As above, but "tell" a specific monitor by name; a purely numeric
  /// name is interpreted as a rank.
  template<typename CompletionToken>
  auto start_mon_command(const std::string& mon_name,
                         const std::vector<std::string>& cmd,
                         const ceph::buffer::list& inbl,
                         CompletionToken&& token) {
    ldout(cct,10) << __func__ << " cmd=" << cmd << dendl;
    boost::asio::async_completion<CompletionToken, CommandSig> init(token);
    {
      std::scoped_lock l(monc_lock);
      auto h = CommandCompletion::create(service.get_executor(),
                                         std::move(init.completion_handler));
      if (!initialized || stopping) {
        ceph::async::post(std::move(h), monc_errc::shutting_down, std::string{},
                          bufferlist{});
      } else {
        auto r = new MonCommand(*this, ++last_mon_command_tid, std::move(h));
        // detect/tolerate mon *rank* passed as a string
        std::string err;
        int rank = strict_strtoll(mon_name.c_str(), 10, &err);
        if (err.size() == 0 && rank >= 0) {
          ldout(cct,10) << __func__ << " interpreting name '" << mon_name
                        << "' as rank " << rank << dendl;
          r->target_rank = rank;
        } else {
          r->target_name = mon_name;
        }
        r->cmd = cmd;
        r->inbl = inbl;
        mon_commands.emplace(r->tid, r);
        _send_command(r);
      }
    }
    return init.result.get();
  }

  // Adapter from the error_code-based CommandCompletion to the legacy
  // (outbl, outs, Context*) callback style used by the overloads below.
  class ContextVerter {
    std::string* outs;
    ceph::bufferlist* outbl;
    Context* onfinish;

  public:
    ContextVerter(std::string* outs, ceph::bufferlist* outbl, Context* onfinish)
      : outs(outs), outbl(outbl), onfinish(onfinish) {}
    ~ContextVerter() = default;
    ContextVerter(const ContextVerter&) = default;
    ContextVerter& operator =(const ContextVerter&) = default;
    ContextVerter(ContextVerter&&) = default;
    ContextVerter& operator =(ContextVerter&&) = default;

    void operator()(boost::system::error_code e,
                    std::string s,
                    ceph::bufferlist bl) {
      if (outs)
        *outs = std::move(s);
      if (outbl)
        *outbl = std::move(bl);
      if (onfinish)
        onfinish->complete(ceph::from_error_code(e));
    }
  };

  void start_mon_command(const std::vector<std::string>& cmd, const bufferlist& inbl,
                         bufferlist *outbl, std::string *outs,
                         Context *onfinish) {
    start_mon_command(cmd, inbl, ContextVerter(outs, outbl, onfinish));
  }
  void start_mon_command(int mon_rank,
                         const std::vector<std::string>& cmd, const bufferlist& inbl,
                         bufferlist *outbl, std::string *outs,
                         Context *onfinish) {
    start_mon_command(mon_rank, cmd, inbl, ContextVerter(outs, outbl, onfinish));
  }
  void start_mon_command(const std::string &mon_name,  ///< mon name, with mon. prefix
                         const std::vector<std::string>& cmd, const bufferlist& inbl,
                         bufferlist *outbl, std::string *outs,
                         Context *onfinish) {
    start_mon_command(mon_name, cmd, inbl, ContextVerter(outs, outbl, onfinish));
  }

  // version requests
public:
  /**
   * get latest known version(s) of cluster map
   *
   * @param map string name of map (e.g., 'osdmap')
   * @param token context that will be triggered on completion
   * @return (via Completion) {} on success,
   *         boost::system::errc::resource_unavailable_try_again if we need to
   *         resubmit our request
   */
  template<typename CompletionToken>
  auto get_version(std::string&& map, CompletionToken&& token) {
    boost::asio::async_completion<CompletionToken, VersionSig> init(token);
    {
      std::scoped_lock l(monc_lock);
      auto m = ceph::make_message<MMonGetVersion>();
      m->what = std::move(map);
      m->handle = ++version_req_id;
      version_requests.emplace(m->handle,
                               VersionCompletion::create(
                                 service.get_executor(),
                                 std::move(init.completion_handler)));
      _send_mon_message(m);
    }
    return init.result.get();
  }

  /**
   * Run a callback within our lock, with a reference
   * to the MonMap
   */
  template<typename Callback, typename...Args>
  auto with_monmap(Callback&& cb, Args&&...args) const ->
    decltype(cb(monmap, std::forward<Args>(args)...)) {
    std::lock_guard l(monc_lock);
    return std::forward<Callback>(cb)(monmap, std::forward<Args>(args)...);
  }

  void register_config_callback(md_config_t::config_callback fn);
  void register_config_notify_callback(std::function<void(void)> f) {
    config_notify_cb = f;
  }
  md_config_t::config_callback get_config_callback();

private:
  // outstanding get_version() requests keyed by MMonGetVersion::handle
  std::map<ceph_tid_t, std::unique_ptr<VersionCompletion>> version_requests;
  ceph_tid_t version_req_id;
  void handle_get_version_reply(MMonGetVersionReply* m);
  md_config_t::config_callback config_cb;
  std::function<void(void)> config_notify_cb;
};
#endif
| 22,553 | 27.73121 | 120 |
h
|
null |
ceph-main/src/mon/MonCommand.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 John Spray <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#pragma once
#include <string>
#include "include/encoding.h"
/**
 * Description of one monitor command: its parse signature, help text,
 * owning module, required caps, and behavior flags.  The mon advertises
 * arrays/vectors of these to clients; the encoded form is a wire format,
 * so field order in encode_bare()/decode_bare() must not change.
 */
struct MonCommand {
  std::string cmdstring;   // parse signature (see MonCommands.h header)
  std::string helpstring;
  std::string module;      // e.g. "mon", "osd", "auth"
  std::string req_perms;   // caps required, e.g. "rw"
  uint64_t flags;

  // MonCommand flags
  static const uint64_t FLAG_NONE       = 0;
  static const uint64_t FLAG_NOFORWARD  = 1 << 0;
  static const uint64_t FLAG_OBSOLETE   = 1 << 1;
  static const uint64_t FLAG_DEPRECATED = 1 << 2;
  static const uint64_t FLAG_MGR        = 1 << 3;
  static const uint64_t FLAG_POLL       = 1 << 4;
  static const uint64_t FLAG_HIDDEN     = 1 << 5;
  // asok and tell commands are not forwarded, and they should not be listed
  // in --help output.
  static const uint64_t FLAG_TELL       = (FLAG_NOFORWARD | FLAG_HIDDEN);

  // true iff *all* bits of `flag` are set
  bool has_flag(uint64_t flag) const { return (flags & flag) == flag; }
  void set_flag(uint64_t flag) { flags |= flag; }
  void unset_flag(uint64_t flag) { flags &= ~flag; }

  void encode(ceph::buffer::list &bl) const {
    ENCODE_START(1, 1, bl);
    encode_bare(bl);
    encode(flags, bl);
    ENCODE_FINISH(bl);
  }

  void decode(ceph::buffer::list::const_iterator &bl) {
    DECODE_START(1, bl);
    decode_bare(bl);
    decode(flags, bl);
    DECODE_FINISH(bl);
  }

  /**
   * Unversioned encoding for use within encode_array.
   */
  void encode_bare(ceph::buffer::list &bl) const {
    using ceph::encode;
    encode(cmdstring, bl);
    encode(helpstring, bl);
    encode(module, bl);
    encode(req_perms, bl);
    std::string availability = "cli,rest";  // Removed field, for backward compat
    encode(availability, bl);
  }
  void decode_bare(ceph::buffer::list::const_iterator &bl) {
    using ceph::decode;
    decode(cmdstring, bl);
    decode(helpstring, bl);
    decode(module, bl);
    decode(req_perms, bl);
    std::string availability;  // Removed field, for backward compat
    decode(availability, bl);
  }
  // flags intentionally excluded: two commands are "compatible" if they
  // parse the same and demand the same caps
  bool is_compat(const MonCommand* o) const {
    return cmdstring == o->cmdstring &&
        module == o->module && req_perms == o->req_perms;
  }

  bool is_tell() const {
    return has_flag(MonCommand::FLAG_TELL);
  }

  bool is_noforward() const {
    return has_flag(MonCommand::FLAG_NOFORWARD);
  }

  bool is_obsolete() const {
    return has_flag(MonCommand::FLAG_OBSOLETE);
  }

  bool is_deprecated() const {
    return has_flag(MonCommand::FLAG_DEPRECATED);
  }

  bool is_mgr() const {
    return has_flag(MonCommand::FLAG_MGR);
  }

  bool is_hidden() const {
    return has_flag(MonCommand::FLAG_HIDDEN);
  }

  // Array form: u16 count, then all bare bodies, then (since v2) all flags.
  static void encode_array(const MonCommand *cmds, int size, ceph::buffer::list &bl) {
    ENCODE_START(2, 1, bl);
    uint16_t s = size;
    encode(s, bl);
    for (int i = 0; i < size; ++i) {
      cmds[i].encode_bare(bl);
    }
    for (int i = 0; i < size; i++) {
      encode(cmds[i].flags, bl);
    }
    ENCODE_FINISH(bl);
  }
  // Allocates *cmds with new[]; caller owns and must delete[].
  static void decode_array(MonCommand **cmds, int *size,
                           ceph::buffer::list::const_iterator &bl) {
    DECODE_START(2, bl);
    uint16_t s = 0;
    decode(s, bl);
    *size = s;
    *cmds = new MonCommand[*size];
    for (int i = 0; i < *size; ++i) {
      (*cmds)[i].decode_bare(bl);
    }
    if (struct_v >= 2) {
      for (int i = 0; i < *size; i++)
        decode((*cmds)[i].flags, bl);
    } else {
      // v1 streams carried no flags
      for (int i = 0; i < *size; i++)
        (*cmds)[i].flags = 0;
    }
    DECODE_FINISH(bl);
  }

  // this uses a u16 for the count, so we need a special encoder/decoder.
  static void encode_vector(const std::vector<MonCommand>& cmds,
                            ceph::buffer::list &bl) {
    ENCODE_START(2, 1, bl);
    uint16_t s = cmds.size();
    encode(s, bl);
    for (unsigned i = 0; i < s; ++i) {
      cmds[i].encode_bare(bl);
    }
    for (unsigned i = 0; i < s; i++) {
      encode(cmds[i].flags, bl);
    }
    ENCODE_FINISH(bl);
  }
  static void decode_vector(std::vector<MonCommand> &cmds,
                            ceph::buffer::list::const_iterator &bl) {
    DECODE_START(2, bl);
    uint16_t s = 0;
    decode(s, bl);
    cmds.resize(s);
    for (unsigned i = 0; i < s; ++i) {
      cmds[i].decode_bare(bl);
    }
    if (struct_v >= 2) {
      for (unsigned i = 0; i < s; i++)
        decode(cmds[i].flags, bl);
    } else {
      for (unsigned i = 0; i < s; i++)
        cmds[i].flags = 0;
    }
    DECODE_FINISH(bl);
  }

  bool requires_perm(char p) const {
    return (req_perms.find(p) != std::string::npos);
  }
};
WRITE_CLASS_ENCODER(MonCommand)
| 4,823 | 26.409091 | 86 |
h
|
null |
ceph-main/src/mon/MonCommands.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 Inktank Storage, Inc.
* Copyright (C) 2013,2014 Cloudwatt <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
/* no guard; may be included multiple times */
/*
* Define commands that are reported by the monitor's
* "get_command_descriptions" command, and parsed by the Python
* frontend 'ceph' (and perhaps by other frontends, such as a RESTful
* server). The format is:
*
 * COMMAND(signature, helpstring, modulename, req perms)
* where:
* signature: describes the command and its parameters (more below)
* helpstring: displays in CLI help, API help (nice if it refers to
* parameter names from signature, 40-a few hundred chars)
* modulename: the monitor module or daemon this applies to:
* mds, osd, pg (osd), mon, auth, log, config-key, mgr
* req perms: required permission in that modulename space to execute command
* this also controls what type of REST command is accepted
*
* The commands describe themselves completely enough for the separate
* frontend(s) to be able to accept user input and validate it against
* the command descriptions, and generate a JSON object that contains
* key:value mappings of parameter names to validated parameter values.
*
* 'signature' is a space-separated list of individual command descriptors;
* each descriptor is either a literal string, which can contain no spaces or
* '=' signs (for instance, in "pg stat", both "pg" and "stat" are literal
* strings representing one descriptor each), or a list of key=val[,key=val...]
* which also includes no spaces.
*
* The key=val form describes a non-literal parameter. Each will have at
* least a name= and type=, and each type can have its own type-specific
* parameters. The parser is the arbiter of these types and their
* interpretation. A few more non-type-specific key=val pairs exist:
*
* req=false marks an optional parameter (default for req is 'true')
* n=<n> is a repeat count for how many of this argument must be supplied.
* n=1 is the default.
* n=N is a special case that means "1 or more".
*
* A perhaps-incomplete list of types:
*
* CephInt: Optional: range=min[|max]
* CephFloat: Optional range
* CephString: optional badchars
* CephSocketpath: validation involves "is it S_ISSOCK"
* CephIPAddr: v4 or v6 addr with optional port, syntax validated
* CephEntityAddr: CephIPAddr + optional '/nonce'
* CephPoolname: Plainold string
* CephObjectname: Another plainold string
* CephPgid: n.xxx where n is an int > 0, xxx is a hex number > 0
* CephName: daemon name, '*' or '<type>.<id>' (id must be int for type osd)
* CephOsdName: osd name, '*' or '<id> or 'osd.<id>' (id must be int)
* CephChoices: strings="foo|bar" means this param can be either
* CephFilepath: openable file
* CephFragment: cephfs 'fragID': val/bits, val in hex 0xnnn, bits in dec
* CephUUID: uuid in text matching Python uuid.UUID()
* CephPrefix: special type assigned to literals
*
* Example:
*
* COMMAND("auth add "
* "name=entity,type=CephString "
* "name=caps,type=CephString,n=N,req=false "
* "-- "
* "name=some_option,type=CephString,req=false",
* "add auth info for <name> from input file, or random key "
* "if no input given, and/or any caps specified in the command")
*
* defines a command "auth add" that takes a required argument "entity"
* of type "CephString", and from 1 to N arguments named "caps" of type
* CephString, at least one of which is required. The front end will
* validate user input against this description. Let's say the user
* enters auth add client.admin 'mon rwx' 'osd *'. The result will be a
* JSON object like {"prefix":"auth add", "entity":"client.admin",
* "caps":["mon rwx", "osd *"]}.
*
* The -- separates positional from non-positional (and, by implication,
* optional) arguments. Note that CephBool is assumed to be non-positional
* and will also implicitly mark that any following arguments are
* non-positional.
*
* Note that
* - string literals are accumulated into 'prefix'
* - n=1 descriptors are given normal string or int object values
* - n=N descriptors are given array values
*
* NOTE: be careful with spaces. Each descriptor must be separated by
* one space, no other characters, so if you split lines as above, be
* sure to close and reopen the quotes, and be careful to include the '
* separating spaces in the quoted string.
*
* The monitor marshals this JSON into a std::map<string, cmd_vartype>
* where cmd_vartype is a boost::variant type-enforcing discriminated
* type, so the monitor is expected to know the type of each argument.
* See cmdparse.cc/h for more details.
*
* The flag parameter for COMMAND_WITH_FLAGS macro must be passed using
* FLAG(f), where 'f' may be one of the following:
*
* NONE - no flag assigned
* NOFORWARD - command may not be forwarded
* OBSOLETE - command is considered obsolete
* DEPRECATED - command is considered deprecated
* MGR - command goes to ceph-mgr (for luminous+)
* POLL - command is intended to be called periodically by the
* client (see iostat)
 *  HIDDEN    - command is hidden (not reported by help etc)
* TELL - tell/asok command. it's an alias of (NOFORWARD | HIDDEN)
*
* A command should always be first considered DEPRECATED before being
* considered OBSOLETE, giving due consideration to users and conforming
* to any guidelines regarding deprecating commands.
*/
// NOTE: every string below is part of the externally visible command
// descriptions (parsed by the `ceph` CLI and encoded over the wire) —
// do not reword them casually.
COMMAND("pg map name=pgid,type=CephPgid", "show mapping of pg to osds", \
        "pg", "r")
COMMAND("pg repeer name=pgid,type=CephPgid", "force a PG to repeer",
        "osd", "rw")
COMMAND("osd last-stat-seq name=id,type=CephOsdName", \
        "get the last pg stats sequence number reported for this osd", \
        "osd", "r")

/*
 * auth commands AuthMonitor.cc
 */
COMMAND("auth export name=entity,type=CephString,req=false", \
        "write keyring for requested entity, or master keyring if none given", \
        "auth", "rx")
COMMAND("auth get name=entity,type=CephString", \
        "write keyring file with requested key", "auth", "rx")
COMMAND("auth get-key name=entity,type=CephString", "display requested key", \
        "auth", "rx")
COMMAND("auth print-key name=entity,type=CephString", "display requested key", \
        "auth", "rx")
COMMAND("auth print_key name=entity,type=CephString", "display requested key", \
        "auth", "rx")
COMMAND_WITH_FLAG("auth list", "list authentication state", "auth", "rx",
                  FLAG(DEPRECATED))
COMMAND("auth ls", "list authentication state", "auth", "rx")
COMMAND("auth import", "auth import: read keyring file from -i <file>",
        "auth", "rwx")
COMMAND("auth add "
        "name=entity,type=CephString "
        "name=caps,type=CephString,n=N,req=false",
        "add auth info for <entity> from input file, or random key if no "
        "input is given, and/or any caps specified in the command",
        "auth", "rwx")
COMMAND("auth get-or-create-key "
        "name=entity,type=CephString "
        "name=caps,type=CephString,n=N,req=false",
        "get, or add, key for <name> from system/caps pairs specified in the command. If key already exists, any given caps must match the existing caps for that key.",
        "auth", "rwx")
COMMAND("auth get-or-create "
        "name=entity,type=CephString "
        "name=caps,type=CephString,n=N,req=false",
        "add auth info for <entity> from input file, or random key if no input given, and/or any caps specified in the command",
        "auth", "rwx")
COMMAND("auth get-or-create-pending "
        "name=entity,type=CephString",
        "generate and/or retrieve existing pending key (rotated into place on first use)",
        "auth", "rwx")
COMMAND("auth clear-pending "
        "name=entity,type=CephString",
        "clear pending key",
        "auth", "rwx")
COMMAND("auth commit-pending "
        "name=entity,type=CephString",
        "rotate pending key into active position",
        "auth", "rwx")
COMMAND("fs authorize "
        "name=filesystem,type=CephString "
        "name=entity,type=CephString "
        "name=caps,type=CephString,n=N",
        "add auth for <entity> to access file system <filesystem> based on following directory and permissions pairs",
        "auth", "rwx")
COMMAND("auth caps "
        "name=entity,type=CephString "
        "name=caps,type=CephString,n=N",
        "update caps for <name> from caps specified in the command",
        "auth", "rwx")
COMMAND_WITH_FLAG("auth del "
                  "name=entity,type=CephString",
                  "delete all caps for <name>",
                  "auth", "rwx",
                  FLAG(DEPRECATED))
COMMAND("auth rm "
        "name=entity,type=CephString",
        "remove all caps for <name>",
        "auth", "rwx")

/*
 * Monitor commands (Monitor.cc)
 */
COMMAND_WITH_FLAG("compact", "cause compaction of monitor's RocksDB storage",
                  "mon", "rw",
                  FLAG(TELL))
COMMAND("fsid", "show cluster FSID/UUID", "mon", "r")
COMMAND("log name=logtext,type=CephString,n=N",
        "log supplied text to the monitor log", "mon", "rw")
COMMAND("log last "
        "name=num,type=CephInt,range=1,req=false "
        "name=level,type=CephChoices,strings=debug|info|sec|warn|error,req=false "
        "name=channel,type=CephChoices,strings=*|cluster|audit|cephadm,req=false",
        "print last few lines of the cluster log",
        "mon", "r")

COMMAND("status", "show cluster status", "mon", "r")
COMMAND("health name=detail,type=CephChoices,strings=detail,req=false",
        "show cluster health", "mon", "r")
COMMAND("health mute "\
        "name=code,type=CephString "
        "name=ttl,type=CephString,req=false "
        "name=sticky,type=CephBool,req=false",
        "mute health alert", "mon", "w")
COMMAND("health unmute "\
        "name=code,type=CephString,req=false",
        "unmute existing health alert mute(s)", "mon", "w")
COMMAND("time-sync-status", "show time sync status", "mon", "r")
COMMAND("df name=detail,type=CephChoices,strings=detail,req=false",
        "show cluster free space stats", "mon", "r")
COMMAND("report name=tags,type=CephString,n=N,req=false",
        "report full status of cluster, optional title tag strings",
        "mon", "r")
COMMAND("features", "report of connected features",
        "mon", "r")
COMMAND("quorum_status", "report status of monitor quorum",
        "mon", "r")
COMMAND("mon ok-to-stop "
        "name=ids,type=CephString,n=N",
        "check whether mon(s) can be safely stopped without reducing immediate "
        "availability",
        "mon", "r")
COMMAND("mon ok-to-add-offline",
        "check whether adding a mon and not starting it would break quorum",
        "mon", "r")
COMMAND("mon ok-to-rm "
        "name=id,type=CephString",
        "check whether removing the specified mon would break quorum",
        "mon", "r")

COMMAND("tell "
        "name=target,type=CephName "
        "name=args,type=CephString,n=N",
        "send a command to a specific daemon", "mon", "rw")
COMMAND_WITH_FLAG("version", "show mon daemon version", "mon", "r",
                  FLAG(TELL))

COMMAND("node ls "
        "name=type,type=CephChoices,strings=all|osd|mon|mds|mgr,req=false",
        "list all nodes in cluster [type]", "mon", "r")
/*
 * Monitor-specific commands under module 'mon'
 */
COMMAND_WITH_FLAG("mon scrub",
                  "scrub the monitor stores",
                  "mon", "rw",
                  FLAG(NONE))
COMMAND("mon metadata name=id,type=CephString,req=false",
        "fetch metadata for mon <id>",
        "mon", "r")
COMMAND("mon count-metadata name=property,type=CephString",
        "count mons by metadata field property",
        "mon", "r")
COMMAND("mon versions",
        "check running versions of monitors",
        "mon", "r")
COMMAND("versions",
        "check running versions of ceph daemons",
        "mon", "r")

/*
 * MDS commands (MDSMonitor.cc)
 */
#define FS_NAME_GOODCHARS "[A-Za-z0-9-_.]"
COMMAND_WITH_FLAG("mds stat", "show MDS status", "mds", "r", FLAG(HIDDEN))
COMMAND("fs dump "
        "name=epoch,type=CephInt,req=false,range=0",
        "dump all CephFS status, optionally from epoch", "mds", "r")
COMMAND("mds metadata name=who,type=CephString,req=false",
        "fetch metadata for mds <role>",
        "mds", "r")
COMMAND("mds count-metadata name=property,type=CephString",
        "count MDSs by metadata field property",
        "mds", "r")
COMMAND("mds versions",
        "check running versions of MDSs",
        "mds", "r")
COMMAND("mds ok-to-stop name=ids,type=CephString,n=N",
        "check whether stopping the specified MDS would reduce immediate availability",
        "mds", "r")
COMMAND_WITH_FLAG("mds freeze name=role_or_gid,type=CephString"
                  " name=val,type=CephString",
                  "freeze MDS yes/no", "mds", "rw", FLAG(HIDDEN))
// arbitrary limit 0-20 below; worth standing on head to make it
// relate to actual state definitions?
// #include "include/ceph_fs.h"
COMMAND_WITH_FLAG("mds set_state "
                  "name=gid,type=CephInt,range=0 "
                  "name=state,type=CephInt,range=0|20",
                  "set mds state of <gid> to <numeric-state>", "mds", "rw", FLAG(HIDDEN))
COMMAND("mds fail name=role_or_gid,type=CephString",
        "Mark MDS failed: trigger a failover if a standby is available",
        "mds", "rw")
COMMAND("mds repaired name=role,type=CephString",
        "mark a damaged MDS rank as no longer damaged", "mds", "rw")
COMMAND("mds rm "
        "name=gid,type=CephInt,range=0",
        "remove nonactive mds", "mds", "rw")
COMMAND_WITH_FLAG("mds rmfailed name=role,type=CephString "
                  "name=yes_i_really_mean_it,type=CephBool,req=false",
                  "remove failed rank", "mds", "rw", FLAG(HIDDEN))
COMMAND_WITH_FLAG("mds compat show", "show mds compatibility settings",
                  "mds", "r", FLAG(DEPRECATED))
COMMAND("fs compat show "
        "name=fs_name,type=CephString ",
        "show fs compatibility settings",
        "mds", "r")
COMMAND_WITH_FLAG("mds compat rm_compat "
                  "name=feature,type=CephInt,range=0",
                  "remove compatible feature", "mds", "rw", FLAG(DEPRECATED))
COMMAND_WITH_FLAG("mds compat rm_incompat "
                  "name=feature,type=CephInt,range=0",
                  "remove incompatible feature", "mds", "rw", FLAG(DEPRECATED))
COMMAND("fs new "
        "name=fs_name,type=CephString,goodchars=" FS_NAME_GOODCHARS
        " name=metadata,type=CephString "
        "name=data,type=CephString "
        "name=force,type=CephBool,req=false "
        "name=allow_dangerous_metadata_overlay,type=CephBool,req=false "
        "name=fscid,type=CephInt,range=0,req=false "
        "name=recover,type=CephBool,req=false",
        "make new filesystem using named pools <metadata> and <data>",
        "fs", "rw")
COMMAND("fs fail "
        "name=fs_name,type=CephString ",
        "bring the file system down and all of its ranks",
        "fs", "rw")
COMMAND("fs rm "
        "name=fs_name,type=CephString "
        "name=yes_i_really_mean_it,type=CephBool,req=false",
        "disable the named filesystem",
        "fs", "rw")
COMMAND("fs reset "
"name=fs_name,type=CephString "
"name=yes_i_really_mean_it,type=CephBool,req=false",
"disaster recovery only: reset to a single-MDS map",
"fs", "rw")
COMMAND("fs ls ",
"list filesystems",
"fs", "r")
COMMAND("fs get name=fs_name,type=CephString",
"get info about one filesystem",
"fs", "r")
COMMAND("fs set "
"name=fs_name,type=CephString "
"name=var,type=CephChoices,strings=max_mds|max_file_size"
"|allow_new_snaps|inline_data|cluster_down|allow_dirfrags|balancer"
"|standby_count_wanted|session_timeout|session_autoclose"
"|allow_standby_replay|down|joinable|min_compat_client|bal_rank_mask"
"|refuse_client_session|max_xattr_size "
"name=val,type=CephString "
"name=yes_i_really_mean_it,type=CephBool,req=false "
"name=yes_i_really_really_mean_it,type=CephBool,req=false",
"set fs parameter <var> to <val>", "mds", "rw")
COMMAND("fs flag set name=flag_name,type=CephChoices,strings=enable_multiple "
"name=val,type=CephString "
"name=yes_i_really_mean_it,type=CephBool,req=false",
"Set a global CephFS flag",
"fs", "rw")
COMMAND("fs feature ls",
"list available cephfs features to be set/unset",
"mds", "r")
COMMAND("fs lsflags name=fs_name,type=CephString",
"list the flags set on a ceph filesystem",
"fs", "r")
COMMAND("fs compat "
"name=fs_name,type=CephString "
"name=subop,type=CephChoices,strings=rm_compat|rm_incompat|add_compat|add_incompat "
"name=feature,type=CephInt "
"name=feature_str,type=CephString,req=false ",
"manipulate compat settings", "fs", "rw")
COMMAND("fs required_client_features "
"name=fs_name,type=CephString "
"name=subop,type=CephChoices,strings=add|rm "
"name=val,type=CephString ",
"add/remove required features of clients", "mds", "rw")
COMMAND("fs add_data_pool name=fs_name,type=CephString "
"name=pool,type=CephString",
"add data pool <pool>", "mds", "rw")
COMMAND("fs rm_data_pool name=fs_name,type=CephString "
"name=pool,type=CephString",
"remove data pool <pool>", "mds", "rw")
COMMAND_WITH_FLAG("fs set_default name=fs_name,type=CephString",
"set the default to the named filesystem",
"fs", "rw",
FLAG(DEPRECATED))
COMMAND("fs set-default name=fs_name,type=CephString",
"set the default to the named filesystem",
"fs", "rw")
COMMAND("fs mirror enable "
"name=fs_name,type=CephString ",
"enable mirroring for a ceph filesystem", "mds", "rw")
COMMAND("fs mirror disable "
"name=fs_name,type=CephString ",
"disable mirroring for a ceph filesystem", "mds", "rw")
COMMAND("fs mirror peer_add "
"name=fs_name,type=CephString "
"name=uuid,type=CephString "
"name=remote_cluster_spec,type=CephString "
"name=remote_fs_name,type=CephString",
"add a mirror peer for a ceph filesystem", "mds", "rw")
COMMAND("fs mirror peer_remove "
"name=fs_name,type=CephString "
"name=uuid,type=CephString ",
"remove a mirror peer for a ceph filesystem", "mds", "rw")
COMMAND("fs rename "
"name=fs_name,type=CephString "
"name=new_fs_name,type=CephString,goodchars=" FS_NAME_GOODCHARS
" name=yes_i_really_mean_it,type=CephBool,req=false",
"rename a ceph file system", "mds", "rw")
/*
 * Monmap commands
 */
COMMAND("mon dump "
	"name=epoch,type=CephInt,range=0,req=false",
	"dump formatted monmap (optionally from epoch)",
	"mon", "r")
COMMAND("mon stat", "summarize monitor status", "mon", "r")
COMMAND("mon getmap "
	"name=epoch,type=CephInt,range=0,req=false",
	"get monmap", "mon", "r")
COMMAND("mon add "
	"name=name,type=CephString "
	"name=addr,type=CephIPAddr "
	"name=location,type=CephString,n=N,goodchars=[A-Za-z0-9-_.=],req=false",
	"add new monitor named <name> at <addr>, possibly with CRUSH location <location>", "mon", "rw")
COMMAND("mon rm "
	"name=name,type=CephString",
	"remove monitor named <name>", "mon", "rw")
COMMAND_WITH_FLAG("mon remove "
	"name=name,type=CephString",
	"remove monitor named <name>", "mon", "rw",
	FLAG(DEPRECATED))
COMMAND("mon feature ls "
	"name=with_value,type=CephBool,req=false",
	"list available mon map features to be set/unset",
	"mon", "r")
COMMAND("mon feature set "
	"name=feature_name,type=CephString "
	"name=yes_i_really_mean_it,type=CephBool,req=false",
	"set provided feature on mon map",
	"mon", "rw")
COMMAND("mon set-rank "
	"name=name,type=CephString "
	"name=rank,type=CephInt",
	"set the rank for the specified mon",
	"mon", "rw")
COMMAND("mon set-addrs "
	"name=name,type=CephString "
	"name=addrs,type=CephString",
	"set the addrs (IPs and ports) a specific monitor binds to",
	"mon", "rw")
COMMAND("mon set-weight "
	"name=name,type=CephString "
	"name=weight,type=CephInt,range=0|65535",
	"set the weight for the specified mon",
	"mon", "rw")
COMMAND("mon enable-msgr2",
	"enable the msgr2 protocol on port 3300",
	"mon", "rw")
COMMAND("mon set election_strategy " \
	"name=strategy,type=CephString", \
	"set the election strategy to use; choices classic, disallow, connectivity", \
	"mon", "rw")
COMMAND("mon add disallowed_leader " \
	"name=name,type=CephString", \
	"prevent the named mon from being a leader", \
	"mon", "rw")
COMMAND("mon rm disallowed_leader " \
	"name=name,type=CephString", \
	"allow the named mon to be a leader again", \
	"mon", "rw")
COMMAND("mon set_location " \
	"name=name,type=CephString "
	"name=args,type=CephString,n=N,goodchars=[A-Za-z0-9-_.=]",
	"specify location <args> for the monitor <name>, using CRUSH bucket names", \
	"mon", "rw")
// NOTE(review): the trailing ", " inside the three fragments below yields
// empty descriptor tokens after concatenation -- presumably tolerated by
// the command-signature parser; confirm before "cleaning up".
COMMAND("mon enable_stretch_mode " \
	"name=tiebreaker_mon,type=CephString, "
	"name=new_crush_rule,type=CephString, "
	"name=dividing_bucket,type=CephString, ",
	"enable stretch mode, changing the peering rules and "
	"failure handling on all pools with <tiebreaker_mon> "
	"as the tiebreaker and setting <dividing_bucket> locations "
	"as the units for stretching across",
	"mon", "rw")
COMMAND("mon set_new_tiebreaker " \
	"name=name,type=CephString "
	"name=yes_i_really_mean_it,type=CephBool,req=false",
	"switch the stretch tiebreaker to be the named mon", \
	"mon", "rw")
/*
 * OSD commands
 */
COMMAND("osd stat", "print summary of OSD map", "osd", "r")
COMMAND("osd dump "
	"name=epoch,type=CephInt,range=0,req=false",
	"print summary of OSD map", "osd", "r")
COMMAND("osd info "
	"name=id,type=CephOsdName,req=false",
	"print osd's {id} information (instead of all osds from map)",
	"osd", "r")
COMMAND("osd tree "
	"name=epoch,type=CephInt,range=0,req=false "
	"name=states,type=CephChoices,strings=up|down|in|out|destroyed,n=N,req=false",
	"print OSD tree", "osd", "r")
COMMAND("osd tree-from "
	"name=epoch,type=CephInt,range=0,req=false "
	"name=bucket,type=CephString "
	"name=states,type=CephChoices,strings=up|down|in|out|destroyed,n=N,req=false",
	"print OSD tree in bucket", "osd", "r")
COMMAND("osd ls "
	"name=epoch,type=CephInt,range=0,req=false",
	"show all OSD ids", "osd", "r")
COMMAND("osd getmap "
	"name=epoch,type=CephInt,range=0,req=false",
	"get OSD map", "osd", "r")
COMMAND("osd getcrushmap "
	"name=epoch,type=CephInt,range=0,req=false",
	"get CRUSH map", "osd", "r")
COMMAND("osd getmaxosd", "show largest OSD id", "osd", "r")
COMMAND("osd ls-tree "
	"name=epoch,type=CephInt,range=0,req=false "
	"name=name,type=CephString,req=true",
	"show OSD ids under bucket <name> in the CRUSH map",
	"osd", "r")
COMMAND("osd find "
	"name=id,type=CephOsdName",
	"find osd <id> in the CRUSH map and show its location",
	"osd", "r")
COMMAND("osd metadata "
	"name=id,type=CephOsdName,req=false",
	"fetch metadata for osd {id} (default all)",
	"osd", "r")
COMMAND("osd count-metadata name=property,type=CephString",
	"count OSDs by metadata field property",
	"osd", "r")
COMMAND("osd versions",
	"check running versions of OSDs",
	"osd", "r")
COMMAND("osd numa-status",
	"show NUMA status of OSDs",
	"osd", "r")
COMMAND("osd map "
	"name=pool,type=CephPoolname "
	"name=object,type=CephObjectname "
	"name=nspace,type=CephString,req=false",
	"find pg for <object> in <pool> with [namespace]", "osd", "r")
COMMAND_WITH_FLAG("osd lspools",
	"list pools", "osd", "r", FLAG(DEPRECATED))
// CRUSH hierarchy, rule, and device-class management.
COMMAND_WITH_FLAG("osd crush rule list", "list crush rules", "osd", "r",
	FLAG(DEPRECATED))
COMMAND("osd crush rule ls", "list crush rules", "osd", "r")
COMMAND("osd crush rule ls-by-class "
	"name=class,type=CephString,goodchars=[A-Za-z0-9-_.]",
	"list all crush rules that reference the same <class>",
	"osd", "r")
COMMAND("osd crush rule dump "
	"name=name,type=CephString,goodchars=[A-Za-z0-9-_.],req=false",
	"dump crush rule <name> (default all)",
	"osd", "r")
COMMAND("osd crush dump",
	"dump crush map",
	"osd", "r")
COMMAND("osd setcrushmap name=prior_version,type=CephInt,req=false",
	"set crush map from input file",
	"osd", "rw")
COMMAND("osd crush set name=prior_version,type=CephInt,req=false",
	"set crush map from input file",
	"osd", "rw")
COMMAND("osd crush add-bucket "
	"name=name,type=CephString,goodchars=[A-Za-z0-9-_.] "
	"name=type,type=CephString "
	"name=args,type=CephString,n=N,goodchars=[A-Za-z0-9-_.=],req=false",
	"add no-parent (probably root) crush bucket <name> of type <type> "
	"to location <args>",
	"osd", "rw")
COMMAND("osd crush rename-bucket "
	"name=srcname,type=CephString,goodchars=[A-Za-z0-9-_.] "
	"name=dstname,type=CephString,goodchars=[A-Za-z0-9-_.]",
	"rename bucket <srcname> to <dstname>",
	"osd", "rw")
COMMAND("osd crush set "
	"name=id,type=CephOsdName "
	"name=weight,type=CephFloat,range=0.0 "
	"name=args,type=CephString,n=N,goodchars=[A-Za-z0-9-_.=]",
	"update crushmap position and weight for <name> to <weight> with location <args>",
	"osd", "rw")
COMMAND("osd crush add "
	"name=id,type=CephOsdName "
	"name=weight,type=CephFloat,range=0.0 "
	"name=args,type=CephString,n=N,goodchars=[A-Za-z0-9-_.=]",
	"add or update crushmap position and weight for <name> with <weight> and location <args>",
	"osd", "rw")
COMMAND("osd crush set-all-straw-buckets-to-straw2",
	"convert all CRUSH current straw buckets to use the straw2 algorithm",
	"osd", "rw")
COMMAND("osd crush class create "
	"name=class,type=CephString,goodchars=[A-Za-z0-9-_]",
	"create crush device class <class>",
	"osd", "rw")
COMMAND("osd crush class rm "
	"name=class,type=CephString,goodchars=[A-Za-z0-9-_]",
	"remove crush device class <class>",
	"osd", "rw")
COMMAND("osd crush set-device-class "
	"name=class,type=CephString "
	"name=ids,type=CephString,n=N",
	"set the <class> of the osd(s) <id> [<id>...],"
	"or use <all|any> to set all.",
	"osd", "rw")
COMMAND("osd crush rm-device-class "
	"name=ids,type=CephString,n=N",
	"remove class of the osd(s) <id> [<id>...],"
	"or use <all|any> to remove all.",
	"osd", "rw")
COMMAND("osd crush class rename "
	"name=srcname,type=CephString,goodchars=[A-Za-z0-9-_] "
	"name=dstname,type=CephString,goodchars=[A-Za-z0-9-_]",
	"rename crush device class <srcname> to <dstname>",
	"osd", "rw")
COMMAND("osd crush create-or-move "
	"name=id,type=CephOsdName "
	"name=weight,type=CephFloat,range=0.0 "
	"name=args,type=CephString,n=N,goodchars=[A-Za-z0-9-_.=]",
	"create entry or move existing entry for <name> <weight> at/to location <args>",
	"osd", "rw")
COMMAND("osd crush move "
	"name=name,type=CephString,goodchars=[A-Za-z0-9-_.] "
	"name=args,type=CephString,n=N,goodchars=[A-Za-z0-9-_.=]",
	"move existing entry for <name> to location <args>",
	"osd", "rw")
COMMAND("osd crush swap-bucket "
	"name=source,type=CephString,goodchars=[A-Za-z0-9-_.] "
	"name=dest,type=CephString,goodchars=[A-Za-z0-9-_.] "
	"name=yes_i_really_mean_it,type=CephBool,req=false",
	"swap existing bucket contents from (orphan) bucket <source> and <target>",
	"osd", "rw")
COMMAND("osd crush link "
	"name=name,type=CephString "
	"name=args,type=CephString,n=N,goodchars=[A-Za-z0-9-_.=]",
	"link existing entry for <name> under location <args>",
	"osd", "rw")
COMMAND("osd crush rm "
	"name=name,type=CephString,goodchars=[A-Za-z0-9-_.] "
	"name=ancestor,type=CephString,req=false,goodchars=[A-Za-z0-9-_.]",
	"remove <name> from crush map (everywhere, or just at <ancestor>)",\
	"osd", "rw")
COMMAND_WITH_FLAG("osd crush remove "
	"name=name,type=CephString,goodchars=[A-Za-z0-9-_.] "
	"name=ancestor,type=CephString,req=false,goodchars=[A-Za-z0-9-_.]",
	"remove <name> from crush map (everywhere, or just at <ancestor>)",
	"osd", "rw",
	FLAG(DEPRECATED))
COMMAND("osd crush unlink "
	"name=name,type=CephString,goodchars=[A-Za-z0-9-_.] "
	"name=ancestor,type=CephString,req=false,goodchars=[A-Za-z0-9-_.]",
	"unlink <name> from crush map (everywhere, or just at <ancestor>)",
	"osd", "rw")
COMMAND("osd crush reweight-all",
	"recalculate the weights for the tree to ensure they sum correctly",
	"osd", "rw")
COMMAND("osd crush reweight "
	"name=name,type=CephString,goodchars=[A-Za-z0-9-_.] "
	"name=weight,type=CephFloat,range=0.0",
	"change <name>'s weight to <weight> in crush map",
	"osd", "rw")
COMMAND("osd crush reweight-subtree "
	"name=name,type=CephString,goodchars=[A-Za-z0-9-_.] "
	"name=weight,type=CephFloat,range=0.0",
	"change all leaf items beneath <name> to <weight> in crush map",
	"osd", "rw")
COMMAND("osd crush tunables "
	"name=profile,type=CephChoices,strings=legacy|argonaut|bobtail|firefly|hammer|jewel|optimal|default",
	"set crush tunables values to <profile>", "osd", "rw")
COMMAND("osd crush set-tunable "
	"name=tunable,type=CephChoices,strings=straw_calc_version "
	"name=value,type=CephInt",
	"set crush tunable <tunable> to <value>",
	"osd", "rw")
COMMAND("osd crush get-tunable "
	"name=tunable,type=CephChoices,strings=straw_calc_version",
	"get crush tunable <tunable>",
	"osd", "r")
COMMAND("osd crush show-tunables",
	"show current crush tunables", "osd", "r")
COMMAND("osd crush rule create-simple "
	"name=name,type=CephString,goodchars=[A-Za-z0-9-_.] "
	"name=root,type=CephString,goodchars=[A-Za-z0-9-_.] "
	"name=type,type=CephString,goodchars=[A-Za-z0-9-_.] "
	"name=mode,type=CephChoices,strings=firstn|indep,req=false",
	"create crush rule <name> to start from <root>, replicate across buckets of type <type>, using a choose mode of <firstn|indep> (default firstn; indep best for erasure pools)",
	"osd", "rw")
COMMAND("osd crush rule create-replicated "
	"name=name,type=CephString,goodchars=[A-Za-z0-9-_.] "
	"name=root,type=CephString,goodchars=[A-Za-z0-9-_.] "
	"name=type,type=CephString,goodchars=[A-Za-z0-9-_.] "
	"name=class,type=CephString,goodchars=[A-Za-z0-9-_.],req=false",
	"create crush rule <name> for replicated pool to start from <root>, replicate across buckets of type <type>, use devices of type <class> (ssd or hdd)",
	"osd", "rw")
COMMAND("osd crush rule create-erasure "
	"name=name,type=CephString,goodchars=[A-Za-z0-9-_.] "
	"name=profile,type=CephString,req=false,goodchars=[A-Za-z0-9-_.=]",
	"create crush rule <name> for erasure coded pool created with <profile> (default default)",
	"osd", "rw")
COMMAND("osd crush rule rm "
	"name=name,type=CephString,goodchars=[A-Za-z0-9-_.] ",
	"remove crush rule <name>", "osd", "rw")
COMMAND("osd crush rule rename "
	"name=srcname,type=CephString,goodchars=[A-Za-z0-9-_.] "
	"name=dstname,type=CephString,goodchars=[A-Za-z0-9-_.]",
	"rename crush rule <srcname> to <dstname>",
	"osd", "rw")
COMMAND("osd crush tree "
	"name=show_shadow,type=CephBool,req=false",
	"dump crush buckets and items in a tree view",
	"osd", "r")
COMMAND("osd crush ls name=node,type=CephString,goodchars=[A-Za-z0-9-_.]",
	"list items beneath a node in the CRUSH tree",
	"osd", "r")
COMMAND("osd crush class ls",
	"list all crush device classes",
	"osd", "r")
COMMAND("osd crush class ls-osd "
	"name=class,type=CephString,goodchars=[A-Za-z0-9-_]",
	"list all osds belonging to the specific <class>",
	"osd", "r")
COMMAND("osd crush get-device-class "
	"name=ids,type=CephString,n=N",
	"get classes of specified osd(s) <id> [<id>...]",
	"osd", "r")
COMMAND("osd crush weight-set ls",
	"list crush weight sets",
	"osd", "r")
COMMAND("osd crush weight-set dump",
	"dump crush weight sets",
	"osd", "r")
COMMAND("osd crush weight-set create-compat",
	"create a default backward-compatible weight-set",
	"osd", "rw")
COMMAND("osd crush weight-set create "
	"name=pool,type=CephPoolname "\
	"name=mode,type=CephChoices,strings=flat|positional",
	"create a weight-set for a given pool",
	"osd", "rw")
COMMAND("osd crush weight-set rm name=pool,type=CephPoolname",
	"remove the weight-set for a given pool",
	"osd", "rw")
COMMAND("osd crush weight-set rm-compat",
	"remove the backward-compatible weight-set",
	"osd", "rw")
COMMAND("osd crush weight-set reweight "
	"name=pool,type=CephPoolname "
	"name=item,type=CephString "
	"name=weight,type=CephFloat,range=0.0,n=N",
	"set weight for an item (bucket or osd) in a pool's weight-set",
	"osd", "rw")
COMMAND("osd crush weight-set reweight-compat "
	"name=item,type=CephString "
	"name=weight,type=CephFloat,range=0.0,n=N",
	"set weight for an item (bucket or osd) in the backward-compatible weight-set",
	"osd", "rw")
COMMAND("osd setmaxosd "
	"name=newmax,type=CephInt,range=0",
	"set new maximum osd value", "osd", "rw")
COMMAND("osd set-full-ratio "
	"name=ratio,type=CephFloat,range=0.0|1.0",
	"set usage ratio at which OSDs are marked full",
	"osd", "rw")
COMMAND("osd set-backfillfull-ratio "
	"name=ratio,type=CephFloat,range=0.0|1.0",
	"set usage ratio at which OSDs are marked too full to backfill",
	"osd", "rw")
COMMAND("osd set-nearfull-ratio "
	"name=ratio,type=CephFloat,range=0.0|1.0",
	"set usage ratio at which OSDs are marked near-full",
	"osd", "rw")
COMMAND("osd get-require-min-compat-client",
	"get the minimum client version we will maintain compatibility with",
	"osd", "r")
COMMAND("osd set-require-min-compat-client "
	"name=version,type=CephString "
	"name=yes_i_really_mean_it,type=CephBool,req=false",
	"set the minimum client version we will maintain compatibility with",
	"osd", "rw")
COMMAND("osd pause", "pause osd", "osd", "rw")
COMMAND("osd unpause", "unpause osd", "osd", "rw")
COMMAND("osd erasure-code-profile set "
	"name=name,type=CephString,goodchars=[A-Za-z0-9-_.] "
	"name=profile,type=CephString,n=N,req=false "
	"name=force,type=CephBool,req=false",
	"create erasure code profile <name> with [<key[=value]> ...] pairs. Add a --force at the end to override an existing profile (VERY DANGEROUS)",
	"osd", "rw")
COMMAND("osd erasure-code-profile get "
	"name=name,type=CephString,goodchars=[A-Za-z0-9-_.]",
	"get erasure code profile <name>",
	"osd", "r")
COMMAND("osd erasure-code-profile rm "
	"name=name,type=CephString,goodchars=[A-Za-z0-9-_.]",
	"remove erasure code profile <name>",
	"osd", "rw")
COMMAND("osd erasure-code-profile ls",
	"list all erasure code profiles",
	"osd", "r")
COMMAND("osd set "
	"name=key,type=CephChoices,strings=full|pause|noup|nodown|"
	"noout|noin|nobackfill|norebalance|norecover|noscrub|nodeep-scrub|"
	"notieragent|nosnaptrim|pglog_hardlimit "
	"name=yes_i_really_mean_it,type=CephBool,req=false",
	"set <key>", "osd", "rw")
COMMAND("osd unset "
	"name=key,type=CephChoices,strings=full|pause|noup|nodown|"\
	"noout|noin|nobackfill|norebalance|norecover|noscrub|nodeep-scrub|"
	"notieragent|nosnaptrim",
	"unset <key>", "osd", "rw")
COMMAND("osd require-osd-release "\
	"name=release,type=CephChoices,strings=octopus|pacific|quincy|reef "
	"name=yes_i_really_mean_it,type=CephBool,req=false",
	"set the minimum allowed OSD release to participate in the cluster",
	"osd", "rw")
COMMAND("osd down "
	"name=ids,type=CephString,n=N "
	"name=definitely_dead,type=CephBool,req=false",
	"set osd(s) <id> [<id>...] down, "
	"or use <any|all> to set all osds down",
	"osd", "rw")
// NOTE(review): "type=" precedes "name=" here (and in a few entries
// below); presumably the descriptor parser is order-independent over
// key=value pairs -- confirm before normalizing.
COMMAND("osd stop "
	"type=CephString,name=ids,n=N",
	"stop the corresponding osd daemons and mark them as down",
	"osd", "rw")
COMMAND("osd out "
	"name=ids,type=CephString,n=N",
	"set osd(s) <id> [<id>...] out, "
	"or use <any|all> to set all osds out",
	"osd", "rw")
COMMAND("osd in "
	"name=ids,type=CephString,n=N",
	"set osd(s) <id> [<id>...] in, "
	"can use <any|all> to automatically set all previously out osds in",
	"osd", "rw")
COMMAND_WITH_FLAG("osd rm "
	"name=ids,type=CephString,n=N",
	"remove osd(s) <id> [<id>...], "
	"or use <any|all> to remove all osds",
	"osd", "rw",
	FLAG(DEPRECATED))
COMMAND_WITH_FLAG("osd add-noup "
	"name=ids,type=CephString,n=N",
	"mark osd(s) <id> [<id>...] as noup, "
	"or use <all|any> to mark all osds as noup",
	"osd", "rw",
	FLAG(DEPRECATED))
COMMAND_WITH_FLAG("osd add-nodown "
	"name=ids,type=CephString,n=N",
	"mark osd(s) <id> [<id>...] as nodown, "
	"or use <all|any> to mark all osds as nodown",
	"osd", "rw",
	FLAG(DEPRECATED))
COMMAND_WITH_FLAG("osd add-noin "
	"name=ids,type=CephString,n=N",
	"mark osd(s) <id> [<id>...] as noin, "
	"or use <all|any> to mark all osds as noin",
	"osd", "rw",
	FLAG(DEPRECATED))
COMMAND_WITH_FLAG("osd add-noout "
	"name=ids,type=CephString,n=N",
	"mark osd(s) <id> [<id>...] as noout, "
	"or use <all|any> to mark all osds as noout",
	"osd", "rw",
	FLAG(DEPRECATED))
COMMAND_WITH_FLAG("osd rm-noup "
	"name=ids,type=CephString,n=N",
	"allow osd(s) <id> [<id>...] to be marked up "
	"(if they are currently marked as noup), "
	"can use <all|any> to automatically filter out all noup osds",
	"osd", "rw",
	FLAG(DEPRECATED))
COMMAND_WITH_FLAG("osd rm-nodown "
	"name=ids,type=CephString,n=N",
	"allow osd(s) <id> [<id>...] to be marked down "
	"(if they are currently marked as nodown), "
	"can use <all|any> to automatically filter out all nodown osds",
	"osd", "rw",
	FLAG(DEPRECATED))
COMMAND_WITH_FLAG("osd rm-noin "
	"name=ids,type=CephString,n=N",
	"allow osd(s) <id> [<id>...] to be marked in "
	"(if they are currently marked as noin), "
	"can use <all|any> to automatically filter out all noin osds",
	"osd", "rw",
	FLAG(DEPRECATED))
COMMAND_WITH_FLAG("osd rm-noout "
	"name=ids,type=CephString,n=N",
	"allow osd(s) <id> [<id>...] to be marked out "
	"(if they are currently marked as noout), "
	"can use <all|any> to automatically filter out all noout osds",
	"osd", "rw",
	FLAG(DEPRECATED))
COMMAND("osd set-group "
	"name=flags,type=CephString "
	"name=who,type=CephString,n=N",
	"set <flags> for batch osds or crush nodes, "
	"<flags> must be a comma-separated subset of {noup,nodown,noin,noout}",
	"osd", "rw")
COMMAND("osd unset-group "
	"name=flags,type=CephString "
	"name=who,type=CephString,n=N",
	"unset <flags> for batch osds or crush nodes, "
	"<flags> must be a comma-separated subset of {noup,nodown,noin,noout}",
	"osd", "rw")
COMMAND("osd reweight "
	"name=id,type=CephOsdName "
	"type=CephFloat,name=weight,range=0.0|1.0",
	"reweight osd to 0.0 < <weight> < 1.0", "osd", "rw")
COMMAND("osd reweightn "
	"name=weights,type=CephString",
	"reweight osds with {<id>: <weight>,...}",
	"osd", "rw")
COMMAND("osd force-create-pg "
	"name=pgid,type=CephPgid "\
	"name=yes_i_really_mean_it,type=CephBool,req=false",
	"force creation of pg <pgid>",
	"osd", "rw")
COMMAND("osd pg-temp "
	"name=pgid,type=CephPgid "
	"name=id,type=CephOsdName,n=N,req=false",
	"set pg_temp mapping <pgid>:[<id> [<id>...]] (developers only)",
	"osd", "rw")
COMMAND("osd pg-upmap "
	"name=pgid,type=CephPgid "
	"name=id,type=CephOsdName,n=N",
	"set pg_upmap mapping <pgid>:[<id> [<id>...]] (developers only)",
	"osd", "rw")
COMMAND("osd rm-pg-upmap "
	"name=pgid,type=CephPgid",
	"clear pg_upmap mapping for <pgid> (developers only)",
	"osd", "rw")
COMMAND("osd pg-upmap-items "
	"name=pgid,type=CephPgid "
	"name=id,type=CephOsdName,n=N",
	"set pg_upmap_items mapping <pgid>:{<id> to <id>, [...]} (developers only)",
	"osd", "rw")
COMMAND("osd rm-pg-upmap-items "
	"name=pgid,type=CephPgid",
	"clear pg_upmap_items mapping for <pgid> (developers only)",
	"osd", "rw")
COMMAND("osd pg-upmap-primary "
	"name=pgid,type=CephPgid "
	"name=id,type=CephOsdName ",
	"set pg primary osd <pgid>:<id> (id (osd) must be part of pgid)",
	"osd", "rw")
COMMAND("osd rm-pg-upmap-primary "
	"name=pgid,type=CephPgid ",
	"clear pg primary setting for <pgid>",
	"osd", "rw")
COMMAND("osd primary-temp "
	"name=pgid,type=CephPgid "
	"name=id,type=CephOsdName",
	"set primary_temp mapping pgid:<id> (developers only)",
	"osd", "rw")
COMMAND("osd rm-primary-temp "
	"name=pgid,type=CephPgid ",
	"clear primary_temp mapping pgid (developers only)",
	"osd", "rw")
COMMAND("osd primary-affinity "
	"name=id,type=CephOsdName "
	"type=CephFloat,name=weight,range=0.0|1.0",
	"adjust osd primary-affinity from 0.0 <= <weight> <= 1.0",
	"osd", "rw")
COMMAND_WITH_FLAG("osd destroy-actual "
	"name=id,type=CephOsdName "
	"name=yes_i_really_mean_it,type=CephBool,req=false",
	"mark osd as being destroyed. Keeps the ID intact (allowing reuse), "
	"but removes cephx keys, config-key data and lockbox keys, "\
	"rendering data permanently unreadable.",
	"osd", "rw", FLAG(HIDDEN))
COMMAND("osd purge-new "
	"name=id,type=CephOsdName "
	"name=yes_i_really_mean_it,type=CephBool,req=false",
	"purge all traces of an OSD that was partially created but never "
	"started",
	"osd", "rw")
COMMAND_WITH_FLAG("osd purge-actual "
	"name=id,type=CephOsdName "
	"name=yes_i_really_mean_it,type=CephBool,req=false",
	"purge all osd data from the monitors. Combines `osd destroy`, "
	"`osd rm`, and `osd crush rm`.",
	"osd", "rw", FLAG(HIDDEN))
COMMAND("osd lost "
	"name=id,type=CephOsdName "
	"name=yes_i_really_mean_it,type=CephBool,req=false",
	"mark osd as permanently lost. THIS DESTROYS DATA IF NO MORE REPLICAS EXIST, BE CAREFUL",
	"osd", "rw")
COMMAND_WITH_FLAG("osd create "
	"name=uuid,type=CephUUID,req=false "
	"name=id,type=CephOsdName,req=false",
	"create new osd (with optional UUID and ID)", "osd", "rw",
	FLAG(DEPRECATED))
COMMAND("osd new "
	"name=uuid,type=CephUUID,req=true "
	"name=id,type=CephOsdName,req=false",
	"Create a new OSD. If supplied, the `id` to be replaced needs to "
	"exist and have been previously destroyed. "
	"Reads secrets from JSON file via `-i <file>` (see man page).",
	"osd", "rw")
// NOTE(review): goodchars=[range] is a character class (r,a,n,g,e), which
// exactly covers the literal keyword "range" this positional flag takes;
// confirm that is the intent before changing it.
COMMAND("osd blocklist "
	"name=range,type=CephString,goodchars=[range],req=false "
	"name=blocklistop,type=CephChoices,strings=add|rm "
	"name=addr,type=CephEntityAddr "
	"name=expire,type=CephFloat,range=0.0,req=false",
	"add (optionally until <expire> seconds from now) or remove <addr> from blocklist",
	"osd", "rw")
COMMAND("osd blocklist ls", "show blocklisted clients", "osd", "r")
COMMAND("osd blocklist clear", "clear all blocklisted clients", "osd", "rw")
// Legacy "blacklist" spellings kept as deprecated aliases of "blocklist".
COMMAND_WITH_FLAG("osd blacklist "
	"name=blacklistop,type=CephChoices,strings=add|rm "
	"name=addr,type=CephEntityAddr "
	"name=expire,type=CephFloat,range=0.0,req=false",
	"add (optionally until <expire> seconds from now) or remove <addr> from blacklist",
	"osd", "rw",
	FLAG(DEPRECATED))
COMMAND_WITH_FLAG("osd blacklist ls", "show blacklisted clients", "osd", "r",
	FLAG(DEPRECATED))
COMMAND_WITH_FLAG("osd blacklist clear", "clear all blacklisted clients", "osd", "rw",
	FLAG(DEPRECATED))
COMMAND("osd pool mksnap "
	"name=pool,type=CephPoolname "
	"name=snap,type=CephString",
	"make snapshot <snap> in <pool>", "osd", "rw")
COMMAND("osd pool rmsnap "
	"name=pool,type=CephPoolname "
	"name=snap,type=CephString",
	"remove snapshot <snap> from <pool>", "osd", "rw")
COMMAND("osd pool ls "
	"name=detail,type=CephChoices,strings=detail,req=false",
	"list pools", "osd", "r")
// "osd pool create": create a replicated or erasure pool.
// FIX: the "yes_i_really_mean_it" fragment was missing its trailing
// space, so compile-time string concatenation fused it with the next
// descriptor ("...req=falsename=crimson,..."), corrupting both the
// yes_i_really_mean_it and crimson argument definitions.
COMMAND("osd pool create "
	"name=pool,type=CephPoolname "
	"name=pg_num,type=CephInt,range=0,req=false "
	"name=pgp_num,type=CephInt,range=0,req=false "
	"name=pool_type,type=CephChoices,strings=replicated|erasure,req=false "
	"name=erasure_code_profile,type=CephString,req=false,goodchars=[A-Za-z0-9-_.] "
	"name=rule,type=CephString,req=false "
	"name=expected_num_objects,type=CephInt,range=0,req=false "
	"name=size,type=CephInt,range=0,req=false "
	"name=pg_num_min,type=CephInt,range=0,req=false "
	"name=pg_num_max,type=CephInt,range=0,req=false "
	"name=autoscale_mode,type=CephChoices,strings=on|off|warn,req=false "
	"name=bulk,type=CephBool,req=false "
	"name=target_size_bytes,type=CephInt,range=0,req=false "
	"name=target_size_ratio,type=CephFloat,range=0.0,req=false "\
	"name=yes_i_really_mean_it,type=CephBool,req=false "
	"name=crimson,type=CephBool,req=false",
	"create pool", "osd", "rw")
COMMAND_WITH_FLAG("osd pool delete "
"name=pool,type=CephPoolname "
"name=pool2,type=CephPoolname,req=false "
"name=yes_i_really_really_mean_it,type=CephBool,req=false "
"name=yes_i_really_really_mean_it_not_faking,type=CephBool,req=false ",
"delete pool",
"osd", "rw",
FLAG(DEPRECATED))
COMMAND("osd pool rm "
"name=pool,type=CephPoolname "
"name=pool2,type=CephPoolname,req=false "
"name=yes_i_really_really_mean_it,type=CephBool,req=false "
"name=yes_i_really_really_mean_it_not_faking,type=CephBool,req=false ",
"remove pool",
"osd", "rw")
COMMAND("osd pool rename "
"name=srcpool,type=CephPoolname "
"name=destpool,type=CephPoolname "
"name=yes_i_really_mean_it,type=CephBool,req=false",
"rename <srcpool> to <destpool>", "osd", "rw")
COMMAND("osd pool get "
"name=pool,type=CephPoolname "
"name=var,type=CephChoices,strings=size|min_size|pg_num|pgp_num|crush_rule|hashpspool|nodelete|nopgchange|nosizechange|write_fadvise_dontneed|noscrub|nodeep-scrub|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|use_gmt_hitset|target_max_objects|target_max_bytes|cache_target_dirty_ratio|cache_target_dirty_high_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|erasure_code_profile|min_read_recency_for_promote|all|min_write_recency_for_promote|fast_read|hit_set_grade_decay_rate|hit_set_search_last_n|scrub_min_interval|scrub_max_interval|deep_scrub_interval|recovery_priority|recovery_op_priority|scrub_priority|compression_mode|compression_algorithm|compression_required_ratio|compression_max_blob_size|compression_min_blob_size|csum_type|csum_min_block|csum_max_block|allow_ec_overwrites|fingerprint_algorithm|pg_autoscale_mode|pg_autoscale_bias|pg_num_min|pg_num_max|target_size_bytes|target_size_ratio|dedup_tier|dedup_chunk_algorithm|dedup_cdc_chunk_size|eio|bulk",
"get pool parameter <var>", "osd", "r")
// Pool tunables: set a single per-pool parameter.  'var' enumerates
// every settable pool option; keep it aligned with the matching
// "osd pool get" choice list above.
COMMAND("osd pool set "
        "name=pool,type=CephPoolname "
        "name=var,type=CephChoices,strings=size|min_size|pg_num|pgp_num|pgp_num_actual|crush_rule|hashpspool|nodelete|nopgchange|nosizechange|write_fadvise_dontneed|noscrub|nodeep-scrub|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|use_gmt_hitset|target_max_bytes|target_max_objects|cache_target_dirty_ratio|cache_target_dirty_high_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|min_read_recency_for_promote|min_write_recency_for_promote|fast_read|hit_set_grade_decay_rate|hit_set_search_last_n|scrub_min_interval|scrub_max_interval|deep_scrub_interval|recovery_priority|recovery_op_priority|scrub_priority|compression_mode|compression_algorithm|compression_required_ratio|compression_max_blob_size|compression_min_blob_size|csum_type|csum_min_block|csum_max_block|allow_ec_overwrites|fingerprint_algorithm|pg_autoscale_mode|pg_autoscale_bias|pg_num_min|pg_num_max|target_size_bytes|target_size_ratio|dedup_tier|dedup_chunk_algorithm|dedup_cdc_chunk_size|eio|bulk "
        "name=val,type=CephString "
        "name=yes_i_really_mean_it,type=CephBool,req=false",
        "set pool parameter <var> to <val>", "osd", "rw")
// 'val' is a CephString because it can include a unit. Perhaps
// there should be a Python type for validation/conversion of strings
// with units.
COMMAND("osd pool set-quota "
        "name=pool,type=CephPoolname "
        "name=field,type=CephChoices,strings=max_objects|max_bytes "
        "name=val,type=CephString",
        "set object or byte limit on pool", "osd", "rw")
COMMAND("osd pool get-quota "
        "name=pool,type=CephPoolname ",
        "obtain object or byte limits for pool",
        "osd", "r")
// Pool application tagging: associate a pool with the client
// application(s) (cephfs, rbd, rgw, ...) that use it, plus free-form
// per-application key/value metadata.
COMMAND("osd pool application enable "
        "name=pool,type=CephPoolname "
        "name=app,type=CephString,goodchars=[A-Za-z0-9-_.] "
        "name=yes_i_really_mean_it,type=CephBool,req=false",
        "enable use of an application <app> [cephfs,rbd,rgw] on pool <poolname>",
        "osd", "rw")
COMMAND("osd pool application disable "
        "name=pool,type=CephPoolname "
        "name=app,type=CephString "
        "name=yes_i_really_mean_it,type=CephBool,req=false",
        "disables use of an application <app> on pool <poolname>",
        "osd", "rw")
COMMAND("osd pool application set "
        "name=pool,type=CephPoolname "
        "name=app,type=CephString "
        "name=key,type=CephString,goodchars=[A-Za-z0-9-_.] "
        "name=value,type=CephString,goodchars=[A-Za-z0-9-_.=]",
        "sets application <app> metadata key <key> to <value> on pool <poolname>",
        "osd", "rw")
COMMAND("osd pool application rm "
        "name=pool,type=CephPoolname "
        "name=app,type=CephString "
        "name=key,type=CephString",
        "removes application <app> metadata key <key> on pool <poolname>",
        "osd", "rw")
// All three arguments are optional so the command can list everything
// when they are omitted.
// bug fix: the pool descriptor read "req=fasle" (typo), which is not a
// valid argument-descriptor attribute and so the optional flag never
// took effect.
COMMAND("osd pool application get "
        "name=pool,type=CephPoolname,req=false "
        "name=app,type=CephString,req=false "
        "name=key,type=CephString,req=false",
        "get value of key <key> of application <app> on pool <poolname>",
        "osd", "r")
COMMAND("osd utilization",
        "get basic pg distribution stats",
        "osd", "r")
// stretch-mode management (two-site clusters with a tiebreaker mon)
COMMAND("osd force_healthy_stretch_mode " \
        "name=yes_i_really_mean_it,type=CephBool,req=false",
        "force a healthy stretch mode, requiring the full number of CRUSH buckets "
        "to peer and letting all non-tiebreaker monitors be elected leader ",
        "osd", "rw")
COMMAND("osd force_recovery_stretch_mode " \
        "name=yes_i_really_mean_it,type=CephBool,req=false",
        "try and force a recovery stretch mode, increasing the "
        "pool size to its non-failure value if currently degraded and "
        "all monitor buckets are up",
        "osd", "rw")
COMMAND("osd set-allow-crimson " \
        "name=yes_i_really_mean_it,type=CephBool,req=false",
        "Allow crimson-osds to boot and join the cluster. Note, crimson-osd is "
        "not yet considered stable and may crash or cause data loss -- should "
        "be avoided outside of testing and development. This setting is "
        "irrevocable",
        "osd", "rw")
// tiering
COMMAND("osd tier add "
        "name=pool,type=CephPoolname "
        "name=tierpool,type=CephPoolname "
        "name=force_nonempty,type=CephBool,req=false",
        "add the tier <tierpool> (the second one) to base pool <pool> (the first one)",
        "osd", "rw")
COMMAND("osd tier rm "
        "name=pool,type=CephPoolname "
        "name=tierpool,type=CephPoolname",
        "remove the tier <tierpool> (the second one) from base pool <pool> (the first one)",
        "osd", "rw")
// deprecated alias for "osd tier rm"
COMMAND_WITH_FLAG("osd tier remove "
        "name=pool,type=CephPoolname "
        "name=tierpool,type=CephPoolname",
        "remove the tier <tierpool> (the second one) from base pool <pool> (the first one)",
        "osd", "rw",
        FLAG(DEPRECATED))
COMMAND("osd tier cache-mode "
        "name=pool,type=CephPoolname "
        "name=mode,type=CephChoices,strings=writeback|proxy|readproxy|readonly|none "
        "name=yes_i_really_mean_it,type=CephBool,req=false",
        "specify the caching mode for cache tier <pool>", "osd", "rw")
COMMAND("osd tier set-overlay "
        "name=pool,type=CephPoolname "
        "name=overlaypool,type=CephPoolname",
        "set the overlay pool for base pool <pool> to be <overlaypool>", "osd", "rw")
COMMAND("osd tier rm-overlay "
        "name=pool,type=CephPoolname ",
        "remove the overlay pool for base pool <pool>", "osd", "rw")
// deprecated alias for "osd tier rm-overlay"
COMMAND_WITH_FLAG("osd tier remove-overlay "
        "name=pool,type=CephPoolname ",
        "remove the overlay pool for base pool <pool>", "osd", "rw",
        FLAG(DEPRECATED))
COMMAND("osd tier add-cache "
        "name=pool,type=CephPoolname "
        "name=tierpool,type=CephPoolname "
        "name=size,type=CephInt,range=0",
        "add a cache <tierpool> (the second one) of size <size> to existing pool <pool> (the first one)",
        "osd", "rw")
/*
 * mon/KVMonitor.cc
 */
COMMAND("config-key get "
        "name=key,type=CephString",
        "get <key>", "config-key", "r")
COMMAND("config-key set "
        "name=key,type=CephString "
        "name=val,type=CephString,req=false",
        "set <key> to value <val>", "config-key", "rw")
// deprecated alias for "config-key set"
COMMAND_WITH_FLAG("config-key put "
        "name=key,type=CephString "
        "name=val,type=CephString,req=false",
        "put <key>, value <val>", "config-key", "rw",
        FLAG(DEPRECATED))
// deprecated alias for "config-key rm"
COMMAND_WITH_FLAG("config-key del "
        "name=key,type=CephString",
        "delete <key>", "config-key", "rw",
        FLAG(DEPRECATED))
COMMAND("config-key rm "
        "name=key,type=CephString",
        "rm <key>", "config-key", "rw")
COMMAND("config-key exists "
        "name=key,type=CephString",
        "check for <key>'s existence", "config-key", "r")
// deprecated alias for "config-key ls"
COMMAND_WITH_FLAG("config-key list ", "list keys", "config-key", "r",
        FLAG(DEPRECATED))
COMMAND("config-key ls ", "list keys", "config-key", "r")
COMMAND("config-key dump "
        "name=key,type=CephString,req=false", "dump keys and values (with optional prefix)", "config-key", "r")
/*
 * mon/MgrMonitor.cc
 */
COMMAND("mgr stat",
        "dump basic info about the mgr cluster state",
        "mgr", "r")
COMMAND("mgr dump "
        "name=epoch,type=CephInt,range=0,req=false",
        "dump the latest MgrMap",
        "mgr", "r")
COMMAND("mgr fail name=who,type=CephString,req=false",
        "treat the named manager daemon as failed", "mgr", "rw")
COMMAND("mgr module ls",
        "list active mgr modules", "mgr", "r")
COMMAND("mgr services",
        "list service endpoints provided by mgr modules",
        "mgr", "r")
COMMAND("mgr module enable "
        "name=module,type=CephString "
        "name=force,type=CephBool,req=false",
        "enable mgr module", "mgr", "rw")
COMMAND("mgr module disable "
        "name=module,type=CephString",
        "disable mgr module", "mgr", "rw")
COMMAND("mgr metadata name=who,type=CephString,req=false",
        "dump metadata for all daemons or a specific daemon",
        "mgr", "r")
COMMAND("mgr count-metadata name=property,type=CephString",
        "count ceph-mgr daemons by metadata field property",
        "mgr", "r")
COMMAND("mgr versions",
        "check running versions of ceph-mgr daemons",
        "mgr", "r")
// ConfigMonitor
COMMAND("config set"
        " name=who,type=CephString"
        " name=name,type=CephString"
        " name=value,type=CephString"
        " name=force,type=CephBool,req=false",
        "Set a configuration option for one or more entities",
        "config", "rw")
COMMAND("config rm"
        " name=who,type=CephString"
        " name=name,type=CephString",
        "Clear a configuration option for one or more entities",
        "config", "rw")
COMMAND("config get "
        "name=who,type=CephString "
        "name=key,type=CephString,req=false",
        "Show configuration option(s) for an entity",
        "config", "r")
// NOTE(review): this entry uses module "mon" while its siblings use
// "config" -- looks inconsistent; confirm whether intentional before
// changing, as cap profiles may depend on it.
COMMAND("config dump",
        "Show all configuration option(s)",
        "mon", "r")
COMMAND("config help "
        "name=key,type=CephString",
        "Describe a configuration option",
        "config", "r")
COMMAND("config ls",
        "List available configuration options",
        "config", "r")
COMMAND("config assimilate-conf",
        "Assimilate options from a conf, and return a new, minimal conf file",
        "config", "rw")
COMMAND("config log name=num,type=CephInt,req=false",
        "Show recent history of config changes",
        "config", "r")
COMMAND("config reset "
        "name=num,type=CephInt,range=0",
        "Revert configuration to a historical version specified by <num>",
        "config", "rw")
COMMAND("config generate-minimal-conf",
        "Generate a minimal ceph.conf file",
        "config", "r")
// these are tell commands that were implemented as CLI commands in
// the broken pre-octopus way that we want to allow to work when a
// monitor has upgraded to octopus+ but the monmap min_mon_release is
// still < octopus. we exclude things that weren't well supported
// before and that aren't implemented by the octopus mon anymore.
//
// the command set below matches the kludge in Monitor::handle_command
// that shunts these off to the asok machinery.
COMMAND_WITH_FLAG("injectargs "
        "name=injected_args,type=CephString,n=N",
        "inject config arguments into monitor", "mon", "rw",
        FLAG(TELL))
COMMAND_WITH_FLAG("smart name=devid,type=CephString,req=false",
        "Query health metrics for underlying device",
        "mon", "rw",
        FLAG(TELL))
COMMAND_WITH_FLAG("mon_status",
        "report status of monitors",
        "mon", "r",
        FLAG(TELL))
COMMAND_WITH_FLAG("heap "
        "name=heapcmd,type=CephChoices,strings=dump|start_profiler|stop_profiler|release|stats "
        "name=value,type=CephString,req=false",
        "show heap usage info (available only if compiled with tcmalloc)",
        "mon", "rw",
        FLAG(TELL))
COMMAND_WITH_FLAG("connection scores dump",
        "show the scores used in connectivity-based elections",
        "mon", "rwx",
        FLAG(TELL))
COMMAND_WITH_FLAG("connection scores reset",
        "reset the scores used in connectivity-based elections",
        "mon", "rwx",
        FLAG(TELL))
COMMAND_WITH_FLAG("sync_force "
        "name=yes_i_really_mean_it,type=CephBool,req=false",
        "force sync of and clear monitor store",
        "mon", "rw",
        FLAG(TELL))
COMMAND_WITH_FLAG("add_bootstrap_peer_hint "
        "name=addr,type=CephIPAddr",
        "add peer address as potential bootstrap "
        "peer for cluster bringup",
        "mon", "rw",
        FLAG(TELL))
COMMAND_WITH_FLAG("add_bootstrap_peer_hintv "
        "name=addrv,type=CephString",
        "add peer address vector as potential bootstrap "
        "peer for cluster bringup",
        "mon", "rw",
        FLAG(TELL))
COMMAND_WITH_FLAG("quorum enter ",
        "force monitor back into quorum",
        "mon", "rw",
        FLAG(TELL))
COMMAND_WITH_FLAG("quorum exit",
        "force monitor out of the quorum",
        "mon", "rw",
        FLAG(TELL))
COMMAND_WITH_FLAG("ops",
        "show the ops currently in flight",
        "mon", "r",
        FLAG(TELL))
COMMAND_WITH_FLAG("sessions",
        "list existing sessions",
        "mon", "r",
        FLAG(TELL))
COMMAND_WITH_FLAG("dump_historic_ops",
        "show recent ops",
        "mon", "r",
        FLAG(TELL))
COMMAND_WITH_FLAG("dump_historic_slow_ops",
        "show recent slow ops",
        "mon", "r",
        FLAG(TELL))
| 57,562 | 39.508797 | 999 |
h
|
null |
ceph-main/src/mon/MonMap.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "MonMap.h"
#include <algorithm>
#include <sstream>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#ifdef WITH_SEASTAR
#include <seastar/core/fstream.hh>
#include <seastar/core/reactor.hh>
#include <seastar/net/dns.hh>
#include "crimson/common/config_proxy.h"
#endif
#include "common/Formatter.h"
#include "include/ceph_features.h"
#include "include/addr_parsing.h"
#include "common/ceph_argparse.h"
#include "common/dns_resolve.h"
#include "common/errno.h"
#include "common/dout.h"
#include "common/Clock.h"
#include "mon/health_check.h"
using std::list;
using std::map;
using std::ostream;
using std::ostringstream;
using std::set;
using std::string;
using std::vector;
using ceph::DNSResolver;
using ceph::Formatter;
#ifdef WITH_SEASTAR
namespace {
// crimson logger for the monclient subsystem; used only by the
// seastar-specific code paths further down in this file.
seastar::logger& logger()
{
return crimson::get_logger(ceph_subsys_monc);
}
}
#endif
// Serialize this monitor entry for a peer with feature set 'features'.
// Version history visible in decode(): v1 name+addr, v2 +priority,
// v3+ addrvec, v4 +weight, v5 +crush_loc.  Pre-nautilus peers get a
// v2 encoding with a single legacy address.
void mon_info_t::encode(ceph::buffer::list& bl, uint64_t features) const
{
uint8_t v = 5;
uint8_t min_v = 1;
if (!crush_loc.empty()) {
// we added crush_loc in version 5, but need to let old clients decode it
// so just leave the min_v at version 1. Monitors are protected
// from misunderstandings about location because setting it is blocked
// on FEATURE_PINGING
min_v = 1;
}
if (!HAVE_FEATURE(features, SERVER_NAUTILUS)) {
v = 2;
}
ENCODE_START(v, min_v, bl);
encode(name, bl);
if (v < 3) {
// pre-addrvec encoding: a single legacy entity_addr_t
ceph_assert(min_v == 1);
auto a = public_addrs.legacy_addr();
if (a != entity_addr_t()) {
encode(a, bl, features);
} else {
// note: we don't have a legacy addr here, so lie so that it looks
// like one, just so that old clients get a valid-looking map.
// they won't be able to talk to the v2 mons, but that's better
// than nothing.
encode(public_addrs.as_legacy_addr(), bl, features);
}
} else {
encode(public_addrs, bl, features);
}
encode(priority, bl);
encode(weight, bl);
encode(crush_loc, bl);
ENCODE_FINISH(bl);
}
// Deserialize a mon_info_t; tolerates every encoded version (see the
// version history noted in encode()).  Fields added after the encoded
// version keep their defaults.
void mon_info_t::decode(ceph::buffer::list::const_iterator& p)
{
DECODE_START(5, p);
decode(name, p);
decode(public_addrs, p);
if (struct_v >= 2) {
decode(priority, p);
}
if (struct_v >= 4) {
decode(weight, p);
}
if (struct_v >= 5) {
decode(crush_loc, p);
}
DECODE_FINISH(p);
}
// Render a one-line, human-readable description of this monitor entry.
void mon_info_t::print(ostream& out) const
{
  out << "mon." << name;
  out << " addrs " << public_addrs;
  out << " priority " << priority;
  out << " weight " << weight;
  out << " crush location " << crush_loc;
}
namespace {
// Orders mon_info_t entries by their legacy/front public address,
// breaking ties by name.  This ordering defines the historical
// ("legacy") monitor rank order used by calc_legacy_ranks().
struct rank_cmp {
bool operator()(const mon_info_t &a, const mon_info_t &b) const {
if (a.public_addrs.legacy_or_front_addr() == b.public_addrs.legacy_or_front_addr())
return a.name < b.name;
return a.public_addrs.legacy_or_front_addr() < b.public_addrs.legacy_or_front_addr();
}
};
}
void MonMap::calc_legacy_ranks()
{
ranks.resize(mon_info.size());
// Used to order entries according to public_addr, because that's
// how the ranks are expected to be ordered by. We may expand this
// later on, according to some other criteria, by specifying a
// different comparator.
//
// Please note that we use a 'set' here instead of resorting to
// std::sort() because we need more info than that's available in
// the vector. The vector will thus be ordered by, e.g., public_addr
// while only containing the names of each individual monitor.
// The only way of achieving this with std::sort() would be to first
// insert every mon_info_t entry into a vector 'foo', std::sort() 'foo'
// with custom comparison functions, and then copy each invidual entry
// to a new vector. Unless there's a simpler way, we don't think the
// added complexity makes up for the additional memory usage of a 'set'.
set<mon_info_t, rank_cmp> tmp;
for (auto p = mon_info.begin(); p != mon_info.end(); ++p) {
mon_info_t &m = p->second;
tmp.insert(m);
}
// map the set to the actual ranks etc
unsigned i = 0;
for (auto p = tmp.begin(); p != tmp.end(); ++p, ++i) {
ranks[i] = p->name;
}
}
// Encode the monmap for a peer advertising 'con_features'.  Four wire
// formats are produced depending on the peer:
//   - no MONNAMES feature: bare v1 (vector of entity_inst_t)
//   - no MONENC feature:   bare v2 (name -> legacy addr map)
//   - no SERVER_NAUTILUS:  tagged v5/compat 3 (legacy addr map + mon_info)
//   - otherwise:           tagged v9/compat 6 (addrvecs, ranks, stretch)
void MonMap::encode(ceph::buffer::list& blist, uint64_t con_features) const
{
if ((con_features & CEPH_FEATURE_MONNAMES) == 0) {
// ancient peer: v1 format, ranks mapped to entity_inst_t with
// legacy addresses
using ceph::encode;
__u16 v = 1;
encode(v, blist);
ceph::encode_raw(fsid, blist);
encode(epoch, blist);
vector<entity_inst_t> mon_inst(ranks.size());
for (unsigned n = 0; n < ranks.size(); n++) {
mon_inst[n].name = entity_name_t::MON(n);
mon_inst[n].addr = get_addrs(n).legacy_addr();
}
encode(mon_inst, blist, con_features);
encode(last_changed, blist);
encode(created, blist);
return;
}
map<string,entity_addr_t> legacy_mon_addr;
if (!HAVE_FEATURE(con_features, MONENC) ||
!HAVE_FEATURE(con_features, SERVER_NAUTILUS)) {
// both legacy formats below need the name -> legacy-addr map
for (auto& [name, info] : mon_info) {
legacy_mon_addr[name] = info.public_addrs.legacy_addr();
}
}
if (!HAVE_FEATURE(con_features, MONENC)) {
/* we keep the mon_addr map when encoding to ensure compatibility
* with clients and other monitors that do not yet support the 'mons'
* map. This map keeps its original behavior, containing a mapping of
* monitor id (i.e., 'foo' in 'mon.foo') to the monitor's public
* address -- which is obtained from the public address of each entry
* in the 'mons' map.
*/
using ceph::encode;
__u16 v = 2;
encode(v, blist);
ceph::encode_raw(fsid, blist);
encode(epoch, blist);
encode(legacy_mon_addr, blist, con_features);
encode(last_changed, blist);
encode(created, blist);
return;
}
if (!HAVE_FEATURE(con_features, SERVER_NAUTILUS)) {
// pre-nautilus tagged encoding: still carries the legacy addr map
ENCODE_START(5, 3, blist);
ceph::encode_raw(fsid, blist);
encode(epoch, blist);
encode(legacy_mon_addr, blist, con_features);
encode(last_changed, blist);
encode(created, blist);
encode(persistent_features, blist);
encode(optional_features, blist);
encode(mon_info, blist, con_features);
ENCODE_FINISH(blist);
return;
}
// current format
ENCODE_START(9, 6, blist);
ceph::encode_raw(fsid, blist);
encode(epoch, blist);
encode(last_changed, blist);
encode(created, blist);
encode(persistent_features, blist);
encode(optional_features, blist);
encode(mon_info, blist, con_features);
encode(ranks, blist);
encode(min_mon_release, blist);
encode(removed_ranks, blist);
// election_strategy is encoded as a raw uint8_t
uint8_t t = strategy;
encode(t, blist);
encode(disallowed_leaders, blist);
encode(stretch_mode_enabled, blist);
encode(tiebreaker_mon, blist);
encode(stretch_marked_down_mons, blist);
ENCODE_FINISH(blist);
}
// Decode any historical monmap encoding (v1 through v9; see encode()
// for the format variants) and rebuild the derived addr/rank tables.
void MonMap::decode(ceph::buffer::list::const_iterator& p)
{
map<string,entity_addr_t> mon_addr;
DECODE_START_LEGACY_COMPAT_LEN_16(9, 3, 3, p);
ceph::decode_raw(fsid, p);
decode(epoch, p);
if (struct_v == 1) {
// v1 carried entity_inst_t's; synthesize single-digit mon names
// from the rank positions
vector<entity_inst_t> mon_inst;
decode(mon_inst, p);
for (unsigned i = 0; i < mon_inst.size(); i++) {
char n[2];
n[0] = '0' + i;
n[1] = 0;
string name = n;
mon_addr[name] = mon_inst[i].addr;
}
} else if (struct_v < 6) {
decode(mon_addr, p);
}
decode(last_changed, p);
decode(created, p);
if (struct_v >= 4) {
decode(persistent_features, p);
decode(optional_features, p);
}
if (struct_v < 5) {
// generate mon_info from legacy mon_addr
for (auto& [name, addr] : mon_addr) {
mon_info_t &m = mon_info[name];
m.name = name;
m.public_addrs = entity_addrvec_t(addr);
}
} else {
decode(mon_info, p);
}
if (struct_v < 6) {
// old encodings did not carry ranks explicitly
calc_legacy_ranks();
} else {
decode(ranks, p);
}
if (struct_v >= 7) {
decode(min_mon_release, p);
} else {
min_mon_release = infer_ceph_release_from_mon_features(persistent_features);
}
if (struct_v >= 8) {
decode(removed_ranks, p);
uint8_t t;
decode(t, p);
strategy = static_cast<election_strategy>(t);
decode(disallowed_leaders, p);
}
if (struct_v >= 9) {
decode(stretch_mode_enabled, p);
decode(tiebreaker_mon, p);
decode(stretch_marked_down_mons, p);
} else {
// stretch mode did not exist yet; reset to defaults
stretch_mode_enabled = false;
tiebreaker_mon = "";
stretch_marked_down_mons.clear();
}
// rebuild the addr -> name lookup table from the decoded mon_info
calc_addr_mons();
DECODE_FINISH(p);
}
// Produce representative MonMap instances for encode/decode round-trip
// testing (ceph-dencoder): an empty map, a single-mon map, and a map
// mixing empty, nonce-only, and fully-populated addresses.
void MonMap::generate_test_instances(list<MonMap*>& o)
{
o.push_back(new MonMap);
o.push_back(new MonMap);
o.back()->epoch = 1;
o.back()->last_changed = utime_t(123, 456);
o.back()->created = utime_t(789, 101112);
o.back()->add("one", entity_addrvec_t());
MonMap *m = new MonMap;
{
m->epoch = 1;
m->last_changed = utime_t(123, 456);
// empty addrs distinguished only by nonce
entity_addrvec_t empty_addr_one = entity_addrvec_t(entity_addr_t());
empty_addr_one.v[0].set_nonce(1);
m->add("empty_addr_one", empty_addr_one);
entity_addrvec_t empty_addr_two = entity_addrvec_t(entity_addr_t());
empty_addr_two.v[0].set_nonce(2);
m->add("empty_addr_two", empty_addr_two);
// a real parsed address, with priority/weight set
const char *local_pub_addr_s = "127.0.1.2";
const char *end_p = local_pub_addr_s + strlen(local_pub_addr_s);
entity_addrvec_t local_pub_addr;
local_pub_addr.parse(local_pub_addr_s, &end_p);
m->add(mon_info_t("filled_pub_addr", entity_addrvec_t(local_pub_addr), 1, 1));
m->add("empty_addr_zero", entity_addrvec_t());
}
o.push_back(m);
}
// read from/write to a file
int MonMap::write(const char *fn)
{
// encode
ceph::buffer::list bl;
encode(bl, CEPH_FEATURES_ALL);
return bl.write_file(fn);
}
int MonMap::read(const char *fn)
{
// read
ceph::buffer::list bl;
std::string error;
int r = bl.read_file(fn, &error);
if (r < 0)
return r;
decode(bl);
return 0;
}
// Print the compact one-line summary: epoch, mon count, and the
// historical "{name=addrs,...}" listing, plus removed ranks.
void MonMap::print_summary(ostream& out) const
{
  out << "e" << epoch << ": "
      << mon_info.size() << " mons at {";
  // mon_info maps names to mon_info_t; emit name=addrs pairs to keep
  // the format that predates the mon_info_t structure.
  const char *sep = "";
  for (const auto& entry : mon_info) {
    out << sep << entry.first << "=" << entry.second.public_addrs;
    sep = ",";
  }
  out << "}" << " removed_ranks: {" << removed_ranks << "}";
}
// Print the full multi-line monmap description: header fields,
// stretch-mode state, and one line per rank with address and optional
// crush location.
void MonMap::print(ostream& out) const
{
out << "epoch " << epoch << "\n";
out << "fsid " << fsid << "\n";
out << "last_changed " << last_changed << "\n";
out << "created " << created << "\n";
out << "min_mon_release " << to_integer<unsigned>(min_mon_release)
<< " (" << min_mon_release << ")\n";
out << "election_strategy: " << strategy << "\n";
if (stretch_mode_enabled) {
out << "stretch_mode_enabled " << stretch_mode_enabled << "\n";
out << "tiebreaker_mon " << tiebreaker_mon << "\n";
}
if (stretch_mode_enabled ||
!disallowed_leaders.empty()) {
out << "disallowed_leaders " << disallowed_leaders << "\n";
}
unsigned i = 0;
for (auto p = ranks.begin(); p != ranks.end(); ++p) {
// every rank must have a matching mon_info entry
const auto &mi = mon_info.find(*p);
ceph_assert(mi != mon_info.end());
out << i++ << ": " << mi->second.public_addrs << " mon." << *p;
if (!mi->second.crush_loc.empty()) {
out << "; crush_location " << mi->second.crush_loc;
}
out << "\n";
}
}
// Emit the full monmap through a Formatter (e.g. `ceph mon dump -f json`).
void MonMap::dump(Formatter *f) const
{
  f->dump_unsigned("epoch", epoch);
  f->dump_stream("fsid") << fsid;
  last_changed.gmtime(f->dump_stream("modified"));
  created.gmtime(f->dump_stream("created"));
  f->dump_unsigned("min_mon_release", to_integer<unsigned>(min_mon_release));
  f->dump_string("min_mon_release_name", to_string(min_mon_release));
  f->dump_int("election_strategy", strategy);
  // bug fix: these two keys were "disallowed_leaders: " and
  // "removed_ranks: " -- the embedded ": " produced malformed field
  // names in structured (json/xml) output.
  f->dump_stream("disallowed_leaders") << disallowed_leaders;
  f->dump_bool("stretch_mode", stretch_mode_enabled);
  f->dump_string("tiebreaker_mon", tiebreaker_mon);
  f->dump_stream("removed_ranks") << removed_ranks;
  f->open_object_section("features");
  persistent_features.dump(f, "persistent");
  optional_features.dump(f, "optional");
  f->close_section();
  f->open_array_section("mons");
  int i = 0;
  for (auto p = ranks.begin(); p != ranks.end(); ++p, ++i) {
    f->open_object_section("mon");
    f->dump_int("rank", i);
    f->dump_string("name", *p);
    f->dump_object("public_addrs", get_addrs(*p));
    // compat: make these look like pre-nautilus entity_addr_t
    f->dump_stream("addr") << get_addrs(*p).get_legacy_str();
    f->dump_stream("public_addr") << get_addrs(*p).get_legacy_str();
    f->dump_unsigned("priority", get_priority(*p));
    f->dump_unsigned("weight", get_weight(*p));
    const auto &mi = mon_info.find(*p);
    // we don't need to assert this validity as all the get_* functions did
    f->dump_stream("crush_location") << mi->second.crush_loc;
    f->close_section();
  }
  f->close_section();
}
// Emit the abbreviated monmap fields used by cluster status output.
void MonMap::dump_summary(Formatter *f) const
{
f->dump_unsigned("epoch", epoch);
f->dump_string("min_mon_release_name", to_string(min_mon_release));
f->dump_unsigned("num_mons", ranks.size());
}
// an ambiguous mon addr may be legacy or may be msgr2--we aren' sure.
// when that happens we need to try them both (unless we can
// reasonably infer from the port number which it is).
//
// 'addr' is taken by value and mutated locally.  When both protocols
// must be tried and for_mkfs is false, a second "<name>-legacy" entry
// is added for the v1 address; for mkfs a single entry carrying both
// addrs in one addrvec is added instead.  Addresses already present in
// the map are skipped.
void MonMap::_add_ambiguous_addr(const string& name,
entity_addr_t addr,
int priority,
int weight,
bool for_mkfs)
{
if (addr.get_type() != entity_addr_t::TYPE_ANY) {
// a v1: or v2: prefix was specified
if (addr.get_port() == 0) {
// use default port
if (addr.get_type() == entity_addr_t::TYPE_LEGACY) {
addr.set_port(CEPH_MON_PORT_LEGACY);
} else if (addr.get_type() == entity_addr_t::TYPE_MSGR2) {
addr.set_port(CEPH_MON_PORT_IANA);
} else {
// wth
return;
}
if (!contains(addr)) {
add(name, entity_addrvec_t(addr), priority, weight);
}
} else {
if (!contains(addr)) {
add(name, entity_addrvec_t(addr), priority, weight);
}
}
} else {
// no v1: or v2: prefix specified
if (addr.get_port() == CEPH_MON_PORT_LEGACY) {
// legacy port implies legacy addr
addr.set_type(entity_addr_t::TYPE_LEGACY);
if (!contains(addr)) {
if (!for_mkfs) {
add(name + "-legacy", entity_addrvec_t(addr), priority, weight);
} else {
add(name, entity_addrvec_t(addr), priority, weight);
}
}
} else if (addr.get_port() == CEPH_MON_PORT_IANA) {
// iana port implies msgr2 addr
addr.set_type(entity_addr_t::TYPE_MSGR2);
if (!contains(addr)) {
add(name, entity_addrvec_t(addr), priority, weight);
}
} else if (addr.get_port() == 0) {
// no port; include both msgr2 and legacy ports
if (!for_mkfs) {
// two separate entries, legacy one suffixed "-legacy"
addr.set_type(entity_addr_t::TYPE_MSGR2);
addr.set_port(CEPH_MON_PORT_IANA);
if (!contains(addr)) {
add(name, entity_addrvec_t(addr), priority, weight);
}
addr.set_type(entity_addr_t::TYPE_LEGACY);
addr.set_port(CEPH_MON_PORT_LEGACY);
if (!contains(addr)) {
add(name + "-legacy", entity_addrvec_t(addr), priority, weight);
}
} else {
// mkfs: one entry carrying both addrs in a single addrvec
entity_addrvec_t av;
addr.set_type(entity_addr_t::TYPE_MSGR2);
addr.set_port(CEPH_MON_PORT_IANA);
av.v.push_back(addr);
addr.set_type(entity_addr_t::TYPE_LEGACY);
addr.set_port(CEPH_MON_PORT_LEGACY);
av.v.push_back(addr);
if (!contains(av)) {
add(name, av, priority, weight);
}
}
} else {
// non-standard port: assume msgr2 first
addr.set_type(entity_addr_t::TYPE_MSGR2);
if (!contains(addr)) {
add(name, entity_addrvec_t(addr), priority, weight);
}
if (!for_mkfs) {
// try legacy on same port too
addr.set_type(entity_addr_t::TYPE_LEGACY);
if (!contains(addr)) {
add(name + "-legacy", entity_addrvec_t(addr), priority, weight);
}
}
}
}
}
void MonMap::init_with_addrs(const std::vector<entity_addrvec_t>& addrs,
bool for_mkfs,
std::string_view prefix)
{
char id = 'a';
for (auto& addr : addrs) {
string name{prefix};
name += id++;
if (addr.v.size() == 1) {
_add_ambiguous_addr(name, addr.front(), 0, 0, for_mkfs);
} else {
// they specified an addrvec, so let's assume they also specified
// the addr *type* and *port*. (we could possibly improve this?)
add(name, addr, 0);
}
}
}
// Populate the map from a comma/space-separated list of IP[:port]
// strings.  Returns 0 on success, -EINVAL if parsing fails, -ENOENT if
// the list yields no addresses.
int MonMap::init_with_ips(const std::string& ips,
			  bool for_mkfs,
			  std::string_view prefix)
{
  vector<entity_addrvec_t> addrs;
  const bool parsed = parse_ip_port_vec(ips.c_str(), addrs,
					entity_addr_t::TYPE_ANY);
  if (!parsed) {
    return -EINVAL;
  }
  if (addrs.empty()) {
    return -ENOENT;
  }
  init_with_addrs(addrs, for_mkfs, prefix);
  return 0;
}
int MonMap::init_with_hosts(const std::string& hostlist,
bool for_mkfs,
std::string_view prefix)
{
// maybe they passed us a DNS-resolvable name
char *hosts = resolve_addrs(hostlist.c_str());
if (!hosts)
return -EINVAL;
vector<entity_addrvec_t> addrs;
bool success = parse_ip_port_vec(
hosts, addrs,
entity_addr_t::TYPE_ANY);
free(hosts);
if (!success)
return -EINVAL;
if (addrs.empty())
return -ENOENT;
init_with_addrs(addrs, for_mkfs, prefix);
calc_legacy_ranks();
return 0;
}
// Trim/extend this map to exactly the set 'initial_members'.
// Monitors not in the list are removed (their addresses optionally
// collected into 'removed'); missing members are added, using
// 'my_addrs' for 'my_name' and synthetic legacy placeholder addresses
// (distinguished by nonce) for the rest.  Legacy ranks are recomputed
// at the end.
void MonMap::set_initial_members(CephContext *cct,
list<std::string>& initial_members,
string my_name,
const entity_addrvec_t& my_addrs,
set<entity_addrvec_t> *removed)
{
// remove non-initial members
unsigned i = 0;
// note: 'i' is only advanced when an entry is kept; remove() shifts
// the remaining entries down, so the same index is re-examined
while (i < size()) {
string n = get_name(i);
if (std::find(initial_members.begin(), initial_members.end(), n)
!= initial_members.end()) {
lgeneric_dout(cct, 1) << " keeping " << n << " " << get_addrs(i) << dendl;
i++;
continue;
}
lgeneric_dout(cct, 1) << " removing " << get_name(i) << " " << get_addrs(i)
<< dendl;
if (removed) {
removed->insert(get_addrs(i));
}
remove(n);
ceph_assert(!contains(n));
}
// add missing initial members
for (auto& p : initial_members) {
if (!contains(p)) {
if (p == my_name) {
lgeneric_dout(cct, 1) << " adding self " << p << " " << my_addrs
<< dendl;
add(p, my_addrs);
} else {
// fabricate a placeholder legacy addr with a unique nonce
entity_addr_t a;
a.set_type(entity_addr_t::TYPE_LEGACY);
a.set_family(AF_INET);
for (int n=1; ; n++) {
a.set_nonce(n);
if (!contains(a))
break;
}
lgeneric_dout(cct, 1) << " adding " << p << " " << a << dendl;
add(p, entity_addrvec_t(a));
}
ceph_assert(contains(p));
}
}
calc_legacy_ranks();
}
// Seed the monmap from the [mon.<id>] sections of a config file.
// For each section, read 'mon addr' (legacy v1 semantics) plus
// optional 'mon priority' / 'mon weight'.  Malformed or missing
// entries are reported to 'errout' and skipped.  Returns -ENOENT if
// the sections cannot be listed; otherwise 0 (even if no monitors
// were added).
int MonMap::init_with_config_file(const ConfigProxy& conf,
std::ostream& errout)
{
std::vector<std::string> sections;
int ret = conf.get_all_sections(sections);
if (ret) {
errout << "Unable to find any monitors in the configuration "
<< "file, because there was an error listing the sections. error "
<< ret << std::endl;
return -ENOENT;
}
// collect the <id> part of every "mon.<id>" section
std::vector<std::string> mon_names;
for (const auto& section : sections) {
if (section.substr(0, 4) == "mon." && section.size() > 4) {
mon_names.push_back(section.substr(4));
}
}
// Find an address for each monitor in the config file.
for (const auto& mon_name : mon_names) {
// lookup order: mon.<id>, then mon, then global
std::vector<std::string> sections;
std::string m_name("mon");
m_name += ".";
m_name += mon_name;
sections.push_back(m_name);
sections.push_back("mon");
sections.push_back("global");
std::string val;
int res = conf.get_val_from_conf_file(sections, "mon addr", val, true);
if (res) {
errout << "failed to get an address for mon." << mon_name
<< ": error " << res << std::endl;
continue;
}
// the 'mon addr' field is a legacy field, so assume anything
// there on a weird port is a v1 address, and do not handle
// addrvecs.
entity_addr_t addr;
if (!addr.parse(val, entity_addr_t::TYPE_LEGACY)) {
errout << "unable to parse address for mon." << mon_name
<< ": addr='" << val << "'" << std::endl;
continue;
}
if (addr.get_port() == 0) {
addr.set_port(CEPH_MON_PORT_LEGACY);
}
uint16_t priority = 0;
if (!conf.get_val_from_conf_file(sections, "mon priority", val, false)) {
try {
priority = std::stoul(val);
} catch (std::logic_error&) {
errout << "unable to parse priority for mon." << mon_name
<< ": priority='" << val << "'" << std::endl;
continue;
}
}
uint16_t weight = 0;
if (!conf.get_val_from_conf_file(sections, "mon weight", val, false)) {
try {
weight = std::stoul(val);
} catch (std::logic_error&) {
errout << "unable to parse weight for mon." << mon_name
<< ": weight='" << val << "'"
<< std::endl;
continue;
}
}
// make sure this mon isn't already in the map
if (contains(addr))
remove(get_name(addr));
if (contains(mon_name))
remove(mon_name);
_add_ambiguous_addr(mon_name, addr, priority, weight, false);
}
return 0;
}
// Raise MON_LOCATION_NOT_SET when stretch mode is enabled but some
// monitors have no CRUSH location configured.
void MonMap::check_health(health_check_map_t *checks) const
{
  // location-less mons are only a concern in stretch mode
  if (!stretch_mode_enabled) {
    return;
  }
  list<string> missing;
  for (const auto& [name, info] : mon_info) {
    if (info.crush_loc.empty()) {
      ostringstream os;
      os << "mon " << name << " has no location set while in stretch mode";
      missing.push_back(os.str());
    }
  }
  if (missing.empty()) {
    return;
  }
  ostringstream summary;
  summary << missing.size()
	  << " monitor(s) have no location set while in stretch mode"
	  << "; this may cause issues with failover, OSD connections, netsplit handling, etc";
  auto& check = checks->add("MON_LOCATION_NOT_SET", HEALTH_WARN,
			    summary.str(), missing.size());
  check.detail.swap(missing);
}
#ifdef WITH_SEASTAR
// Asynchronously read and decode a monmap from the file at 'monmap'
// using seastar file I/O (crimson builds only).
seastar::future<> MonMap::read_monmap(const std::string& monmap)
{
using namespace seastar;
return open_file_dma(monmap, open_flags::ro).then([this] (file f) {
return f.size().then([this, f = std::move(f)](size_t s) {
return do_with(make_file_input_stream(f), [this, s](input_stream<char>& in) {
// read the whole file in one shot, then decode in place
return in.read_exactly(s).then([this](temporary_buffer<char> buf) {
ceph::buffer::list bl;
bl.push_back(ceph::buffer::ptr_node::create(
ceph::buffer::create(std::move(buf))));
decode(bl);
});
});
});
});
}
// Discover monitors via DNS SRV records (crimson builds).  'name' is
// "<service>" or "<service>_<domain>"; each SRV target is resolved and
// added as an ambiguous (v1/v2) address carrying the record's
// priority/weight.  DNS failures are logged and ignored.
seastar::future<> MonMap::init_with_dns_srv(bool for_mkfs, const std::string& name)
{
logger().debug("{}: for_mkfs={}, name={}", __func__, for_mkfs, name);
string domain;
string service = name;
// check if domain is also provided and extract it from srv_name
size_t idx = name.find("_");
if (idx != name.npos) {
domain = name.substr(idx + 1);
service = name.substr(0, idx);
}
return seastar::net::dns::get_srv_records(
seastar::net::dns_resolver::srv_proto::tcp,
service, domain).then([this](seastar::net::dns_resolver::srv_records records) {
// resolve all SRV targets concurrently
return seastar::parallel_for_each(records, [this](auto record) {
return seastar::net::dns::resolve_name(record.target).then(
[record,this](seastar::net::inet_address a) {
// the resolved address does not contain ceph specific info like nonce
// nonce or msgr proto (legacy, msgr2), so set entity_addr_t manually
entity_addr_t addr;
addr.set_type(entity_addr_t::TYPE_ANY);
addr.set_family(int(a.in_family()));
addr.set_port(record.port);
switch (a.in_family()) {
case seastar::net::inet_address::family::INET:
addr.in4_addr().sin_addr = a;
break;
case seastar::net::inet_address::family::INET6:
addr.in6_addr().sin6_addr = a;
break;
}
_add_ambiguous_addr(record.target,
addr,
record.priority,
record.weight,
false);
}).handle_exception_type([t=record.target](const std::system_error& e) {
// per-target resolution failure: log and keep going
logger().debug("{}: unable to resolve name for {}: {}",
"init_with_dns_srv", t, e);
});
});
}).handle_exception_type([name](const std::system_error& e) {
logger().debug("{}: unable to get monitor info from DNS SRV with {}: {}",
"init_with_dns_srv", name, e);
// ignore DNS failures
return seastar::make_ready_future<>();
});
}
// Try to seed the map from a mon_host-style string.  Returns false
// when the string is empty, true when either the IP or the hostname
// path succeeded; throws std::runtime_error if the hostname fallback
// fails.
bool MonMap::maybe_init_with_mon_host(const std::string& mon_host,
				      const bool for_mkfs)
{
  if (mon_host.empty()) {
    return false;
  }
  // first try to parse as literal IPs, then fall back to hostnames
  if (init_with_ips(mon_host, for_mkfs, "noname-") == 0) {
    return true;
  }
  // TODO: resolve_addrs() is a blocking call
  const int r = init_with_hosts(mon_host, for_mkfs, "noname-");
  if (r != 0) {
    throw std::runtime_error(cpp_strerror(r));
  }
  return true;
}
// Build the initial monmap (crimson builds), trying sources in order:
// mon_host option, config-file [mon.*] sections, then DNS SRV records.
// Throws if no monitors can be found.
seastar::future<> MonMap::build_monmap(const crimson::common::ConfigProxy& conf,
bool for_mkfs)
{
logger().debug("{}: for_mkfs={}", __func__, for_mkfs);
// -m foo?
if (maybe_init_with_mon_host(conf.get_val<std::string>("mon_host"), for_mkfs)) {
return seastar::make_ready_future<>();
}
// What monitors are in the config file?
ostringstream errout;
if (auto ret = init_with_config_file(conf, errout); ret < 0) {
throw std::runtime_error(errout.str());
}
if (size() > 0) {
return seastar::make_ready_future<>();
}
// no info found from conf options lets try use DNS SRV records
const string srv_name = conf.get_val<std::string>("mon_dns_srv_name");
return init_with_dns_srv(for_mkfs, srv_name).then([this] {
if (size() == 0) {
throw std::runtime_error("no monitors specified to connect to.");
}
});
}
// Build an initial bootstrap monmap.  Priority order: the
// mon_host_override option, an explicit "monmap" file, and finally
// build_monmap()'s own fallback chain (mon_host, config sections,
// DNS SRV).  On success via build_monmap(), stamps created /
// last_changed and recomputes legacy ranks.
seastar::future<> MonMap::build_initial(const crimson::common::ConfigProxy& conf, bool for_mkfs)
{
  // highest priority: an explicit mon_host_override
  const auto host_override = conf.get_val<std::string>("mon_host_override");
  if (maybe_init_with_mon_host(host_override, for_mkfs)) {
    return seastar::make_ready_future<>();
  }
  // next: a monmap file provided via config
  if (const auto monmap_path = conf.get_val<std::string>("monmap");
      !monmap_path.empty()) {
    return read_monmap(monmap_path);
  }
  // pick up the fsid from conf, if one is set
  if (const auto conf_fsid = conf.get_val<uuid_d>("fsid");
      !conf_fsid.is_zero()) {
    fsid = conf_fsid;
  }
  return build_monmap(conf, for_mkfs).then([this] {
    created = ceph_clock_now();
    last_changed = created;
    calc_legacy_ranks();
  });
}
#else // WITH_SEASTAR
// Load an encoded monmap from the file at `monmap`.
//
// @return 0 on success; otherwise writes a human-readable message to
//         `errout` and returns a negative errno (-EINVAL when the
//         buffer failed to decode).
int MonMap::init_with_monmap(const std::string& monmap, std::ostream& errout)
{
  int ret = 0;
  try {
    ret = read(monmap.c_str());
  } catch (ceph::buffer::error&) {
    ret = -EINVAL;
  }
  if (ret < 0) {
    errout << "unable to read/decode monmap from " << monmap
           << ": " << cpp_strerror(-ret) << std::endl;
    return ret;
  }
  return 0;
}
/**
 * Populate the monmap from DNS SRV records.
 *
 * `srv_name` may optionally carry a search domain after an underscore
 * (e.g. "ceph-mon_example.com"); the part after the first '_' is used
 * as the DNS domain and the part before it as the service name.
 *
 * @param cct      context, used for logging and DNS resolution
 * @param srv_name SRV service name, optionally "<service>_<domain>"
 * @param for_mkfs unused here; kept for symmetry with the other
 *                 init_with_* helpers
 * @param errout   stream to receive a human-readable error message
 * @return 0 on success, -1 if the SRV lookup failed
 */
int MonMap::init_with_dns_srv(CephContext* cct,
                              std::string srv_name,
                              bool for_mkfs,
                              std::ostream& errout)
{
  lgeneric_dout(cct, 1) << __func__ << " srv_name: " << srv_name << dendl;
  string domain;
  // check if domain is also provided and extract it from srv_name
  size_t idx = srv_name.find("_");
  if (idx != string::npos) {
    domain = srv_name.substr(idx + 1);
    srv_name = srv_name.substr(0, idx);
  }
  map<string, DNSResolver::Record> records;
  if (DNSResolver::get_instance()->resolve_srv_hosts(cct, srv_name,
      DNSResolver::SRV_Protocol::TCP, domain, &records) != 0) {
    // report the service name that was actually queried; previously
    // this hard-coded "ceph-mon", which misled users who configured a
    // custom mon_dns_srv_name
    errout << "unable to get monitor info from DNS SRV with service name: "
           << srv_name << std::endl;
    return -1;
  }
  for (auto& record : records) {
    // SRV records carry no ceph-specific addressing info (nonce, msgr
    // protocol), so leave the address type ambiguous for later probing
    record.second.addr.set_type(entity_addr_t::TYPE_ANY);
    _add_ambiguous_addr(record.first,
                        record.second.addr,
                        record.second.priority,
                        record.second.weight,
                        false);
  }
  return 0;
}
/**
 * Build an initial bootstrap monmap from configuration.
 *
 * Sources are tried in strict priority order:
 *   1. mon_host_override config option (short-circuits, success or not)
 *   2. mon addrs cached on the CephContext
 *   3. an explicit "monmap" file
 *   4. the mon_host option (parsed as IPs, then as hostnames)
 *   5. [mon.*] sections in the config file
 *   6. DNS SRV records (mon_dns_srv_name)
 *
 * @return 0 on success; a negative errno otherwise (-ENOENT when no
 *         monitors could be found from any source)
 */
int MonMap::build_initial(CephContext *cct, bool for_mkfs, ostream& errout)
{
  lgeneric_dout(cct, 1) << __func__ << " for_mkfs: " << for_mkfs << dendl;
  const auto& conf = cct->_conf;
  // mon_host_override?
  auto mon_host_override = conf.get_val<std::string>("mon_host_override");
  if (!mon_host_override.empty()) {
    lgeneric_dout(cct, 1) << "Using mon_host_override " << mon_host_override << dendl;
    auto ret = init_with_ips(mon_host_override, for_mkfs, "noname-");
    if (ret == -EINVAL) {
      // not parseable as IP addrs; retry treating the entries as hostnames
      ret = init_with_hosts(mon_host_override, for_mkfs, "noname-");
    }
    if (ret < 0) {
      errout << "unable to parse addrs in '" << mon_host_override << "'"
	     << std::endl;
    }
    // an override short-circuits all later sources, success or failure
    return ret;
  }

  // cct? (mon addrs cached on the context)
  auto addrs = cct->get_mon_addrs();
  if (addrs != nullptr && (addrs->size() > 0)) {
    init_with_addrs(*addrs, for_mkfs, "noname-");
    return 0;
  }

  // file?
  if (const auto monmap = conf.get_val<std::string>("monmap");
      !monmap.empty()) {
    return init_with_monmap(monmap, errout);
  }

  // fsid from conf? (only adopt a non-zero fsid)
  if (const auto new_fsid = conf.get_val<uuid_d>("fsid");
      !new_fsid.is_zero()) {
    fsid = new_fsid;
  }
  // -m foo?
  if (const auto mon_host = conf.get_val<std::string>("mon_host");
      !mon_host.empty()) {
    auto ret = init_with_ips(mon_host, for_mkfs, "noname-");
    if (ret == -EINVAL) {
      // not parseable as IP addrs; retry treating the entries as hostnames
      ret = init_with_hosts(mon_host, for_mkfs, "noname-");
    }
    if (ret < 0) {
      errout << "unable to parse addrs in '" << mon_host << "'"
	     << std::endl;
      return ret;
    }
  }
  if (size() == 0) {
    // What monitors are in the config file?
    if (auto ret = init_with_config_file(conf, errout); ret < 0) {
      return ret;
    }
  }
  if (size() == 0) {
    // no info found from conf options lets try use DNS SRV records
    string srv_name = conf.get_val<std::string>("mon_dns_srv_name");
    // note: a DNS SRV failure is reported to the caller as -ENOENT,
    // not the helper's own return value
    if (auto ret = init_with_dns_srv(cct, srv_name, for_mkfs, errout); ret < 0) {
      return -ENOENT;
    }
  }
  if (size() == 0) {
    errout << "no monitors specified to connect to." << std::endl;
    return -ENOENT;
  }
  // monitors found: finalize election strategy, timestamps, and ranks
  strategy = static_cast<election_strategy>(conf.get_val<uint64_t>("mon_election_default_strategy"));
  created = ceph_clock_now();
  last_changed = created;
  calc_legacy_ranks();
  return 0;
}
#endif // WITH_SEASTAR
| 30,318 | 29.108242 | 101 |
cc
|
null |
ceph-main/src/mon/MonMap.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MONMAP_H
#define CEPH_MONMAP_H
#ifdef WITH_SEASTAR
#include <seastar/core/future.hh>
#endif
#include "common/config_fwd.h"
#include "common/ceph_releases.h"
#include "include/err.h"
#include "include/types.h"
#include "mon/mon_types.h"
#include "msg/Message.h"
class health_check_map_t;
#ifdef WITH_SEASTAR
namespace crimson::common {
class ConfigProxy;
}
#endif
namespace ceph {
class Formatter;
}
struct mon_info_t {
/**
* monitor name
*
* i.e., 'foo' in 'mon.foo'
*/
std::string name;
/**
* monitor's public address(es)
*
* public facing address(es), used to communicate with all clients
* and with other monitors.
*/
entity_addrvec_t public_addrs;
/**
* the priority of the mon, the lower value the more preferred
*/
uint16_t priority{0};
uint16_t weight{0};
/**
* The location of the monitor, in CRUSH hierarchy terms
*/
std::map<std::string,std::string> crush_loc;
// <REMOVE ME>
mon_info_t(const std::string& n, const entity_addr_t& p_addr, uint16_t p)
: name(n), public_addrs(p_addr), priority(p)
{}
// </REMOVE ME>
mon_info_t(const std::string& n, const entity_addrvec_t& p_addrs,
uint16_t p, uint16_t w)
: name(n), public_addrs(p_addrs), priority(p), weight(w)
{}
mon_info_t(const std::string &n, const entity_addrvec_t& p_addrs)
: name(n), public_addrs(p_addrs)
{ }
mon_info_t() { }
void encode(ceph::buffer::list& bl, uint64_t features) const;
void decode(ceph::buffer::list::const_iterator& p);
void print(std::ostream& out) const;
};
WRITE_CLASS_ENCODER_FEATURES(mon_info_t)
inline std::ostream& operator<<(std::ostream& out, const mon_info_t& mon) {
mon.print(out);
return out;
}
class MonMap {
public:
epoch_t epoch; // what epoch/version of the monmap
uuid_d fsid;
utime_t last_changed;
utime_t created;
std::map<std::string, mon_info_t> mon_info;
std::map<entity_addr_t, std::string> addr_mons;
std::vector<std::string> ranks;
/* ranks which were removed when this map took effect.
There should only be one at a time, but leave support
for arbitrary numbers just to be safe. */
std::set<unsigned> removed_ranks;
/**
* Persistent Features are all those features that once set on a
* monmap cannot, and should not, be removed. These will define the
* non-negotiable features that a given monitor must support to
* properly operate in a given quorum.
*
* Should be reserved for features that we really want to make sure
* are sticky, and are important enough to tolerate not being able
* to downgrade a monitor.
*/
mon_feature_t persistent_features;
/**
* Optional Features are all those features that can be enabled or
* disabled following a given criteria -- e.g., user-mandated via the
* cli --, and act much like indicators of what the cluster currently
* supports.
*
* They are by no means "optional" in the sense that monitors can
* ignore them. Just that they are not persistent.
*/
mon_feature_t optional_features;
/**
* Returns the set of features required by this monmap.
*
* The features required by this monmap is the union of all the
* currently set persistent features and the currently set optional
* features.
*
* @returns the set of features required by this monmap
*/
mon_feature_t get_required_features() const {
return (persistent_features | optional_features);
}
// upgrade gate
ceph_release_t min_mon_release{ceph_release_t::unknown};
void _add_ambiguous_addr(const std::string& name,
entity_addr_t addr,
int priority,
int weight,
bool for_mkfs);
enum election_strategy {
// Keep in sync with ElectionLogic.h!
CLASSIC = 1, // the original rank-based one
DISALLOW = 2, // disallow a set from being leader
CONNECTIVITY = 3 // includes DISALLOW, extends to prefer stronger connections
};
election_strategy strategy = CLASSIC;
std::set<std::string> disallowed_leaders; // can't be leader under CONNECTIVITY/DISALLOW
bool stretch_mode_enabled = false;
std::string tiebreaker_mon;
std::set<std::string> stretch_marked_down_mons; // can't be leader until fully recovered
public:
void calc_legacy_ranks();
void calc_addr_mons() {
// populate addr_mons
addr_mons.clear();
for (auto& p : mon_info) {
for (auto& a : p.second.public_addrs.v) {
addr_mons[a] = p.first;
}
}
}
MonMap()
: epoch(0) {
}
uuid_d& get_fsid() { return fsid; }
unsigned size() const {
return mon_info.size();
}
unsigned min_quorum_size(unsigned total_mons=0) const {
if (total_mons == 0) {
total_mons = size();
}
return total_mons / 2 + 1;
}
epoch_t get_epoch() const { return epoch; }
void set_epoch(epoch_t e) { epoch = e; }
/**
* Obtain list of public facing addresses
*
* @param ls list to populate with the monitors' addresses
*/
void list_addrs(std::list<entity_addr_t>& ls) const {
for (auto& i : mon_info) {
for (auto& j : i.second.public_addrs.v) {
ls.push_back(j);
}
}
}
/**
* Add new monitor to the monmap
*
* @param m monitor info of the new monitor
*/
void add(const mon_info_t& m) {
ceph_assert(mon_info.count(m.name) == 0);
for (auto& a : m.public_addrs.v) {
ceph_assert(addr_mons.count(a) == 0);
}
mon_info[m.name] = m;
if (get_required_features().contains_all(
ceph::features::mon::FEATURE_NAUTILUS)) {
ranks.push_back(m.name);
ceph_assert(ranks.size() == mon_info.size());
} else {
calc_legacy_ranks();
}
calc_addr_mons();
}
/**
* Add new monitor to the monmap
*
* @param name Monitor name (i.e., 'foo' in 'mon.foo')
* @param addr Monitor's public address
*/
void add(const std::string &name, const entity_addrvec_t &addrv,
uint16_t priority=0, uint16_t weight=0) {
add(mon_info_t(name, addrv, priority, weight));
}
/**
* Remove monitor from the monmap
*
* @param name Monitor name (i.e., 'foo' in 'mon.foo')
*/
void remove(const std::string &name) {
// this must match what we do in ConnectionTracker::notify_rank_removed
ceph_assert(mon_info.count(name));
int rank = get_rank(name);
mon_info.erase(name);
disallowed_leaders.erase(name);
ceph_assert(mon_info.count(name) == 0);
if (rank >= 0 ) {
removed_ranks.insert(rank);
}
if (get_required_features().contains_all(
ceph::features::mon::FEATURE_NAUTILUS)) {
ranks.erase(std::find(ranks.begin(), ranks.end(), name));
ceph_assert(ranks.size() == mon_info.size());
} else {
calc_legacy_ranks();
}
calc_addr_mons();
}
/**
* Rename monitor from @p oldname to @p newname
*
* @param oldname monitor's current name (i.e., 'foo' in 'mon.foo')
* @param newname monitor's new name (i.e., 'bar' in 'mon.bar')
*/
void rename(std::string oldname, std::string newname) {
ceph_assert(contains(oldname));
ceph_assert(!contains(newname));
mon_info[newname] = mon_info[oldname];
mon_info.erase(oldname);
mon_info[newname].name = newname;
if (get_required_features().contains_all(
ceph::features::mon::FEATURE_NAUTILUS)) {
*std::find(ranks.begin(), ranks.end(), oldname) = newname;
ceph_assert(ranks.size() == mon_info.size());
} else {
calc_legacy_ranks();
}
calc_addr_mons();
}
int set_rank(const std::string& name, int rank) {
int oldrank = get_rank(name);
if (oldrank < 0) {
return -ENOENT;
}
if (rank < 0 || rank >= (int)ranks.size()) {
return -EINVAL;
}
if (oldrank != rank) {
ranks.erase(ranks.begin() + oldrank);
ranks.insert(ranks.begin() + rank, name);
}
return 0;
}
bool contains(const std::string& name) const {
return mon_info.count(name);
}
/**
* Check if monmap contains a monitor with address @p a
*
* @note checks for all addresses a monitor may have, public or otherwise.
*
* @param a monitor address
* @returns true if monmap contains a monitor with address @p;
* false otherwise.
*/
bool contains(const entity_addr_t &a, std::string *name=nullptr) const {
for (auto& i : mon_info) {
for (auto& j : i.second.public_addrs.v) {
if (j == a) {
if (name) {
*name = i.first;
}
return true;
}
}
}
return false;
}
bool contains(const entity_addrvec_t &av, std::string *name=nullptr) const {
for (auto& i : mon_info) {
for (auto& j : i.second.public_addrs.v) {
for (auto& k : av.v) {
if (j == k) {
if (name) {
*name = i.first;
}
return true;
}
}
}
}
return false;
}
std::string get_name(unsigned n) const {
ceph_assert(n < ranks.size());
return ranks[n];
}
std::string get_name(const entity_addr_t& a) const {
std::map<entity_addr_t, std::string>::const_iterator p = addr_mons.find(a);
if (p == addr_mons.end())
return std::string();
else
return p->second;
}
std::string get_name(const entity_addrvec_t& av) const {
for (auto& i : av.v) {
std::map<entity_addr_t, std::string>::const_iterator p = addr_mons.find(i);
if (p != addr_mons.end())
return p->second;
}
return std::string();
}
int get_rank(const std::string& n) const {
if (auto found = std::find(ranks.begin(), ranks.end(), n);
found != ranks.end()) {
return std::distance(ranks.begin(), found);
} else {
return -1;
}
}
int get_rank(const entity_addr_t& a) const {
std::string n = get_name(a);
if (!n.empty()) {
return get_rank(n);
}
return -1;
}
int get_rank(const entity_addrvec_t& av) const {
std::string n = get_name(av);
if (!n.empty()) {
return get_rank(n);
}
return -1;
}
bool get_addr_name(const entity_addr_t& a, std::string& name) {
if (addr_mons.count(a) == 0)
return false;
name = addr_mons[a];
return true;
}
const entity_addrvec_t& get_addrs(const std::string& n) const {
ceph_assert(mon_info.count(n));
std::map<std::string,mon_info_t>::const_iterator p = mon_info.find(n);
return p->second.public_addrs;
}
const entity_addrvec_t& get_addrs(unsigned m) const {
ceph_assert(m < ranks.size());
return get_addrs(ranks[m]);
}
void set_addrvec(const std::string& n, const entity_addrvec_t& a) {
ceph_assert(mon_info.count(n));
mon_info[n].public_addrs = a;
calc_addr_mons();
}
uint16_t get_priority(const std::string& n) const {
auto it = mon_info.find(n);
ceph_assert(it != mon_info.end());
return it->second.priority;
}
uint16_t get_weight(const std::string& n) const {
auto it = mon_info.find(n);
ceph_assert(it != mon_info.end());
return it->second.weight;
}
void set_weight(const std::string& n, uint16_t v) {
auto it = mon_info.find(n);
ceph_assert(it != mon_info.end());
it->second.weight = v;
}
void encode(ceph::buffer::list& blist, uint64_t con_features) const;
void decode(ceph::buffer::list& blist) {
auto p = std::cbegin(blist);
decode(p);
}
void decode(ceph::buffer::list::const_iterator& p);
void generate_fsid() {
fsid.generate_random();
}
// read from/write to a file
int write(const char *fn);
int read(const char *fn);
/**
* build an initial bootstrap monmap from conf
*
* Build an initial bootstrap monmap from the config. This will
* try, in this order:
*
* 1 monmap -- an explicitly provided monmap
* 2 mon_host -- list of monitors
* 3 config [mon.*] sections, and 'mon addr' fields in those sections
*
* @param cct context (and associated config)
* @param errout std::ostream to send error messages too
*/
#ifdef WITH_SEASTAR
seastar::future<> build_initial(const crimson::common::ConfigProxy& conf, bool for_mkfs);
#else
int build_initial(CephContext *cct, bool for_mkfs, std::ostream& errout);
#endif
/**
* filter monmap given a set of initial members.
*
* Remove mons that aren't in the initial_members list. Add missing
* mons and give them dummy IPs (blank IPv4, with a non-zero
* nonce). If the name matches my_name, then my_addr will be used in
* place of a dummy addr.
*
* @param initial_members list of initial member names
* @param my_name name of self, can be blank
* @param my_addr my addr
* @param removed optional pointer to set to insert removed mon addrs to
*/
void set_initial_members(CephContext *cct,
std::list<std::string>& initial_members,
std::string my_name,
const entity_addrvec_t& my_addrs,
std::set<entity_addrvec_t> *removed);
void print(std::ostream& out) const;
void print_summary(std::ostream& out) const;
void dump(ceph::Formatter *f) const;
void dump_summary(ceph::Formatter *f) const;
void check_health(health_check_map_t *checks) const;
static void generate_test_instances(std::list<MonMap*>& o);
protected:
/**
* build a monmap from a list of entity_addrvec_t's
*
* Give mons dummy names.
*
* @param addrs list of entity_addrvec_t's
* @param prefix prefix to prepend to generated mon names
*/
void init_with_addrs(const std::vector<entity_addrvec_t>& addrs,
bool for_mkfs,
std::string_view prefix);
/**
* build a monmap from a list of ips
*
* Give mons dummy names.
*
* @param hosts list of ips, space or comma separated
* @param prefix prefix to prepend to generated mon names
* @return 0 for success, -errno on error
*/
int init_with_ips(const std::string& ips,
bool for_mkfs,
std::string_view prefix);
/**
* build a monmap from a list of hostnames
*
* Give mons dummy names.
*
* @param hosts list of ips, space or comma separated
* @param prefix prefix to prepend to generated mon names
* @return 0 for success, -errno on error
*/
int init_with_hosts(const std::string& hostlist,
bool for_mkfs,
std::string_view prefix);
int init_with_config_file(const ConfigProxy& conf, std::ostream& errout);
#if WITH_SEASTAR
seastar::future<> read_monmap(const std::string& monmap);
/// try to build monmap with different settings, like
/// mon_host, mon* sections, and mon_dns_srv_name
seastar::future<> build_monmap(const crimson::common::ConfigProxy& conf, bool for_mkfs);
/// initialize monmap by resolving given service name
seastar::future<> init_with_dns_srv(bool for_mkfs, const std::string& name);
/// initialize monmap with `mon_host` or `mon_host_override`
bool maybe_init_with_mon_host(const std::string& mon_host, bool for_mkfs);
#else
/// read from encoded monmap file
int init_with_monmap(const std::string& monmap, std::ostream& errout);
int init_with_dns_srv(CephContext* cct, std::string srv_name, bool for_mkfs,
std::ostream& errout);
#endif
};
WRITE_CLASS_ENCODER_FEATURES(MonMap)
inline std::ostream& operator<<(std::ostream &out, const MonMap &m) {
m.print_summary(out);
return out;
}
#endif
| 15,679 | 27.56102 | 91 |
h
|
null |
ceph-main/src/mon/MonOpRequest.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Red Hat <[email protected]>
* Copyright (C) 2015 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef MON_OPREQUEST_H_
#define MON_OPREQUEST_H_
#include <iosfwd>
#include <stdint.h>
#include "common/TrackedOp.h"
#include "mon/Session.h"
#include "msg/Message.h"
struct MonOpRequest : public TrackedOp {
friend class OpTracker;
void mark_dispatch() {
mark_event("monitor_dispatch");
}
void mark_wait_for_quorum() {
mark_event("wait_for_quorum");
}
void mark_zap() {
mark_event("monitor_zap");
}
void mark_forwarded() {
mark_event("forwarded");
forwarded_to_leader = true;
}
void mark_svc_event(const std::string &service, const std::string &event) {
std::string s = service;
s.append(":").append(event);
mark_event(s);
}
void mark_logmon_event(const std::string &event) {
mark_svc_event("logm", event);
}
void mark_osdmon_event(const std::string &event) {
mark_svc_event("osdmap", event);
}
void mark_pgmon_event(const std::string &event) {
mark_svc_event("pgmap", event);
}
void mark_mdsmon_event(const std::string &event) {
mark_svc_event("mdsmap", event);
}
void mark_authmon_event(const std::string &event) {
mark_svc_event("auth", event);
}
void mark_paxos_event(const std::string &event) {
mark_svc_event("paxos", event);
}
enum op_type_t {
OP_TYPE_NONE = 0, ///< no type defined (default)
OP_TYPE_SERVICE, ///< belongs to a Paxos Service or similar
OP_TYPE_MONITOR, ///< belongs to the Monitor class
OP_TYPE_ELECTION, ///< belongs to the Elector class
OP_TYPE_PAXOS, ///< refers to Paxos messages
OP_TYPE_COMMAND, ///< is a command
};
MonOpRequest(const MonOpRequest &other) = delete;
MonOpRequest & operator = (const MonOpRequest &other) = delete;
private:
Message *request;
utime_t dequeued_time;
RefCountedPtr session;
ConnectionRef con;
bool forwarded_to_leader;
op_type_t op_type;
MonOpRequest(Message *req, OpTracker *tracker) :
TrackedOp(tracker,
req->get_recv_stamp().is_zero() ?
ceph_clock_now() : req->get_recv_stamp()),
request(req),
con(NULL),
forwarded_to_leader(false),
op_type(OP_TYPE_NONE)
{
if (req) {
con = req->get_connection();
if (con) {
session = con->get_priv();
}
}
}
void _dump(ceph::Formatter *f) const override {
{
f->open_array_section("events");
std::lock_guard l(lock);
for (auto i = events.begin(); i != events.end(); ++i) {
f->open_object_section("event");
f->dump_string("event", i->str);
f->dump_stream("time") << i->stamp;
auto i_next = i + 1;
if (i_next < events.end()) {
f->dump_float("duration", i_next->stamp - i->stamp);
} else {
f->dump_float("duration", events.rbegin()->stamp - get_initiated());
}
f->close_section();
}
f->close_section();
f->open_object_section("info");
f->dump_int("seq", seq);
f->dump_bool("src_is_mon", is_src_mon());
f->dump_stream("source") << request->get_source_inst();
f->dump_bool("forwarded_to_leader", forwarded_to_leader);
f->close_section();
}
}
protected:
void _dump_op_descriptor_unlocked(std::ostream& stream) const override {
get_req()->print(stream);
}
public:
~MonOpRequest() override {
request->put();
}
MonSession *get_session() const {
return static_cast<MonSession*>(session.get());
}
template<class T>
T *get_req() const { return static_cast<T*>(request); }
Message *get_req() const { return get_req<Message>(); }
int get_req_type() const {
if (!request)
return 0;
return request->get_type();
}
ConnectionRef get_connection() { return con; }
void set_session(MonSession *s) {
session.reset(s);
}
bool is_src_mon() const {
return (con && con->get_peer_type() & CEPH_ENTITY_TYPE_MON);
}
typedef boost::intrusive_ptr<MonOpRequest> Ref;
void set_op_type(op_type_t t) {
op_type = t;
}
void set_type_service() {
set_op_type(OP_TYPE_SERVICE);
}
void set_type_monitor() {
set_op_type(OP_TYPE_MONITOR);
}
void set_type_paxos() {
set_op_type(OP_TYPE_PAXOS);
}
void set_type_election_or_ping() {
set_op_type(OP_TYPE_ELECTION);
}
void set_type_command() {
set_op_type(OP_TYPE_COMMAND);
}
op_type_t get_op_type() {
return op_type;
}
bool is_type_service() {
return (get_op_type() == OP_TYPE_SERVICE);
}
bool is_type_monitor() {
return (get_op_type() == OP_TYPE_MONITOR);
}
bool is_type_paxos() {
return (get_op_type() == OP_TYPE_PAXOS);
}
bool is_type_election_or_ping() {
return (get_op_type() == OP_TYPE_ELECTION);
}
bool is_type_command() {
return (get_op_type() == OP_TYPE_COMMAND);
}
};
typedef MonOpRequest::Ref MonOpRequestRef;
struct C_MonOp : public Context
{
MonOpRequestRef op;
explicit C_MonOp(MonOpRequestRef o) :
op(o) { }
void finish(int r) override {
if (op && r == -ECANCELED) {
op->mark_event("callback canceled");
} else if (op && r == -EAGAIN) {
op->mark_event("callback retry");
} else if (op && r == 0) {
op->mark_event("callback finished");
}
_finish(r);
}
void mark_op_event(const std::string &event) {
if (op)
op->mark_event(event);
}
virtual void _finish(int r) = 0;
};
#endif /* MON_OPREQUEST_H_ */
| 5,825 | 23.376569 | 77 |
h
|
null |
ceph-main/src/mon/MonSub.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "MonSub.h"
bool MonSub::have_new() const {
return !sub_new.empty();
}
bool MonSub::need_renew() const
{
return ceph::coarse_mono_clock::now() > renew_after;
}
void MonSub::renewed()
{
if (clock::is_zero(renew_sent)) {
renew_sent = clock::now();
}
// update sub_sent with sub_new
sub_new.insert(sub_sent.begin(), sub_sent.end());
std::swap(sub_new, sub_sent);
sub_new.clear();
}
void MonSub::acked(uint32_t interval)
{
if (!clock::is_zero(renew_sent)) {
// NOTE: this is only needed for legacy (infernalis or older)
// mons; see MonClient::tick().
renew_after = renew_sent;
renew_after += ceph::make_timespan(interval / 2.0);
renew_sent = clock::zero();
}
}
bool MonSub::reload()
{
for (auto& [what, sub] : sub_sent) {
if (sub_new.count(what) == 0) {
sub_new[what] = sub;
}
}
return have_new();
}
void MonSub::got(const std::string& what, version_t have)
{
if (auto i = sub_new.find(what); i != sub_new.end()) {
auto& sub = i->second;
if (sub.start <= have) {
if (sub.flags & CEPH_SUBSCRIBE_ONETIME) {
sub_new.erase(i);
} else {
sub.start = have + 1;
}
}
} else if (auto i = sub_sent.find(what); i != sub_sent.end()) {
auto& sub = i->second;
if (sub.start <= have) {
if (sub.flags & CEPH_SUBSCRIBE_ONETIME) {
sub_sent.erase(i);
} else {
sub.start = have + 1;
}
}
}
}
bool MonSub::want(const std::string& what, version_t start, unsigned flags)
{
if (auto sub = sub_new.find(what);
sub != sub_new.end() &&
sub->second.start == start &&
sub->second.flags == flags) {
return false;
} else if (auto sub = sub_sent.find(what);
sub != sub_sent.end() &&
sub->second.start == start &&
sub->second.flags == flags) {
return false;
} else {
sub_new[what].start = start;
sub_new[what].flags = flags;
return true;
}
}
bool MonSub::inc_want(const std::string& what, version_t start, unsigned flags)
{
if (auto sub = sub_new.find(what); sub != sub_new.end()) {
if (sub->second.start >= start) {
return false;
} else {
sub->second.start = start;
sub->second.flags = flags;
return true;
}
} else if (auto sub = sub_sent.find(what);
sub == sub_sent.end() || sub->second.start < start) {
auto& item = sub_new[what];
item.start = start;
item.flags = flags;
return true;
} else {
return false;
}
}
void MonSub::unwant(const std::string& what)
{
sub_sent.erase(what);
sub_new.erase(what);
}
| 2,688 | 22.382609 | 79 |
cc
|
null |
ceph-main/src/mon/MonSub.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <map>
#include <string>
#include "common/ceph_time.h"
#include "include/types.h"
// mon subscriptions
class MonSub
{
public:
// @returns true if there is any "new" subscriptions
bool have_new() const;
auto get_subs() const {
return sub_new;
}
bool need_renew() const;
// change the status of "new" subscriptions to "sent"
void renewed();
// the peer acked the subscription request
void acked(uint32_t interval);
void got(const std::string& what, version_t version);
// revert the status of subscriptions from "sent" to "new"
// @returns true if there is any pending "new" subscriptions
bool reload();
// add a new subscription
bool want(const std::string& what, version_t start, unsigned flags);
// increment the requested subscription start point. If you do increase
// the value, apply the passed-in flags as well; otherwise do nothing.
bool inc_want(const std::string& what, version_t start, unsigned flags);
// cancel a subscription
void unwant(const std::string& what);
private:
// my subs, and current versions
std::map<std::string,ceph_mon_subscribe_item> sub_sent;
// unsent new subs
std::map<std::string,ceph_mon_subscribe_item> sub_new;
using time_point = ceph::coarse_mono_time;
using clock = typename time_point::clock;
time_point renew_sent;
time_point renew_after;
};
| 1,466 | 30.212766 | 74 |
h
|
null |
ceph-main/src/mon/Monitor.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <iterator>
#include <sstream>
#include <tuple>
#include <stdlib.h>
#include <signal.h>
#include <limits.h>
#include <cstring>
#include <boost/scope_exit.hpp>
#include <boost/algorithm/string/predicate.hpp>
#include "json_spirit/json_spirit_reader.h"
#include "json_spirit/json_spirit_writer.h"
#include "Monitor.h"
#include "common/version.h"
#include "common/blkdev.h"
#include "common/cmdparse.h"
#include "common/signal.h"
#include "osd/OSDMap.h"
#include "MonitorDBStore.h"
#include "messages/PaxosServiceMessage.h"
#include "messages/MMonMap.h"
#include "messages/MMonGetMap.h"
#include "messages/MMonGetVersion.h"
#include "messages/MMonGetVersionReply.h"
#include "messages/MGenericMessage.h"
#include "messages/MMonCommand.h"
#include "messages/MMonCommandAck.h"
#include "messages/MMonSync.h"
#include "messages/MMonScrub.h"
#include "messages/MMonProbe.h"
#include "messages/MMonJoin.h"
#include "messages/MMonPaxos.h"
#include "messages/MRoute.h"
#include "messages/MForward.h"
#include "messages/MMonSubscribe.h"
#include "messages/MMonSubscribeAck.h"
#include "messages/MCommand.h"
#include "messages/MCommandReply.h"
#include "messages/MTimeCheck2.h"
#include "messages/MPing.h"
#include "common/strtol.h"
#include "common/ceph_argparse.h"
#include "common/Timer.h"
#include "common/Clock.h"
#include "common/errno.h"
#include "common/perf_counters.h"
#include "common/admin_socket.h"
#include "global/signal_handler.h"
#include "common/Formatter.h"
#include "include/stringify.h"
#include "include/color.h"
#include "include/ceph_fs.h"
#include "include/str_list.h"
#include "OSDMonitor.h"
#include "MDSMonitor.h"
#include "MonmapMonitor.h"
#include "LogMonitor.h"
#include "AuthMonitor.h"
#include "MgrMonitor.h"
#include "MgrStatMonitor.h"
#include "ConfigMonitor.h"
#include "KVMonitor.h"
#include "mon/HealthMonitor.h"
#include "common/config.h"
#include "common/cmdparse.h"
#include "include/ceph_assert.h"
#include "include/compat.h"
#include "perfglue/heap_profiler.h"
#include "auth/none/AuthNoneClientHandler.h"
#define dout_subsys ceph_subsys_mon
#undef dout_prefix
#define dout_prefix _prefix(_dout, this)
using namespace TOPNSPC::common;
using std::cout;
using std::dec;
using std::hex;
using std::list;
using std::map;
using std::make_pair;
using std::ostream;
using std::ostringstream;
using std::pair;
using std::set;
using std::setfill;
using std::string;
using std::stringstream;
using std::to_string;
using std::vector;
using std::unique_ptr;
using ceph::bufferlist;
using ceph::decode;
using ceph::encode;
using ceph::ErasureCodeInterfaceRef;
using ceph::ErasureCodeProfile;
using ceph::Formatter;
using ceph::JSONFormatter;
using ceph::make_message;
using ceph::mono_clock;
using ceph::mono_time;
using ceph::timespan_str;
static ostream& _prefix(std::ostream *_dout, const Monitor *mon) {
return *_dout << "mon." << mon->name << "@" << mon->rank
<< "(" << mon->get_state_name() << ") e" << mon->monmap->get_epoch() << " ";
}
// Prefixes used for the monitor's own keys in the MonitorDBStore.
const string Monitor::MONITOR_NAME = "monitor";
const string Monitor::MONITOR_STORE_PREFIX = "monitor_store";

#undef FLAG
#undef COMMAND
#undef COMMAND_WITH_FLAG
// Expand each entry in MonCommands.h into a MonCommand initializer.
#define FLAG(f) (MonCommand::FLAG_##f)
#define COMMAND(parsesig, helptext, modulename, req_perms) \
  {parsesig, helptext, modulename, req_perms, FLAG(NONE)},
#define COMMAND_WITH_FLAG(parsesig, helptext, modulename, req_perms, flags) \
  {parsesig, helptext, modulename, req_perms, flags},
// Master table of the commands this monitor understands.
MonCommand mon_commands[] = {
#include <mon/MonCommands.h>
};
#undef COMMAND
#undef COMMAND_WITH_FLAG
// Construct a monitor daemon instance.
//
// @param cct_   global ceph context
// @param nm     this monitor's name (as it appears in the monmap)
// @param s      backing key/value store (ownership stays with caller)
// @param m      public messenger
// @param mgr_m  messenger dedicated to the mgr client
// @param map    seed monmap (ownership stays with caller)
//
// NOTE: member-initializer order must match declaration order in Monitor.h;
// rank starts at -1 and is assigned in bootstrap().
Monitor::Monitor(CephContext* cct_, string nm, MonitorDBStore *s,
		 Messenger *m, Messenger *mgr_m, MonMap *map) :
  Dispatcher(cct_),
  AuthServer(cct_),
  name(nm),
  rank(-1),
  messenger(m),
  con_self(m ? m->get_loopback_connection() : NULL),
  timer(cct_, lock),
  finisher(cct_, "mon_finisher", "fin"),
  cpu_tp(cct, "Monitor::cpu_tp", "cpu_tp", g_conf()->mon_cpu_threads),
  has_ever_joined(false),
  logger(NULL), cluster_logger(NULL), cluster_logger_registered(false),
  monmap(map),
  log_client(cct_, messenger, monmap, LogClient::FLAG_MON),
  key_server(cct, &keyring),
  // auth_supported (if set) overrides the per-role required-auth options
  auth_cluster_required(cct,
			cct->_conf->auth_supported.empty() ?
			cct->_conf->auth_cluster_required : cct->_conf->auth_supported),
  auth_service_required(cct,
			cct->_conf->auth_supported.empty() ?
			cct->_conf->auth_service_required : cct->_conf->auth_supported),
  mgr_messenger(mgr_m),
  mgr_client(cct_, mgr_m, monmap),
  gss_ktfile_client(cct->_conf.get_val<std::string>("gss_ktab_client_file")),
  store(s),
  elector(this, map->strategy),
  required_features(0),
  leader(0),
  quorum_con_features(0),
  // scrub
  scrub_version(0),
  scrub_event(NULL),
  scrub_timeout_event(NULL),
  // sync state
  sync_provider_count(0),
  sync_cookie(0),
  sync_full(false),
  sync_start_version(0),
  sync_timeout_event(NULL),
  sync_last_committed_floor(0),
  timecheck_round(0),
  timecheck_acks(0),
  timecheck_rounds_since_clean(0),
  timecheck_event(NULL),
  admin_hook(NULL),
  routed_request_tid(0),
  op_tracker(cct, g_conf().get_val<bool>("mon_enable_op_tracker"), 1)
{
  clog = log_client.create_channel(CLOG_CHANNEL_CLUSTER);
  audit_clog = log_client.create_channel(CLOG_CHANNEL_AUDIT);

  update_log_clients();

  if (!gss_ktfile_client.empty()) {
    // Assert we can export environment variable
    /*
        The default client keytab is used, if it is present and readable,
        to automatically obtain initial credentials for GSSAPI client
        applications. The principal name of the first entry in the client
        keytab is used by default when obtaining initial credentials.
        1. The KRB5_CLIENT_KTNAME environment variable.
        2. The default_client_keytab_name profile variable in [libdefaults].
        3. The hardcoded default, DEFCKTNAME.
    */
    const int32_t set_result(setenv("KRB5_CLIENT_KTNAME",
                                    gss_ktfile_client.c_str(), 1));
    ceph_assert(set_result == 0);
  }

  // op tracker complaint/history knobs come straight from config
  op_tracker.set_complaint_and_threshold(
      g_conf().get_val<std::chrono::seconds>("mon_op_complaint_time").count(),
      g_conf().get_val<int64_t>("mon_op_log_threshold"));
  op_tracker.set_history_size_and_duration(
      g_conf().get_val<uint64_t>("mon_op_history_size"),
      g_conf().get_val<std::chrono::seconds>("mon_op_history_duration").count());
  op_tracker.set_history_slow_op_size_and_threshold(
      g_conf().get_val<uint64_t>("mon_op_history_slow_op_size"),
      g_conf().get_val<std::chrono::seconds>("mon_op_history_slow_op_threshold").count());

  paxos = std::make_unique<Paxos>(*this, "paxos");

  // instantiate one PaxosService per PAXOS_* slot
  paxos_service[PAXOS_MDSMAP].reset(new MDSMonitor(*this, *paxos, "mdsmap"));
  paxos_service[PAXOS_MONMAP].reset(new MonmapMonitor(*this, *paxos, "monmap"));
  paxos_service[PAXOS_OSDMAP].reset(new OSDMonitor(cct, *this, *paxos, "osdmap"));
  paxos_service[PAXOS_LOG].reset(new LogMonitor(*this, *paxos, "logm"));
  paxos_service[PAXOS_AUTH].reset(new AuthMonitor(*this, *paxos, "auth"));
  paxos_service[PAXOS_MGR].reset(new MgrMonitor(*this, *paxos, "mgr"));
  paxos_service[PAXOS_MGRSTAT].reset(new MgrStatMonitor(*this, *paxos, "mgrstat"));
  paxos_service[PAXOS_HEALTH].reset(new HealthMonitor(*this, *paxos, "health"));
  paxos_service[PAXOS_CONFIG].reset(new ConfigMonitor(*this, *paxos, "config"));
  paxos_service[PAXOS_KV].reset(new KVMonitor(*this, *paxos, "kv"));

  // the monitor itself operates with full caps
  bool r = mon_caps.parse("allow *", NULL);
  ceph_assert(r);

  exited_quorum = ceph_clock_now();

  // prepare local commands
  local_mon_commands.resize(std::size(mon_commands));
  for (unsigned i = 0; i < std::size(mon_commands); ++i) {
    local_mon_commands[i] = mon_commands[i];
  }
  MonCommand::encode_vector(local_mon_commands, local_mon_commands_bl);

  // keep a second command table with cmddesc rewritten for pre-nautilus peers
  prenautilus_local_mon_commands = local_mon_commands;
  for (auto& i : prenautilus_local_mon_commands) {
    std::string n = cmddesc_get_prenautilus_compat(i.cmdstring);
    if (n != i.cmdstring) {
      dout(20) << " pre-nautilus cmd " << i.cmdstring << " -> " << n << dendl;
      i.cmdstring = n;
    }
  }
  MonCommand::encode_vector(prenautilus_local_mon_commands, prenautilus_local_mon_commands_bl);

  // assume our commands until we have an election.  this only means
  // we won't reply with EINVAL before the election; any command that
  // actually matters will wait until we have quorum etc and then
  // retry (and revalidate).
  leader_mon_commands = local_mon_commands;
}
// Destructor: sessions must have been torn down by shutdown() already.
Monitor::~Monitor()
{
  op_tracker.on_shutdown();

  delete logger;

  // shutdown() is expected to have removed all sessions before we get here
  ceph_assert(session_map.sessions.empty());
}
// Admin-socket hook: forwards asok commands to Monitor::do_admin_command().
class AdminHook : public AdminSocketHook {
  Monitor *mon;  // back-pointer; not owned
public:
  explicit AdminHook(Monitor *m) : mon(m) {}
  int call(std::string_view command, const cmdmap_t& cmdmap,
	   const bufferlist&,
	   Formatter *f,
	   std::ostream& errss,
	   bufferlist& out) override {
    stringstream outss;
    int r = mon->do_admin_command(command, cmdmap, f, errss, outss);
    // copy the textual output into the reply bufferlist
    out.append(outss);
    return r;
  }
};
// Handle an admin-socket command under the monitor lock.
//
// @param command  asok command prefix
// @param cmdmap   parsed command arguments
// @param f        output formatter (may be used by sub-handlers)
// @param err      stream for error text returned to the caller
// @param out      stream for normal output
// @return 0 on success, negative errno on failure
int Monitor::do_admin_command(
  std::string_view command,
  const cmdmap_t& cmdmap,
  Formatter *f,
  std::ostream& err,
  std::ostream& out)
{
  std::lock_guard l(lock);

  int r = 0;
  // render the non-prefix args for the audit log
  string args;
  for (auto p = cmdmap.begin();
       p != cmdmap.end(); ++p) {
    if (p->first == "prefix")
      continue;
    if (!args.empty())
      args += ", ";
    args += cmd_vartype_stringify(p->second);
  }
  args = "[" + args + "]";

  // read-only commands are logged at debug level, the rest at info
  bool read_only = (command == "mon_status" ||
		    command == "mon metadata" ||
		    command == "quorum_status" ||
		    command == "ops" ||
		    command == "sessions");

  (read_only ? audit_clog->debug() : audit_clog->info())
    << "from='admin socket' entity='admin socket' "
    << "cmd='" << command << "' args=" << args << ": dispatch";

  if (command == "mon_status") {
    get_mon_status(f);
  } else if (command == "quorum_status") {
    _quorum_status(f, out);
  } else if (command == "sync_force") {
    // dangerous: wipes the local store on next restart; require confirmation
    bool validate = false;
    if (!cmd_getval(cmdmap, "yes_i_really_mean_it", validate)) {
      std::string v;
      if (cmd_getval(cmdmap, "validate", v) &&
	  v == "--yes-i-really-mean-it") {
	validate = true;
      }
    }
    if (!validate) {
      err << "are you SURE? this will mean the monitor store will be erased "
	     "the next time the monitor is restarted.  pass "
	     "'--yes-i-really-mean-it' if you really do.";
      r = -EPERM;
      goto abort;
    }
    sync_force(f);
  } else if (command.compare(0, 23, "add_bootstrap_peer_hint") == 0 ||
	     command.compare(0, 24, "add_bootstrap_peer_hintv") == 0) {
    if (!_add_bootstrap_peer_hint(command, cmdmap, out))
      goto abort;
  } else if (command == "quorum enter") {
    elector.start_participating();
    start_election();
    out << "started responding to quorum, initiated new election";
  } else if (command == "quorum exit") {
    start_election();
    elector.stop_participating();
    out << "stopped responding to quorum, initiated new election";
  } else if (command == "ops") {
    (void)op_tracker.dump_ops_in_flight(f);
  } else if (command == "sessions") {
    f->open_array_section("sessions");
    for (auto p : session_map.sessions) {
      f->dump_object("session", *p);
    }
    f->close_section();
  } else if (command == "dump_historic_ops") {
    if (!op_tracker.dump_historic_ops(f)) {
      err << "op_tracker tracking is not enabled now, so no ops are tracked currently, even those get stuck. \
please enable \"mon_enable_op_tracker\", and the tracker will start to track new ops received afterwards.";
    }
  } else if (command == "dump_historic_ops_by_duration") {
    // NOTE: dump_historic_ops() returns false when tracking is disabled;
    // the check was previously inverted here and reported the error on success.
    if (!op_tracker.dump_historic_ops(f, true)) {
      err << "op_tracker tracking is not enabled now, so no ops are tracked currently, even those get stuck. \
please enable \"mon_enable_op_tracker\", and the tracker will start to track new ops received afterwards.";
    }
  } else if (command == "dump_historic_slow_ops") {
    // same fix as above: report the error only when tracking is disabled
    if (!op_tracker.dump_historic_slow_ops(f, {})) {
      err << "op_tracker tracking is not enabled now, so no ops are tracked currently, even those get stuck. \
please enable \"mon_enable_op_tracker\", and the tracker will start to track new ops received afterwards.";
    }
  } else if (command == "quorum") {
    string quorumcmd;
    cmd_getval(cmdmap, "quorumcmd", quorumcmd);
    if (quorumcmd == "exit") {
      start_election();
      elector.stop_participating();
      out << "stopped responding to quorum, initiated new election" << std::endl;
    } else if (quorumcmd == "enter") {
      elector.start_participating();
      start_election();
      out << "started responding to quorum, initiated new election" << std::endl;
    } else {
      err << "needs a valid 'quorum' command" << std::endl;
    }
  } else if (command == "connection scores dump") {
    if (!get_quorum_mon_features().contains_all(
				   ceph::features::mon::FEATURE_PINGING)) {
      err << "Not all monitors support changing election strategies; \
please upgrade them first!";
    }
    elector.dump_connection_scores(f);
  } else if (command == "connection scores reset") {
    if (!get_quorum_mon_features().contains_all(
				   ceph::features::mon::FEATURE_PINGING)) {
      err << "Not all monitors support changing election strategies; \
please upgrade them first!";
    }
    elector.notify_clear_peer_state();
  } else if (command == "smart") {
    // collect SMART data for the device(s) backing the mon store
    string want_devid;
    cmd_getval(cmdmap, "devid", want_devid);

    string devname = store->get_devname();
    if (devname.empty()) {
      err << "could not determine device name for " << store->get_path();
      r = -ENOENT;
      goto abort;
    }
    set<string> devnames;
    get_raw_devices(devname, &devnames);
    json_spirit::mObject json_map;
    uint64_t smart_timeout = cct->_conf.get_val<uint64_t>(
      "mon_smart_report_timeout");
    for (auto& devname : devnames) {
      string err;
      string devid = get_device_id(devname, &err);
      if (want_devid.size() && want_devid != devid) {
	derr << "get_device_id failed on " << devname << ": " << err << dendl;
	continue;
      }
      json_spirit::mValue smart_json;
      if (block_device_get_metrics(devname, smart_timeout,
				   &smart_json)) {
	dout(10) << "block_device_get_metrics failed for /dev/" << devname
		 << dendl;
	continue;
      }
      json_map[devid] = smart_json;
    }
    json_spirit::write(json_map, out, json_spirit::pretty_print);
  } else if (command == "heap") {
    if (!ceph_using_tcmalloc()) {
      err << "could not issue heap profiler command -- not using tcmalloc!";
      r = -EOPNOTSUPP;
      goto abort;
    }
    string cmd;
    if (!cmd_getval(cmdmap, "heapcmd", cmd)) {
      err << "unable to get value for command \"" << cmd << "\"";
      r = -EINVAL;
      goto abort;
    }
    std::vector<std::string> cmd_vec;
    get_str_vec(cmd, cmd_vec);
    string val;
    if (cmd_getval(cmdmap, "value", val)) {
      cmd_vec.push_back(val);
    }
    ceph_heap_profiler_handle_command(cmd_vec, out);
  } else if (command == "compact") {
    dout(1) << "triggering manual compaction" << dendl;
    auto start = ceph::coarse_mono_clock::now();
    store->compact_async();
    auto end = ceph::coarse_mono_clock::now();
    auto duration = ceph::to_seconds<double>(end - start);
    dout(1) << "finished manual compaction in "
            << duration << " seconds" << dendl;
    out << "compacted " << g_conf().get_val<std::string>("mon_keyvaluedb")
        << " in " << duration << " seconds";
  } else {
    ceph_abort_msg("bad AdminSocket command binding");
  }
  (read_only ? audit_clog->debug() : audit_clog->info())
    << "from='admin socket' "
    << "entity='admin socket' "
    << "cmd=" << command << " "
    << "args=" << args << ": finished";
  return r;

abort:
  (read_only ? audit_clog->debug() : audit_clog->info())
    << "from='admin socket' "
    << "entity='admin socket' "
    << "cmd=" << command << " "
    << "args=" << args << ": aborted";
  return r;
}
// Handle a fatal or reload signal.  SIGHUP reopens the logs; SIGINT and
// SIGTERM trigger an orderly shutdown.  Any other signal is a bug.
void Monitor::handle_signal(int signum)
{
  derr << "*** Got Signal " << sig_str(signum) << " ***" << dendl;
  if (signum != SIGHUP) {
    ceph_assert(signum == SIGINT || signum == SIGTERM);
    shutdown();
    return;
  }
  sighup_handler(signum);
  logmon()->reopen_logs();
}
// Baseline feature set a brand-new monitor store starts with: just the
// BASE and SINGLE_PAXOS incompat flags, nothing in compat/ro_compat.
CompatSet Monitor::get_initial_supported_features()
{
  CompatSet::FeatureSet compat;
  CompatSet::FeatureSet ro_compat;
  CompatSet::FeatureSet incompat;
  incompat.insert(CEPH_MON_FEATURE_INCOMPAT_BASE);
  incompat.insert(CEPH_MON_FEATURE_INCOMPAT_SINGLE_PAXOS);
  return CompatSet(compat, ro_compat, incompat);
}
// Full feature set this monitor build supports: the initial set plus every
// incompat flag introduced by later releases and encodings.
CompatSet Monitor::get_supported_features()
{
  CompatSet compat = get_initial_supported_features();
  for (const auto& feature : {
	 CEPH_MON_FEATURE_INCOMPAT_OSD_ERASURE_CODES,
	 CEPH_MON_FEATURE_INCOMPAT_OSDMAP_ENC,
	 CEPH_MON_FEATURE_INCOMPAT_ERASURE_CODE_PLUGINS_V2,
	 CEPH_MON_FEATURE_INCOMPAT_ERASURE_CODE_PLUGINS_V3,
	 CEPH_MON_FEATURE_INCOMPAT_KRAKEN,
	 CEPH_MON_FEATURE_INCOMPAT_LUMINOUS,
	 CEPH_MON_FEATURE_INCOMPAT_MIMIC,
	 CEPH_MON_FEATURE_INCOMPAT_NAUTILUS,
	 CEPH_MON_FEATURE_INCOMPAT_OCTOPUS,
	 CEPH_MON_FEATURE_INCOMPAT_PACIFIC,
	 CEPH_MON_FEATURE_INCOMPAT_QUINCY,
	 CEPH_MON_FEATURE_INCOMPAT_REEF }) {
    compat.incompat.insert(feature);
  }
  return compat;
}
// Feature set assumed for an old-style store that carries no feature list
// on disk: only the BASE incompat flag.
CompatSet Monitor::get_legacy_features()
{
  CompatSet::FeatureSet compat;
  CompatSet::FeatureSet ro_compat;
  CompatSet::FeatureSet incompat;
  incompat.insert(CEPH_MON_FEATURE_INCOMPAT_BASE);
  return CompatSet(compat, ro_compat, incompat);
}
// Verify the on-disk feature set is writeable by this build.
// @return 0 if compatible, -EPERM if the store requires unsupported features.
int Monitor::check_features(MonitorDBStore *store)
{
  CompatSet required = get_supported_features();
  CompatSet ondisk;
  read_features_off_disk(store, &ondisk);

  if (required.writeable(ondisk))
    return 0;

  CompatSet diff = required.unsupported(ondisk);
  generic_derr << "ERROR: on disk data includes unsupported features: " << diff << dendl;
  return -EPERM;
}
// Load the persisted CompatSet from the store into *features.  If no
// feature list exists (pre-feature-list store), assume the legacy set and
// persist it so subsequent startups take the fast path.
void Monitor::read_features_off_disk(MonitorDBStore *store, CompatSet *features)
{
  bufferlist featuresbl;
  store->get(MONITOR_NAME, COMPAT_SET_LOC, featuresbl);
  if (featuresbl.length() == 0) {
    generic_dout(0) << "WARNING: mon fs missing feature list.\n"
	    << "Assuming it is old-style and introducing one." << dendl;
    //we only want the baseline ~v.18 features assumed to be on disk.
    //If new features are introduced this code needs to disappear or
    //be made smarter.
    *features = get_legacy_features();

    // write the assumed legacy set back so it is explicit from now on
    features->encode(featuresbl);
    auto t(std::make_shared<MonitorDBStore::Transaction>());
    t->put(MONITOR_NAME, COMPAT_SET_LOC, featuresbl);
    store->apply_transaction(t);
  } else {
    auto it = featuresbl.cbegin();
    features->decode(it);
  }
}
// Load our on-disk feature set and recompute the features a quorum peer
// must have (required_features) from it.
void Monitor::read_features()
{
  read_features_off_disk(store, &features);
  dout(10) << "features " << features << dendl;

  calc_quorum_requirements();
  dout(10) << "required_features " << required_features << dendl;
}
// Queue the current in-memory feature set for persistence as part of
// transaction t (caller applies the transaction).
void Monitor::write_features(MonitorDBStore::TransactionRef t)
{
  bufferlist bl;
  features.encode(bl);
  t->put(MONITOR_NAME, COMPAT_SET_LOC, bl);
}
// Config keys we want handle_conf_change() notifications for.
// The returned array is NULL-terminated and has static storage duration.
const char** Monitor::get_tracked_conf_keys() const
{
  static const char* KEYS[] = {
    "crushtool", // helpful for testing
    "mon_election_timeout",
    "mon_lease",
    "mon_lease_renew_interval_factor",
    "mon_lease_ack_timeout_factor",
    "mon_accept_timeout_factor",
    // clog & admin clog
    "clog_to_monitors",
    "clog_to_syslog",
    "clog_to_syslog_facility",
    "clog_to_syslog_level",
    "clog_to_graylog",
    "clog_to_graylog_host",
    "clog_to_graylog_port",
    "mon_cluster_log_to_file",
    "host",
    "fsid",
    // periodic health to clog
    "mon_health_to_clog",
    "mon_health_to_clog_interval",
    "mon_health_to_clog_tick_interval",
    // scrub interval
    "mon_scrub_interval",
    "mon_allow_pool_delete",
    // osdmap pruning - observed, not handled.
    "mon_osdmap_full_prune_enabled",
    "mon_osdmap_full_prune_min",
    "mon_osdmap_full_prune_interval",
    "mon_osdmap_full_prune_txsize",
    // debug options - observed, not handled
    "mon_debug_extra_checks",
    "mon_debug_block_osdmap_trim",
    NULL
  };
  return KEYS;
}
// React to runtime config changes for the keys listed in
// get_tracked_conf_keys().  Work that needs the monitor lock is deferred
// to the finisher, which re-takes `lock` in the queued context.
void Monitor::handle_conf_change(const ConfigProxy& conf,
                                 const std::set<std::string> &changed)
{
  sanitize_options();

  dout(10) << __func__ << " " << changed << dendl;

  if (changed.count("clog_to_monitors") ||
      changed.count("clog_to_syslog") ||
      changed.count("clog_to_syslog_level") ||
      changed.count("clog_to_syslog_facility") ||
      changed.count("clog_to_graylog") ||
      changed.count("clog_to_graylog_host") ||
      changed.count("clog_to_graylog_port") ||
      changed.count("host") ||
      changed.count("fsid")) {
    update_log_clients();
  }

  if (changed.count("mon_health_to_clog") ||
      changed.count("mon_health_to_clog_interval") ||
      changed.count("mon_health_to_clog_tick_interval")) {
    // defer: needs the monitor lock, which we may not hold here
    finisher.queue(new C_MonContext{this, [this, changed](int) {
      std::lock_guard l{lock};
      health_to_clog_update_conf(changed);
    }});
  }

  if (changed.count("mon_scrub_interval")) {
    auto scrub_interval =
      conf.get_val<std::chrono::seconds>("mon_scrub_interval");
    // capture the value now; the queued context runs later under `lock`
    finisher.queue(new C_MonContext{this, [this, scrub_interval](int) {
      std::lock_guard l{lock};
      scrub_update_interval(scrub_interval);
    }});
  }
}
// Re-read clog/syslog/graylog settings for both log channels.
void Monitor::update_log_clients()
{
  clog->parse_client_options(g_ceph_context);
  audit_clog->parse_client_options(g_ceph_context);
}
// Validate timing-related config options, logging an error to the cluster
// log for each bad value.
// @return 0 if everything is sane, -EINVAL otherwise.
int Monitor::sanitize_options()
{
  int rc = 0;

  // mon_lease must be greater than mon_lease_renewal; otherwise we
  // may incur in leases expiring before they are renewed.
  const auto renew_factor = g_conf()->mon_lease_renew_interval_factor;
  if (renew_factor >= 1.0) {
    clog->error() << "mon_lease_renew_interval_factor ("
		  << renew_factor
		  << ") must be less than 1.0";
    rc = -EINVAL;
  }

  // mon_lease_ack_timeout must be greater than mon_lease to make sure we've
  // got time to renew the lease and get an ack for it. Having both options
  // with the same value, for a given small vale, could mean timing out if
  // the monitors happened to be overloaded -- or even under normal load for
  // a small enough value.
  const auto ack_factor = g_conf()->mon_lease_ack_timeout_factor;
  if (ack_factor <= 1.0) {
    clog->error() << "mon_lease_ack_timeout_factor ("
		  << ack_factor
		  << ") must be greater than 1.0";
    rc = -EINVAL;
  }

  return rc;
}
// One-time initialization before joining the cluster: perf counters, fsid,
// feature set, keyring bootstrap, paxos init, and asok/auth registration.
// Called with no prior state; returns 0 on success or a negative errno.
int Monitor::preinit()
{
  std::unique_lock l(lock);

  dout(1) << "preinit fsid " << monmap->fsid << dendl;

  int r = sanitize_options();
  if (r < 0) {
    derr << "option sanitization failed!" << dendl;
    return r;
  }

  // register mon-level perf counters
  ceph_assert(!logger);
  {
    PerfCountersBuilder pcb(g_ceph_context, "mon", l_mon_first, l_mon_last);
    pcb.add_u64(l_mon_num_sessions, "num_sessions", "Open sessions", "sess",
        PerfCountersBuilder::PRIO_USEFUL);
    pcb.add_u64_counter(l_mon_session_add, "session_add", "Created sessions",
        "sadd", PerfCountersBuilder::PRIO_INTERESTING);
    pcb.add_u64_counter(l_mon_session_rm, "session_rm", "Removed sessions",
        "srm", PerfCountersBuilder::PRIO_INTERESTING);
    pcb.add_u64_counter(l_mon_session_trim, "session_trim", "Trimmed sessions",
        "strm", PerfCountersBuilder::PRIO_USEFUL);
    pcb.add_u64_counter(l_mon_num_elections, "num_elections", "Elections participated in",
        "ecnt", PerfCountersBuilder::PRIO_USEFUL);
    pcb.add_u64_counter(l_mon_election_call, "election_call", "Elections started",
        "estt", PerfCountersBuilder::PRIO_INTERESTING);
    pcb.add_u64_counter(l_mon_election_win, "election_win", "Elections won",
        "ewon", PerfCountersBuilder::PRIO_INTERESTING);
    pcb.add_u64_counter(l_mon_election_lose, "election_lose", "Elections lost",
        "elst", PerfCountersBuilder::PRIO_INTERESTING);
    logger = pcb.create_perf_counters();
    cct->get_perfcounters_collection()->add(logger);
  }

  // build cluster-level counters; they are only registered with the
  // collection while we are in quorum (see register_cluster_logger())
  ceph_assert(!cluster_logger);
  {
    PerfCountersBuilder pcb(g_ceph_context, "cluster", l_cluster_first, l_cluster_last);
    pcb.add_u64(l_cluster_num_mon, "num_mon", "Monitors");
    pcb.add_u64(l_cluster_num_mon_quorum, "num_mon_quorum", "Monitors in quorum");
    pcb.add_u64(l_cluster_num_osd, "num_osd", "OSDs");
    pcb.add_u64(l_cluster_num_osd_up, "num_osd_up", "OSDs that are up");
    pcb.add_u64(l_cluster_num_osd_in, "num_osd_in", "OSD in state \"in\" (they are in cluster)");
    pcb.add_u64(l_cluster_osd_epoch, "osd_epoch", "Current epoch of OSD map");
    pcb.add_u64(l_cluster_osd_bytes, "osd_bytes", "Total capacity of cluster", NULL, 0, unit_t(UNIT_BYTES));
    pcb.add_u64(l_cluster_osd_bytes_used, "osd_bytes_used", "Used space", NULL, 0, unit_t(UNIT_BYTES));
    pcb.add_u64(l_cluster_osd_bytes_avail, "osd_bytes_avail", "Available space", NULL, 0, unit_t(UNIT_BYTES));
    pcb.add_u64(l_cluster_num_pool, "num_pool", "Pools");
    pcb.add_u64(l_cluster_num_pg, "num_pg", "Placement groups");
    pcb.add_u64(l_cluster_num_pg_active_clean, "num_pg_active_clean", "Placement groups in active+clean state");
    pcb.add_u64(l_cluster_num_pg_active, "num_pg_active", "Placement groups in active state");
    pcb.add_u64(l_cluster_num_pg_peering, "num_pg_peering", "Placement groups in peering state");
    pcb.add_u64(l_cluster_num_object, "num_object", "Objects");
    pcb.add_u64(l_cluster_num_object_degraded, "num_object_degraded", "Degraded (missing replicas) objects");
    pcb.add_u64(l_cluster_num_object_misplaced, "num_object_misplaced", "Misplaced (wrong location in the cluster) objects");
    pcb.add_u64(l_cluster_num_object_unfound, "num_object_unfound", "Unfound objects");
    pcb.add_u64(l_cluster_num_bytes, "num_bytes", "Size of all objects", NULL, 0, unit_t(UNIT_BYTES));
    cluster_logger = pcb.create_perf_counters();
  }

  paxos->init_logger();

  // verify cluster_uuid
  {
    int r = check_fsid();
    if (r == -ENOENT)
      r = write_fsid();
    if (r < 0) {
      return r;
    }
  }

  // open compatset
  read_features();

  // have we ever joined a quorum?
  has_ever_joined = (store->get(MONITOR_NAME, "joined") != 0);
  dout(10) << "has_ever_joined = " << (int)has_ever_joined << dendl;

  if (!has_ever_joined) {
    // impose initial quorum restrictions?
    list<string> initial_members;
    get_str_list(g_conf()->mon_initial_members, initial_members);

    if (!initial_members.empty()) {
      dout(1) << " initial_members " << initial_members << ", filtering seed monmap" << dendl;

      monmap->set_initial_members(
	g_ceph_context, initial_members, name, messenger->get_myaddrs(),
	&extra_probe_peers);

      dout(10) << " monmap is " << *monmap << dendl;
      dout(10) << " extra probe peers " << extra_probe_peers << dendl;
    }
  } else if (!monmap->contains(name)) {
    derr << "not in monmap and have been in a quorum before; "
         << "must have been removed" << dendl;
    if (g_conf()->mon_force_quorum_join) {
      dout(0) << "we should have died but "
              << "'mon_force_quorum_join' is set -- allowing boot" << dendl;
    } else {
      derr << "commit suicide!" << dendl;
      return -ENOENT;
    }
  }

  {
    // We have a potentially inconsistent store state in hands. Get rid of it
    // and start fresh.
    bool clear_store = false;
    if (store->exists("mon_sync", "in_sync")) {
      dout(1) << __func__ << " clean up potentially inconsistent store state"
	      << dendl;
      clear_store = true;
    }

    if (store->get("mon_sync", "force_sync") > 0) {
      dout(1) << __func__ << " force sync by clearing store state" << dendl;
      clear_store = true;
    }

    if (clear_store) {
      set<string> sync_prefixes = get_sync_targets_names();
      store->clear(sync_prefixes);
    }
  }

  sync_last_committed_floor = store->get("mon_sync", "last_committed_floor");
  dout(10) << "sync_last_committed_floor " << sync_last_committed_floor << dendl;

  init_paxos();

  if (is_keyring_required()) {
    // we need to bootstrap authentication keys so we can form an
    // initial quorum.
    if (authmon()->get_last_committed() == 0) {
      dout(10) << "loading initial keyring to bootstrap authentication for mkfs" << dendl;
      bufferlist bl;
      int err = store->get("mkfs", "keyring", bl);
      if (err == 0 && bl.length() > 0) {
        // Attempt to decode and extract keyring only if it is found.
        KeyRing keyring;
        auto p = bl.cbegin();
        decode(keyring, p);
        extract_save_mon_key(keyring);
      }
    }

    string keyring_loc = g_conf()->mon_data + "/keyring";

    r = keyring.load(cct, keyring_loc);
    if (r < 0) {
      // external keyring missing: try migrating the mon. key out of the db
      EntityName mon_name;
      mon_name.set_type(CEPH_ENTITY_TYPE_MON);
      EntityAuth mon_key;
      if (key_server.get_auth(mon_name, mon_key)) {
	dout(1) << "copying mon. key from old db to external keyring" << dendl;
	keyring.add(mon_name, mon_key);
	bufferlist bl;
	keyring.encode_plaintext(bl);
	write_default_keyring(bl);
      } else {
	derr << "unable to load initial keyring " << g_conf()->keyring << dendl;
	return r;
      }
    }
  }

  admin_hook = new AdminHook(this);
  AdminSocket* admin_socket = cct->get_admin_socket();

  // unlock while registering to avoid mon_lock -> admin socket lock dependency.
  l.unlock();
  // register tell/asock commands
  for (const auto& command : local_mon_commands) {
    if (!command.is_tell()) {
      continue;
    }
    const auto prefix = cmddesc_get_prefix(command.cmdstring);
    if (prefix == "injectargs" ||
	prefix == "version" ||
	prefix == "tell") {
      // not registerd by me
      continue;
    }
    r = admin_socket->register_command(command.cmdstring, admin_hook,
				       command.helpstring);
    ceph_assert(r == 0);
  }
  l.lock();

  // add ourselves as a conf observer
  g_conf().add_observer(this);

  messenger->set_auth_client(this);
  messenger->set_auth_server(this);
  mgr_messenger->set_auth_client(this);

  auth_registry.refresh_config();

  return 0;
}
// Start runtime services (finisher, timer, thread pool, messengers, mgr
// client) and kick off probing/bootstrap.  Called after preinit().
int Monitor::init()
{
  dout(2) << "init" << dendl;
  std::lock_guard l(lock);

  finisher.start();

  // start ticker
  timer.init();
  new_tick();

  cpu_tp.start();

  // i'm ready!
  messenger->add_dispatcher_tail(this);

  // kickstart pet mgrclient
  mgr_client.init();
  mgr_messenger->add_dispatcher_tail(&mgr_client);
  mgr_messenger->add_dispatcher_tail(this);  // for auth ms_* calls
  mgrmon()->prime_mgr_client();

  state = STATE_PROBING;

  bootstrap();

  if (!elector.peer_tracker_is_clean()){
    dout(10) << "peer_tracker looks inconsistent"
      << " previous bad logic, clearing ..." << dendl;
    elector.notify_clear_peer_state();
  }

  // add features of myself into feature_map
  session_map.feature_map.add_mon(con_self->get_features());
  return 0;
}
// Initialize paxos and every PaxosService, then refresh all in-memory
// state from the committed store.
void Monitor::init_paxos()
{
  dout(10) << __func__ << dendl;
  paxos->init();

  // init services
  for (auto& svc : paxos_service) {
    svc->init();
  }

  refresh_from_paxos(NULL);
}
// Reload cached state (cluster fingerprint, every service's maps, mon
// metadata) from the committed paxos store.
//
// @param need_bootstrap  out-param a service may set if it decides a
//                        re-bootstrap is required; may be NULL.
void Monitor::refresh_from_paxos(bool *need_bootstrap)
{
  dout(10) << __func__ << dendl;

  bufferlist bl;
  int r = store->get(MONITOR_NAME, "cluster_fingerprint", bl);
  if (r >= 0) {
    try {
      auto p = bl.cbegin();
      decode(fingerprint, p);
    }
    catch (ceph::buffer::error& e) {
      // a corrupt fingerprint is non-fatal; keep whatever we had
      dout(10) << __func__ << " failed to decode cluster_fingerprint" << dendl;
    }
  } else {
    dout(10) << __func__ << " no cluster_fingerprint" << dendl;
  }

  // refresh all services first, then run their post-refresh hooks
  for (auto& svc : paxos_service) {
    svc->refresh(need_bootstrap);
  }
  for (auto& svc : paxos_service) {
    svc->post_refresh();
  }
  load_metadata();
}
// Attach the cluster perf counters to the collection (done while in
// quorum).  Idempotent: a repeat call is just logged.
void Monitor::register_cluster_logger()
{
  if (cluster_logger_registered) {
    dout(10) << "register_cluster_logger - already registered" << dendl;
    return;
  }
  dout(10) << "register_cluster_logger" << dendl;
  cluster_logger_registered = true;
  cct->get_perfcounters_collection()->add(cluster_logger);
}
// Detach the cluster perf counters from the collection (done when we
// leave quorum).  Idempotent: a repeat call is just logged.
void Monitor::unregister_cluster_logger()
{
  if (!cluster_logger_registered) {
    dout(10) << "unregister_cluster_logger - not registered" << dendl;
    return;
  }
  dout(10) << "unregister_cluster_logger" << dendl;
  cluster_logger_registered = false;
  cct->get_perfcounters_collection()->remove(cluster_logger);
}
// Refresh the cluster-level monitor counters from current monmap/quorum.
void Monitor::update_logger()
{
  cluster_logger->set(l_cluster_num_mon, monmap->size());
  cluster_logger->set(l_cluster_num_mon_quorum, quorum.size());
}
// Orderly shutdown.  The lock/unlock sequencing below is deliberate:
// several steps (config observer removal, finisher drain, messenger
// shutdown) must run without the monitor lock held to avoid deadlocks.
void Monitor::shutdown()
{
  dout(1) << "shutdown" << dendl;

  lock.lock();

  wait_for_paxos_write();

  {
    std::lock_guard l(auth_lock);
    authmon()->_set_mon_num_rank(0, 0);
  }

  state = STATE_SHUTDOWN;

  lock.unlock();
  // observer removal takes the config lock; don't hold ours
  g_conf().remove_observer(this);
  lock.lock();

  if (admin_hook) {
    cct->get_admin_socket()->unregister_commands(admin_hook);
    delete admin_hook;
    admin_hook = NULL;
  }

  elector.shutdown();
  mgr_client.shutdown();

  lock.unlock();
  // drain queued contexts without the lock (they re-take it themselves)
  finisher.wait_for_empty();
  finisher.stop();
  lock.lock();

  // clean up
  paxos->shutdown();
  for (auto& svc : paxos_service) {
    svc->shutdown();
  }

  finish_contexts(g_ceph_context, waitfor_quorum, -ECANCELED);
  finish_contexts(g_ceph_context, maybe_wait_for_quorum, -ECANCELED);

  timer.shutdown();

  cpu_tp.stop();

  remove_all_sessions();

  log_client.shutdown();

  // unlock before msgr shutdown...
  lock.unlock();

  // shutdown messenger before removing logger from perfcounter collection,
  // otherwise _ms_dispatch() will try to update deleted logger
  messenger->shutdown();
  mgr_messenger->shutdown();

  if (logger) {
    cct->get_perfcounters_collection()->remove(logger);
  }
  if (cluster_logger) {
    if (cluster_logger_registered)
      cct->get_perfcounters_collection()->remove(cluster_logger);
    delete cluster_logger;
    cluster_logger = NULL;
  }
}
// Block until any in-flight paxos store write has hit disk.  Drops and
// re-takes the monitor lock around the flush; caller must hold `lock`.
void Monitor::wait_for_paxos_write()
{
  if (paxos->is_writing() || paxos->is_writing_previous()) {
    dout(10) << __func__ << " flushing pending write" << dendl;
    lock.unlock();
    store->flush();
    lock.lock();
    dout(10) << __func__ << " flushed pending write" << dendl;
  }
}
// Re-exec ourselves with the original argv.  Never returns: on execv
// failure we abort, since callers rely on respawn() not coming back.
void Monitor::respawn()
{
  // --- WARNING TO FUTURE COPY/PASTERS ---
  // You must also add a call like
  //
  //   ceph_pthread_setname(pthread_self(), "ceph-mon");
  //
  // to main() so that /proc/$pid/stat field 2 contains "(ceph-mon)"
  // instead of "(exe)", so that killall (and log rotation) will work.

  dout(0) << __func__ << dendl;

  char *new_argv[orig_argc+1];
  dout(1) << " e: '" << orig_argv[0] << "'" << dendl;
  for (int i=0; i<orig_argc; i++) {
    new_argv[i] = (char *)orig_argv[i];
    dout(1) << " " << i << ": '" << orig_argv[i] << "'" << dendl;
  }
  new_argv[orig_argc] = NULL;

  /* Determine the path to our executable, test if Linux /proc/self/exe exists.
   * This allows us to exec the same executable even if it has since been
   * unlinked.
   */
  char exe_path[PATH_MAX] = "";
#ifdef PROCPREFIX
  if (readlink(PROCPREFIX "/proc/self/exe", exe_path, PATH_MAX-1) != -1) {
    dout(1) << "respawning with exe " << exe_path << dendl;
    // exec via the symlink itself, not the resolved path, so we pick up a
    // replaced binary correctly
    strcpy(exe_path, PROCPREFIX "/proc/self/exe");
  } else {
#else
  {
#endif
    /* Print CWD for the user's interest */
    char buf[PATH_MAX];
    char *cwd = getcwd(buf, sizeof(buf));
    ceph_assert(cwd);
    dout(1) << " cwd " << cwd << dendl;

    /* Fall back to a best-effort: just running in our CWD */
    strncpy(exe_path, orig_argv[0], PATH_MAX-1);
  }

  dout(1) << " exe_path " << exe_path << dendl;

  unblock_all_signals(NULL);
  execv(exe_path, new_argv);

  dout(0) << "respawn execv " << orig_argv[0]
	  << " failed with " << cpp_strerror(errno) << dendl;

  // We have to assert out here, because suicide() returns, and callers
  // to respawn expect it never to return.
  ceph_abort();
}
// (Re)enter the probing state: reconcile our rank with the monmap, reset
// all election/sync state, and probe peers (or win a standalone election
// if we are the only monitor).  May exit or respawn the process when the
// monmap says we no longer belong or our address changed.
void Monitor::bootstrap()
{
  dout(10) << "bootstrap" << dendl;
  wait_for_paxos_write();

  sync_reset_requester();
  unregister_cluster_logger();
  cancel_probe_timeout();

  if (monmap->get_epoch() == 0) {
    dout(10) << "reverting to legacy ranks for seed monmap (epoch 0)" << dendl;
    monmap->calc_legacy_ranks();
  }
  dout(10) << "monmap " << *monmap << dendl;
  {
    // refuse to start if the monmap's min_mon_release is too far ahead/behind
    auto from_release = monmap->min_mon_release;
    ostringstream err;
    if (!can_upgrade_from(from_release, "min_mon_release", err)) {
      derr << "current monmap has " << err.str() << " stopping." << dendl;
      exit(0);
    }
  }
  // note my rank
  int newrank = monmap->get_rank(messenger->get_myaddrs());
  if (newrank < 0 && rank >= 0) {
    // was i ever part of the quorum?
    if (has_ever_joined) {
      dout(0) << " removed from monmap, suicide." << dendl;
      exit(0);
    }
    elector.notify_clear_peer_state();
  }
  if (newrank >= 0 &&
      monmap->get_addrs(newrank) != messenger->get_myaddrs()) {
    dout(0) << " monmap addrs for rank " << newrank << " changed, i am "
	    << messenger->get_myaddrs()
	    << ", monmap is " << monmap->get_addrs(newrank) << ", respawning"
	    << dendl;

    if (monmap->get_epoch()) {
      // store this map in temp mon_sync location so that we use it on
      // our next startup
      derr << " stashing newest monmap " << monmap->get_epoch()
	   << " for next startup" << dendl;
      bufferlist bl;
      monmap->encode(bl, -1);
      auto t(std::make_shared<MonitorDBStore::Transaction>());
      t->put("mon_sync", "temp_newer_monmap", bl);
      store->apply_transaction(t);
    }

    respawn();
  }
  if (newrank != rank) {
    dout(0) << " my rank is now " << newrank << " (was " << rank << ")" << dendl;
    messenger->set_myname(entity_name_t::MON(newrank));
    rank = newrank;
    elector.notify_rank_changed(rank);

    // reset all connections, or else our peers will think we are someone else.
    messenger->mark_down_all();
  }

  // reset
  state = STATE_PROBING;

  _reset();

  // sync store
  if (g_conf()->mon_compact_on_bootstrap) {
    dout(10) << "bootstrap -- triggering compaction" << dendl;
    store->compact();
    dout(10) << "bootstrap -- finished compaction" << dendl;
  }

  // stretch mode bits
  set_elector_disallowed_leaders(false);

  // singleton monitor?
  if (monmap->size() == 1 && rank == 0) {
    win_standalone_election();
    return;
  }

  reset_probe_timeout();

  // i'm outside the quorum
  if (monmap->contains(name))
    outside_quorum.insert(name);

  // probe monitors
  dout(10) << "probing other monitors" << dendl;
  for (unsigned i = 0; i < monmap->size(); i++) {
    if ((int)i != rank)
      send_mon_message(
	new MMonProbe(monmap->fsid, MMonProbe::OP_PROBE, name, has_ever_joined,
		      ceph_release()),
	i);
  }
  // also probe any manually-configured extra peers (bootstrap hints)
  for (auto& av : extra_probe_peers) {
    if (av != messenger->get_myaddrs()) {
      messenger->send_to_mon(
	new MMonProbe(monmap->fsid, MMonProbe::OP_PROBE, name, has_ever_joined,
		      ceph_release()),
	av);
    }
  }
}
// Record an out-of-monmap peer address to probe during bootstrap.
// Invoked via admin command with either a single 'addr' (ip[:port]) or
// an 'addrv' vector (v2:ip:port[,v1:ip:port]).  On success the parsed
// addrvec is added to extra_probe_peers; returns false (with an
// explanation in 'ss') if the address fails to parse.
bool Monitor::_add_bootstrap_peer_hint(std::string_view cmd,
                                       const cmdmap_t& cmdmap,
                                       ostream& ss)
{
  if (is_leader() || is_peon()) {
    // already in quorum: hints only matter while probing, so this is a no-op
    ss << "mon already active; ignoring bootstrap hint";
    return true;
  }
  entity_addrvec_t addrs;
  string addrstr;
  if (cmd_getval(cmdmap, "addr", addrstr)) {
    dout(10) << "_add_bootstrap_peer_hint '" << cmd << "' addr '"
             << addrstr << "'" << dendl;
    entity_addr_t addr;
    if (!addr.parse(addrstr, entity_addr_t::TYPE_ANY)) {
      ss << "failed to parse addrs '" << addrstr
         << "'; syntax is 'add_bootstrap_peer_hint ip[:port]'";
      return false;
    }
    addrs.v.push_back(addr);
    if (addr.get_port() == 0) {
      // no port given: probe both the msgr2 and the legacy default ports
      addrs.v[0].set_type(entity_addr_t::TYPE_MSGR2);
      addrs.v[0].set_port(CEPH_MON_PORT_IANA);
      addrs.v.push_back(addr);
      addrs.v[1].set_type(entity_addr_t::TYPE_LEGACY);
      addrs.v[1].set_port(CEPH_MON_PORT_LEGACY);
    } else if (addr.get_type() == entity_addr_t::TYPE_ANY) {
      // explicit port but untyped: infer protocol from the well-known port
      if (addr.get_port() == CEPH_MON_PORT_LEGACY) {
        addrs.v[0].set_type(entity_addr_t::TYPE_LEGACY);
      } else {
        addrs.v[0].set_type(entity_addr_t::TYPE_MSGR2);
      }
    }
  } else if (cmd_getval(cmdmap, "addrv", addrstr)) {
    dout(10) << "_add_bootstrap_peer_hintv '" << cmd << "' addrv '"
             << addrstr << "'" << dendl;
    const char *end = 0;
    if (!addrs.parse(addrstr.c_str(), &end)) {
      ss << "failed to parse addrs '" << addrstr
         << "'; syntax is 'add_bootstrap_peer_hintv v2:ip:port[,v1:ip:port]'";
      return false;
    }
  } else {
    ss << "no addr or addrv provided";
    return false;
  }
  extra_probe_peers.insert(addrs);
  ss << "adding peer " << addrs << " to list: " << extra_probe_peers;
  return true;
}
// called by bootstrap(), or on leader|peon -> electing
//
// Tear down all quorum-derived state so we can re-enter the probing or
// electing state machine cleanly.  Order matters here: timers and
// scrub/timecheck machinery are cancelled before quorum bookkeeping is
// cleared, and paxos (plus every paxos service) is restarted last.
void Monitor::_reset()
{
  dout(10) << __func__ << dendl;
  // disable authentication
  {
    std::lock_guard l(auth_lock);
    authmon()->_set_mon_num_rank(0, 0);
  }
  cancel_probe_timeout();
  timecheck_finish();
  health_events_cleanup();
  health_check_log_times.clear();
  scrub_event_cancel();
  leader_since = utime_t();
  quorum_since = {};
  // only stamp exited_quorum if we were actually in a quorum
  if (!quorum.empty()) {
    exited_quorum = ceph_clock_now();
  }
  quorum.clear();
  outside_quorum.clear();
  quorum_feature_map.clear();
  scrub_reset();
  paxos->restart();
  for (auto& svc : paxos_service) {
    svc->restart();
  }
}
// -----------------------------------------------------------
// sync
// Collect the full set of store prefixes that a sync must copy: the
// paxos prefix itself plus whatever each paxos service persists under.
set<string> Monitor::get_sync_targets_names()
{
  set<string> prefixes;
  prefixes.insert(paxos->get_name());
  for (auto& service : paxos_service) {
    service->get_store_prefixes(prefixes);
  }
  return prefixes;
}
// Sync requester timed out waiting on the provider; fall back to
// bootstrap so we can pick a (possibly different) provider and retry.
// Scheduled by sync_reset_timeout().
void Monitor::sync_timeout()
{
  dout(10) << __func__ << dendl;
  ceph_assert(state == STATE_SYNCHRONIZING);
  bootstrap();
}
// Determine the newest monmap we know about, comparing three sources:
// the MonmapMonitor's stored map, the backup stashed under
// mon_sync/latest_monmap, and our in-memory monmap.  The winner (by
// epoch) is encoded into 'bl'.
void Monitor::sync_obtain_latest_monmap(bufferlist &bl)
{
  dout(1) << __func__ << dendl;
  MonMap latest_monmap;
  // Grab latest monmap from MonmapMonitor
  bufferlist monmon_bl;
  int err = monmon()->get_monmap(monmon_bl);
  if (err < 0) {
    // ENOENT simply means no map stored yet; anything else is fatal
    if (err != -ENOENT) {
      derr << __func__
           << " something wrong happened while reading the store: "
           << cpp_strerror(err) << dendl;
      ceph_abort_msg("error reading the store");
    }
  } else {
    latest_monmap.decode(monmon_bl);
  }
  // Grab last backed up monmap (if any) and compare epochs
  if (store->exists("mon_sync", "latest_monmap")) {
    bufferlist backup_bl;
    int err = store->get("mon_sync", "latest_monmap", backup_bl);
    if (err < 0) {
      derr << __func__
           << " something wrong happened while reading the store: "
           << cpp_strerror(err) << dendl;
      ceph_abort_msg("error reading the store");
    }
    ceph_assert(backup_bl.length() > 0);
    MonMap backup_monmap;
    backup_monmap.decode(backup_bl);
    if (backup_monmap.epoch > latest_monmap.epoch)
      latest_monmap = backup_monmap;
  }
  // Check if our current monmap's epoch is greater than the one we've
  // got so far.
  if (monmap->epoch > latest_monmap.epoch)
    latest_monmap = *monmap;
  dout(1) << __func__ << " obtained monmap e" << latest_monmap.epoch << dendl;
  latest_monmap.encode(bl, CEPH_FEATURES_ALL);
}
// Drop all requester-side sync state: cancel the pending timeout event
// and clear the provider/cookie/version bookkeeping.
void Monitor::sync_reset_requester()
{
  dout(10) << __func__ << dendl;
  if (sync_timeout_event) {
    timer.cancel_event(sync_timeout_event);
    sync_timeout_event = NULL;
  }
  sync_full = false;
  sync_cookie = 0;
  sync_start_version = 0;
  sync_provider = entity_addrvec_t();
}
// Drop all provider-side sync state: forget every requester we were
// serving chunks to.
void Monitor::sync_reset_provider()
{
  dout(10) << __func__ << dendl;
  sync_providers.clear();
}
// Begin synchronizing from peer 'addrs'.  A "full" sync wipes our store
// (after stashing critical state) and copies everything; a "recent"
// sync only replays paxos versions we are missing.
void Monitor::sync_start(entity_addrvec_t &addrs, bool full)
{
  dout(10) << __func__ << " " << addrs << (full ? " full" : " recent") << dendl;
  ceph_assert(state == STATE_PROBING ||
	      state == STATE_SYNCHRONIZING);
  state = STATE_SYNCHRONIZING;
  // make sure are not a provider for anyone!
  sync_reset_provider();
  sync_full = full;
  if (sync_full) {
    // stash key state, and mark that we are syncing
    auto t(std::make_shared<MonitorDBStore::Transaction>());
    sync_stash_critical_state(t);
    t->put("mon_sync", "in_sync", 1);
    // never let the floor move backwards across restarts/retries
    sync_last_committed_floor = std::max(sync_last_committed_floor, paxos->get_version());
    dout(10) << __func__ << " marking sync in progress, storing sync_last_committed_floor "
	     << sync_last_committed_floor << dendl;
    t->put("mon_sync", "last_committed_floor", sync_last_committed_floor);
    store->apply_transaction(t);
    ceph_assert(g_conf()->mon_sync_requester_kill_at != 1);
    // clear the underlying store
    set<string> targets = get_sync_targets_names();
    dout(10) << __func__ << " clearing prefixes " << targets << dendl;
    store->clear(targets);
    // make sure paxos knows it has been reset. this prevents a
    // bootstrap and then different probe reply order from possibly
    // deciding a partial or no sync is needed.
    paxos->init();
    ceph_assert(g_conf()->mon_sync_requester_kill_at != 2);
  }
  // assume 'other' as the leader. We will update the leader once we receive
  // a reply to the sync start.
  sync_provider = addrs;
  sync_reset_timeout();
  MMonSync *m = new MMonSync(sync_full ? MMonSync::OP_GET_COOKIE_FULL : MMonSync::OP_GET_COOKIE_RECENT);
  if (!sync_full)
    m->last_committed = paxos->get_version();
  messenger->send_to_mon(m, sync_provider);
}
// Before a full sync wipes the store, stash the newest monmap we know
// about under mon_sync/latest_monmap so it survives the wipe.
void Monitor::sync_stash_critical_state(MonitorDBStore::TransactionRef t)
{
  dout(10) << __func__ << dendl;
  bufferlist latest;
  sync_obtain_latest_monmap(latest);
  ceph_assert(latest.length() > 0);
  t->put("mon_sync", "latest_monmap", latest);
}
// (Re)arm the requester-side sync timeout.  Any previously scheduled
// event is cancelled first; on expiry sync_timeout() re-bootstraps.
void Monitor::sync_reset_timeout()
{
  dout(10) << __func__ << dendl;
  if (sync_timeout_event)
    timer.cancel_event(sync_timeout_event);
  sync_timeout_event = timer.add_event_after(
    g_conf()->mon_sync_timeout,
    new C_MonContext{this, [this](int) {
	sync_timeout();
      }});
}
// Complete a sync: for a full sync, finalize the copied paxos versions
// into a committed state; then clear the mon_sync markers, re-init
// paxos, and bootstrap back into probing.  The kill_at asserts are
// test-only crash injection points.
void Monitor::sync_finish(version_t last_committed)
{
  dout(10) << __func__ << " lc " << last_committed << " from " << sync_provider << dendl;
  ceph_assert(g_conf()->mon_sync_requester_kill_at != 7);
  if (sync_full) {
    // finalize the paxos commits
    auto tx(std::make_shared<MonitorDBStore::Transaction>());
    paxos->read_and_prepare_transactions(tx, sync_start_version,
					 last_committed);
    tx->put(paxos->get_name(), "last_committed", last_committed);
    dout(30) << __func__ << " final tx dump:\n";
    JSONFormatter f(true);
    tx->dump(&f);
    f.flush(*_dout);
    *_dout << dendl;
    store->apply_transaction(tx);
  }
  ceph_assert(g_conf()->mon_sync_requester_kill_at != 8);
  // sync is done; remove the persistent in-progress markers
  auto t(std::make_shared<MonitorDBStore::Transaction>());
  t->erase("mon_sync", "in_sync");
  t->erase("mon_sync", "force_sync");
  t->erase("mon_sync", "last_committed_floor");
  store->apply_transaction(t);
  ceph_assert(g_conf()->mon_sync_requester_kill_at != 9);
  init_paxos();
  ceph_assert(g_conf()->mon_sync_requester_kill_at != 10);
  bootstrap();
}
// Dispatch an incoming MMonSync message to the provider-side or
// requester-side handler based on its op code.
void Monitor::handle_sync(MonOpRequestRef op)
{
  auto m = op->get_req<MMonSync>();
  dout(10) << __func__ << " " << *m << dendl;
  switch (m->op) {
    // provider ---------
  case MMonSync::OP_GET_COOKIE_FULL:
  case MMonSync::OP_GET_COOKIE_RECENT:
    handle_sync_get_cookie(op);
    break;
  case MMonSync::OP_GET_CHUNK:
    handle_sync_get_chunk(op);
    break;
    // client -----------
  case MMonSync::OP_COOKIE:
    handle_sync_cookie(op);
    break;
  case MMonSync::OP_CHUNK:
  case MMonSync::OP_LAST_CHUNK:
    handle_sync_chunk(op);
    break;
  case MMonSync::OP_NO_COOKIE:
    handle_sync_no_cookie(op);
    break;
  default:
    dout(0) << __func__ << " unknown op " << m->op << dendl;
    ceph_abort_msg("unknown op");
  }
}
// leader
// Tell a sync requester that its cookie is unknown/invalid; the peer
// reacts by re-bootstrapping and retrying the sync.
void Monitor::_sync_reply_no_cookie(MonOpRequestRef op)
{
  auto req = op->get_req<MMonSync>();
  req->get_connection()->send_message(
    new MMonSync(MMonSync::OP_NO_COOKIE, req->cookie));
}
// Provider side: a peer wants to start syncing from us.  Hand it a
// cookie identifying the session and record per-session state in
// sync_providers.  Refused (no cookie) if we are ourselves syncing.
void Monitor::handle_sync_get_cookie(MonOpRequestRef op)
{
  auto m = op->get_req<MMonSync>();
  if (is_synchronizing()) {
    // we can't provide a consistent view while we are mid-sync ourselves
    _sync_reply_no_cookie(op);
    return;
  }
  ceph_assert(g_conf()->mon_sync_provider_kill_at != 1);
  // make sure they can understand us.
  if ((required_features ^ m->get_connection()->get_features()) &
      required_features) {
    dout(5) << " ignoring peer mon." << m->get_source().num()
	    << " has features " << std::hex
	    << m->get_connection()->get_features()
	    << " but we require " << required_features << std::dec << dendl;
    return;
  }
  // make up a unique cookie. include election epoch (which persists
  // across restarts for the whole cluster) and a counter for this
  // process instance. there is no need to be unique *across*
  // monitors, though.
  uint64_t cookie = ((unsigned long long)elector.get_epoch() << 24) + ++sync_provider_count;
  ceph_assert(sync_providers.count(cookie) == 0);
  dout(10) << __func__ << " cookie " << cookie << " for " << m->get_source_inst() << dendl;
  SyncProvider& sp = sync_providers[cookie];
  sp.cookie = cookie;
  sp.addrs = m->get_source_addrs();
  // session expires if the requester goes quiet for 2x the sync timeout
  sp.reset_timeout(g_ceph_context, g_conf()->mon_sync_timeout * 2);
  set<string> sync_targets;
  if (m->op == MMonSync::OP_GET_COOKIE_FULL) {
    // full scan
    sync_targets = get_sync_targets_names();
    sp.last_committed = paxos->get_version();
    sp.synchronizer = store->get_synchronizer(sp.last_key, sync_targets);
    sp.full = true;
    dout(10) << __func__ << " will sync prefixes " << sync_targets << dendl;
  } else {
    // just catch up paxos
    sp.last_committed = m->last_committed;
  }
  dout(10) << __func__ << " will sync from version " << sp.last_committed << dendl;
  MMonSync *reply = new MMonSync(MMonSync::OP_COOKIE, sp.cookie);
  reply->last_committed = sp.last_committed;
  m->get_connection()->send_message(reply);
}
// Provider side: send the requester its next chunk.  Each chunk packs
// missing paxos versions first, then (for full syncs) raw store keys,
// bounded by the configured byte/key budgets.  The final chunk is
// tagged OP_LAST_CHUNK and the session state is dropped.
void Monitor::handle_sync_get_chunk(MonOpRequestRef op)
{
  auto m = op->get_req<MMonSync>();
  dout(10) << __func__ << " " << *m << dendl;
  if (sync_providers.count(m->cookie) == 0) {
    dout(10) << __func__ << " no cookie " << m->cookie << dendl;
    _sync_reply_no_cookie(op);
    return;
  }
  ceph_assert(g_conf()->mon_sync_provider_kill_at != 2);
  SyncProvider& sp = sync_providers[m->cookie];
  sp.reset_timeout(g_ceph_context, g_conf()->mon_sync_timeout * 2);
  if (sp.last_committed < paxos->get_first_committed() &&
      paxos->get_first_committed() > 1) {
    // we already trimmed past what they need; they must restart the sync
    dout(10) << __func__ << " sync requester fell behind paxos, their lc " << sp.last_committed
	     << " < our fc " << paxos->get_first_committed() << dendl;
    sync_providers.erase(m->cookie);
    _sync_reply_no_cookie(op);
    return;
  }
  MMonSync *reply = new MMonSync(MMonSync::OP_CHUNK, sp.cookie);
  auto tx(std::make_shared<MonitorDBStore::Transaction>());
  int bytes_left = g_conf()->mon_sync_max_payload_size;
  int keys_left = g_conf()->mon_sync_max_payload_keys;
  // first, pack any paxos versions the requester is missing
  while (sp.last_committed < paxos->get_version() &&
	 bytes_left > 0 &&
	 keys_left > 0) {
    bufferlist bl;
    sp.last_committed++;
    int err = store->get(paxos->get_name(), sp.last_committed, bl);
    ceph_assert(err == 0);
    tx->put(paxos->get_name(), sp.last_committed, bl);
    bytes_left -= bl.length();
    --keys_left;
    dout(20) << __func__ << " including paxos state " << sp.last_committed
	     << dendl;
  }
  reply->last_committed = sp.last_committed;
  // then, for a full sync, fill the remaining budget with store keys
  if (sp.full && bytes_left > 0 && keys_left > 0) {
    sp.synchronizer->get_chunk_tx(tx, bytes_left, keys_left);
    sp.last_key = sp.synchronizer->get_last_key();
    reply->last_key = sp.last_key;
  }
  if ((sp.full && sp.synchronizer->has_next_chunk()) ||
      sp.last_committed < paxos->get_version()) {
    dout(10) << __func__ << " chunk, through version " << sp.last_committed
	     << " key " << sp.last_key << dendl;
  } else {
    dout(10) << __func__ << " last chunk, through version " << sp.last_committed
	     << " key " << sp.last_key << dendl;
    reply->op = MMonSync::OP_LAST_CHUNK;
    ceph_assert(g_conf()->mon_sync_provider_kill_at != 3);
    // clean up our local state
    sync_providers.erase(sp.cookie);
  }
  encode(*tx, reply->chunk_bl);
  m->get_connection()->send_message(reply);
}
// requester
// Requester side: provider granted us a sync session.  Record the
// cookie and starting version, then request the first chunk.  Replies
// from an unexpected source, or after we already have a cookie, are
// dropped.
void Monitor::handle_sync_cookie(MonOpRequestRef op)
{
  auto m = op->get_req<MMonSync>();
  dout(10) << __func__ << " " << *m << dendl;
  if (sync_cookie) {
    dout(10) << __func__ << " already have a cookie, ignoring" << dendl;
    return;
  }
  if (m->get_source_addrs() != sync_provider) {
    dout(10) << __func__ << " source does not match, discarding" << dendl;
    return;
  }
  sync_cookie = m->cookie;
  sync_start_version = m->last_committed;
  sync_reset_timeout();
  sync_get_next_chunk();
  ceph_assert(g_conf()->mon_sync_requester_kill_at != 3);
}
// Requester side: ask the provider for the next chunk of sync data.
// An optional configured delay can be injected for testing.
void Monitor::sync_get_next_chunk()
{
  dout(20) << __func__ << " cookie " << sync_cookie << " provider " << sync_provider << dendl;
  const double delay = g_conf()->mon_inject_sync_get_chunk_delay;
  if (delay > 0) {
    dout(20) << __func__ << " injecting delay of " << delay << dendl;
    usleep((long long)(delay * 1000000.0));
  }
  messenger->send_to_mon(new MMonSync(MMonSync::OP_GET_CHUNK, sync_cookie),
			 sync_provider);
  ceph_assert(g_conf()->mon_sync_requester_kill_at != 4);
}
// Requester side: apply a received chunk to our store.  For a recent
// (non-full) sync we additionally replay the newly received paxos
// versions immediately so we stay consistent as we go.  OP_CHUNK keeps
// the loop going; OP_LAST_CHUNK finishes the sync.
void Monitor::handle_sync_chunk(MonOpRequestRef op)
{
  auto m = op->get_req<MMonSync>();
  dout(10) << __func__ << " " << *m << dendl;
  if (m->cookie != sync_cookie) {
    dout(10) << __func__ << " cookie does not match, discarding" << dendl;
    return;
  }
  if (m->get_source_addrs() != sync_provider) {
    dout(10) << __func__ << " source does not match, discarding" << dendl;
    return;
  }
  ceph_assert(state == STATE_SYNCHRONIZING);
  ceph_assert(g_conf()->mon_sync_requester_kill_at != 5);
  // apply the raw chunk payload to the store
  auto tx(std::make_shared<MonitorDBStore::Transaction>());
  tx->append_from_encoded(m->chunk_bl);
  dout(30) << __func__ << " tx dump:\n";
  JSONFormatter f(true);
  tx->dump(&f);
  f.flush(*_dout);
  *_dout << dendl;
  store->apply_transaction(tx);
  ceph_assert(g_conf()->mon_sync_requester_kill_at != 6);
  if (!sync_full) {
    dout(10) << __func__ << " applying recent paxos transactions as we go" << dendl;
    auto tx(std::make_shared<MonitorDBStore::Transaction>());
    paxos->read_and_prepare_transactions(tx, paxos->get_version() + 1,
					 m->last_committed);
    tx->put(paxos->get_name(), "last_committed", m->last_committed);
    dout(30) << __func__ << " tx dump:\n";
    JSONFormatter f(true);
    tx->dump(&f);
    f.flush(*_dout);
    *_dout << dendl;
    store->apply_transaction(tx);
    paxos->init();  // to refresh what we just wrote
  }
  if (m->op == MMonSync::OP_CHUNK) {
    sync_reset_timeout();
    sync_get_next_chunk();
  } else if (m->op == MMonSync::OP_LAST_CHUNK) {
    sync_finish(m->last_committed);
  }
}
// Requester side: provider rejected our cookie (expired/unknown, or it
// is itself syncing).  Restart discovery via bootstrap.
void Monitor::handle_sync_no_cookie(MonOpRequestRef op)
{
  dout(10) << __func__ << dendl;
  bootstrap();
}
void Monitor::sync_trim_providers()
{
dout(20) << __func__ << dendl;
utime_t now = ceph_clock_now();
map<uint64_t,SyncProvider>::iterator p = sync_providers.begin();
while (p != sync_providers.end()) {
if (now > p->second.timeout) {
dout(10) << __func__ << " expiring cookie " << p->second.cookie
<< " for " << p->second.addrs << dendl;
sync_providers.erase(p++);
} else {
++p;
}
}
}
// ---------------------------------------------------
// probe
// Cancel the pending probe timeout event, if one is scheduled.
void Monitor::cancel_probe_timeout()
{
  if (!probe_timeout_event) {
    dout(10) << "cancel_probe_timeout (none scheduled)" << dendl;
    return;
  }
  dout(10) << "cancel_probe_timeout " << probe_timeout_event << dendl;
  timer.cancel_event(probe_timeout_event);
  probe_timeout_event = NULL;
}
// (Re)arm the probe timeout: cancel any existing event and schedule
// probe_timeout() after mon_probe_timeout seconds.  If the timer
// refuses the event (e.g. it is shutting down), the pointer is cleared
// so we do not keep a dangling reference.
void Monitor::reset_probe_timeout()
{
  cancel_probe_timeout();
  probe_timeout_event = new C_MonContext{this, [this](int r) {
      probe_timeout(r);
    }};
  double t = g_conf()->mon_probe_timeout;
  if (timer.add_event_after(t, probe_timeout_event)) {
    dout(10) << "reset_probe_timeout " << probe_timeout_event
	     << " after " << t << " seconds" << dendl;
  } else {
    probe_timeout_event = nullptr;
  }
}
// Probe (or sync) phase took too long; start over from bootstrap.
// Only ever invoked from the scheduled probe timeout event.
void Monitor::probe_timeout(int r)
{
  dout(4) << "probe_timeout " << probe_timeout_event << dendl;
  ceph_assert(is_probing() || is_synchronizing());
  ceph_assert(probe_timeout_event);
  probe_timeout_event = NULL;
  bootstrap();
}
// Dispatch an incoming MMonProbe.  Messages from a different fsid are
// dropped outright.  OP_MISSING_FEATURES only logs the mismatch; the
// bootstrap logic elsewhere decides what to do about it.
void Monitor::handle_probe(MonOpRequestRef op)
{
  auto m = op->get_req<MMonProbe>();
  dout(10) << "handle_probe " << *m << dendl;
  if (m->fsid != monmap->fsid) {
    dout(0) << "handle_probe ignoring fsid " << m->fsid << " != " << monmap->fsid << dendl;
    return;
  }
  switch (m->op) {
  case MMonProbe::OP_PROBE:
    handle_probe_probe(op);
    break;
  case MMonProbe::OP_REPLY:
    handle_probe_reply(op);
    break;
  case MMonProbe::OP_MISSING_FEATURES:
    derr << __func__ << " require release " << (int)m->mon_release << " > "
	 << (int)ceph_release()
	 << ", or missing features (have " << CEPH_FEATURES_ALL
	 << ", required " << m->required_features
	 << ", missing " << (m->required_features & ~CEPH_FEATURES_ALL) << ")"
	 << dendl;
    break;
  }
}
// Respond to a peer's OP_PROBE.  If the peer's release or connection
// features are insufficient, reply with OP_MISSING_FEATURES; otherwise
// reply with our monmap, quorum and paxos version range.  Also records
// unknown peers as probe hints and kicks off peer pings for known ones.
void Monitor::handle_probe_probe(MonOpRequestRef op)
{
  auto m = op->get_req<MMonProbe>();
  dout(10) << "handle_probe_probe " << m->get_source_inst() << " " << *m
	   << " features " << m->get_connection()->get_features() << dendl;
  uint64_t missing = required_features & ~m->get_connection()->get_features();
  if ((m->mon_release != ceph_release_t::unknown &&
       m->mon_release < monmap->min_mon_release) ||
      missing) {
    dout(1) << " peer " << m->get_source_addr()
	    << " release " << m->mon_release
	    << " < min_mon_release " << monmap->min_mon_release
	    << ", or missing features " << missing << dendl;
    MMonProbe *r = new MMonProbe(monmap->fsid, MMonProbe::OP_MISSING_FEATURES,
				 name, has_ever_joined, monmap->min_mon_release);
    // bug fix: set required_features on the reply 'r' we actually send,
    // not on the incoming message 'm'; otherwise the peer's
    // OP_MISSING_FEATURES handler reports an empty required_features set.
    r->required_features = required_features;
    m->get_connection()->send_message(r);
    goto out;
  }
  if (!is_probing() && !is_synchronizing()) {
    // If the probing mon is way ahead of us, we need to re-bootstrap.
    // Normally we capture this case when we initially bootstrap, but
    // it is possible we pass those checks (we overlap with
    // quorum-to-be) but fail to join a quorum before it moves past
    // us. We need to be kicked back to bootstrap so we can
    // synchonize, not keep calling elections.
    if (paxos->get_version() + 1 < m->paxos_first_version) {
      dout(1) << " peer " << m->get_source_addr() << " has first_committed "
	      << "ahead of us, re-bootstrapping" << dendl;
      bootstrap();
      goto out;
    }
  }
  // normal case: reply with our view of the world
  MMonProbe *r;
  r = new MMonProbe(monmap->fsid, MMonProbe::OP_REPLY, name, has_ever_joined,
		    ceph_release());
  r->name = name;
  r->quorum = quorum;
  r->leader = leader;
  monmap->encode(r->monmap_bl, m->get_connection()->get_features());
  r->paxos_first_version = paxos->get_first_committed();
  r->paxos_last_version = paxos->get_version();
  m->get_connection()->send_message(r);
  // did we discover a peer here?
  if (!monmap->contains(m->get_source_addr())) {
    dout(1) << " adding peer " << m->get_source_addrs()
	    << " to list of hints" << dendl;
    extra_probe_peers.insert(m->get_source_addrs());
  } else {
    elector.begin_peer_ping(monmap->get_rank(m->get_source_addr()));
  }
 out:
  return;
}
// Process a peer's OP_REPLY to our probe.  This drives bootstrap
// discovery: adopt a newer monmap, learn peer names/addresses, decide
// whether we must sync (full or recent) from the peer, and finally
// either join an existing quorum or count peers toward forming a new
// one.  The exact order of checks is significant.
void Monitor::handle_probe_reply(MonOpRequestRef op)
{
  auto m = op->get_req<MMonProbe>();
  dout(10) << "handle_probe_reply " << m->get_source_inst()
	   << " " << *m << dendl;
  dout(10) << " monmap is " << *monmap << dendl;
  // discover name and addrs during probing or electing states.
  if (!is_probing() && !is_electing()) {
    return;
  }
  // newer map, or they've joined a quorum and we haven't?
  bufferlist mybl;
  monmap->encode(mybl, m->get_connection()->get_features());
  // make sure it's actually different; the checks below err toward
  // taking the other guy's map, which could cause us to loop.
  if (!mybl.contents_equal(m->monmap_bl)) {
    MonMap *newmap = new MonMap;
    newmap->decode(m->monmap_bl);
    if (m->has_ever_joined && (newmap->get_epoch() > monmap->get_epoch() ||
			       !has_ever_joined)) {
      dout(10) << " got newer/committed monmap epoch " << newmap->get_epoch()
	       << ", mine was " << monmap->get_epoch() << dendl;
      int epoch_diff = newmap->get_epoch() - monmap->get_epoch();
      delete newmap;
      monmap->decode(m->monmap_bl);
      dout(20) << "has_ever_joined: " << has_ever_joined << dendl;
      // a single-epoch step forward keeps peer state; any bigger jump
      // (or a first-ever join) clears it
      if (epoch_diff == 1 && has_ever_joined) {
        notify_new_monmap(false);
      } else {
        notify_new_monmap(false, false);
        elector.notify_clear_peer_state();
      }
      bootstrap();
      return;
    }
    delete newmap;
  }
  // rename peer?
  string peer_name = monmap->get_name(m->get_source_addr());
  if (monmap->get_epoch() == 0 && peer_name.compare(0, 7, "noname-") == 0) {
    // seed monmap: replace the placeholder name with the peer's real name
    dout(10) << " renaming peer " << m->get_source_addr() << " "
	     << peer_name << " -> " << m->name << " in my monmap"
	     << dendl;
    monmap->rename(peer_name, m->name);
    if (is_electing()) {
      bootstrap();
      return;
    }
  } else if (peer_name.size()) {
    dout(10) << " peer name is " << peer_name << dendl;
  } else {
    dout(10) << " peer " << m->get_source_addr() << " not in map" << dendl;
  }
  // new initial peer?
  if (monmap->get_epoch() == 0 &&
      monmap->contains(m->name) &&
      monmap->get_addrs(m->name).front().is_blank_ip()) {
    // seed monmap listed the name but not the address; fill it in
    dout(1) << " learned initial mon " << m->name
	    << " addrs " << m->get_source_addrs() << dendl;
    monmap->set_addrvec(m->name, m->get_source_addrs());
    bootstrap();
    return;
  }
  // end discover phase
  if (!is_probing()) {
    return;
  }
  ceph_assert(paxos != NULL);
  if (is_synchronizing()) {
    dout(10) << " currently syncing" << dendl;
    return;
  }
  entity_addrvec_t other = m->get_source_addrs();
  if (m->paxos_last_version < sync_last_committed_floor) {
    // peer is older than our sync floor; not a usable sync source
    dout(10) << " peer paxos versions [" << m->paxos_first_version
	     << "," << m->paxos_last_version << "] < my sync_last_committed_floor "
	     << sync_last_committed_floor << ", ignoring"
	     << dendl;
  } else {
    // peer trimmed past us -> need a full sync
    if (paxos->get_version() < m->paxos_first_version &&
	m->paxos_first_version > 1) {  // no need to sync if we're 0 and they start at 1.
      dout(10) << " peer paxos first versions [" << m->paxos_first_version
	       << "," << m->paxos_last_version << "]"
	       << " vs my version " << paxos->get_version()
	       << " (too far ahead)"
	       << dendl;
      cancel_probe_timeout();
      sync_start(other, true);
      return;
    }
    // peer is far ahead but overlapping -> a recent (paxos-only) sync suffices
    if (paxos->get_version() + g_conf()->paxos_max_join_drift < m->paxos_last_version) {
      dout(10) << " peer paxos last version " << m->paxos_last_version
	       << " vs my version " << paxos->get_version()
	       << " (too far ahead)"
	       << dendl;
      cancel_probe_timeout();
      sync_start(other, false);
      return;
    }
  }
  // did the existing cluster complete upgrade to luminous?
  if (osdmon()->osdmap.get_epoch()) {
    if (osdmon()->osdmap.require_osd_release < ceph_release_t::luminous) {
      derr << __func__ << " existing cluster has not completed upgrade to"
	   << " luminous; 'ceph osd require_osd_release luminous' before"
	   << " upgrading" << dendl;
      exit(0);
    }
    if (!osdmon()->osdmap.test_flag(CEPH_OSDMAP_PURGED_SNAPDIRS) ||
	!osdmon()->osdmap.test_flag(CEPH_OSDMAP_RECOVERY_DELETES)) {
      derr << __func__ << " existing cluster has not completed a full luminous"
	   << " scrub to purge legacy snapdir objects; please scrub before"
	   << " upgrading beyond luminous." << dendl;
      exit(0);
    }
  }
  // is there an existing quorum?
  if (m->quorum.size()) {
    dout(10) << " existing quorum " << m->quorum << dendl;
    dout(10) << " peer paxos version " << m->paxos_last_version
             << " vs my version " << paxos->get_version()
             << " (ok)"
             << dendl;
    bool in_map = false;
    const auto my_info = monmap->mon_info.find(name);
    const map<string,string> *map_crush_loc{nullptr};
    if (my_info != monmap->mon_info.end()) {
      in_map = true;
      map_crush_loc = &my_info->second.crush_loc;
    }
    if (in_map &&
	!monmap->get_addrs(name).front().is_blank_ip() &&
	(!need_set_crush_loc || (*map_crush_loc == crush_loc))) {
      // i'm part of the cluster; just initiate a new election
      start_election();
    } else {
      dout(10) << " ready to join, but i'm not in the monmap/"
	"my addr is blank/location is wrong, trying to join" << dendl;
      send_mon_message(new MMonJoin(monmap->fsid, name,
				    messenger->get_myaddrs(), crush_loc,
				    need_set_crush_loc),
		       m->leader);
    }
  } else {
    // no quorum yet: count this peer toward the set of known-alive mons
    if (monmap->contains(m->name)) {
      dout(10) << " mon." << m->name << " is outside the quorum" << dendl;
      outside_quorum.insert(m->name);
    } else {
      dout(10) << " mostly ignoring mon." << m->name << ", not part of monmap" << dendl;
      return;
    }
    unsigned need = monmap->min_quorum_size();
    dout(10) << " outside_quorum now " << outside_quorum << ", need " << need << dendl;
    if (outside_quorum.size() >= need) {
      if (outside_quorum.count(name)) {
        dout(10) << " that's enough to form a new quorum, calling election" << dendl;
        start_election();
      } else {
        dout(10) << " that's enough to form a new quorum, but it does not include me; waiting" << dendl;
      }
    } else {
      dout(10) << " that's not yet enough for a new quorum, waiting" << dendl;
    }
  }
}
// Enter the electing state in response to someone else's election
// (we do not call one ourselves; contrast with start_election()).
void Monitor::join_election()
{
  dout(10) << __func__ << dendl;
  // ensure any in-flight paxos write lands before we reset state
  wait_for_paxos_write();
  _reset();
  state = STATE_ELECTING;
  logger->inc(l_mon_num_elections);
}
// Actively call a new election: reset quorum state, enter electing,
// and ask the elector to start a round.
void Monitor::start_election()
{
  dout(10) << "start_election" << dendl;
  // ensure any in-flight paxos write lands before we reset state
  wait_for_paxos_write();
  _reset();
  state = STATE_ELECTING;
  logger->inc(l_mon_num_elections);
  logger->inc(l_mon_election_call);
  clog->info() << "mon." << name << " calling monitor election";
  elector.call_election();
}
// Single-monitor shortcut: declare victory without a real election.
// Only valid when this mon is rank 0 of a one-mon map.
void Monitor::win_standalone_election()
{
  dout(1) << "win_standalone_election" << dendl;
  // bump election epoch, in case the previous epoch included other
  // monitors; we need to be able to make the distinction.
  elector.declare_standalone_victory();
  rank = monmap->get_rank(name);
  ceph_assert(rank == 0);
  set<int> q;
  q.insert(rank);
  // our own metadata goes in under rank 0 (the only rank)
  map<int,Metadata> metadata;
  collect_metadata(&metadata[0]);
  win_election(elector.get_epoch(), q,
               CEPH_FEATURES_ALL,
               ceph::features::mon::get_supported(),
	       ceph_release(),
               metadata);
}
// Time at which we became leader.  Only meaningful (and only callable)
// while we are the leader.
const utime_t& Monitor::get_leader_since() const
{
  ceph_assert(state == STATE_LEADER);
  return leader_since;
}
// Current election epoch, as tracked by the elector.
epoch_t Monitor::get_epoch()
{
  return elector.get_epoch();
}
// Notify every paxos service that the election has concluded.  On the
// leader, monmon() was already notified first (see win_election()), so
// it is skipped here to avoid a double callback.
void Monitor::_finish_svc_election()
{
  ceph_assert(state == STATE_LEADER || state == STATE_PEON);
  const bool am_leader = (state == STATE_LEADER);
  for (auto& service : paxos_service) {
    if (am_leader && service.get() == monmon())
      continue;
    service->election_finished();
  }
}
// We won the election: become leader, record quorum composition and
// feature set, initialize paxos and all services (monmap monitor
// first), stash mon metadata in the first transaction, and start the
// periodic leader-only machinery (timecheck, health, scrub).
void Monitor::win_election(epoch_t epoch, const set<int>& active, uint64_t features,
                           const mon_feature_t& mon_features,
			   ceph_release_t min_mon_release,
			   const map<int,Metadata>& metadata)
{
  dout(10) << __func__ << " epoch " << epoch << " quorum " << active
	   << " features " << features
           << " mon_features " << mon_features
	   << " min_mon_release " << min_mon_release
           << dendl;
  ceph_assert(is_electing());
  state = STATE_LEADER;
  leader_since = ceph_clock_now();
  quorum_since = mono_clock::now();
  leader = rank;
  quorum = active;
  quorum_con_features = features;
  quorum_mon_features = mon_features;
  quorum_min_mon_release = min_mon_release;
  pending_metadata = metadata;
  outside_quorum.clear();
  clog->info() << "mon." << name << " is new leader, mons " << get_quorum_names()
	       << " in quorum (ranks " << quorum << ")";
  set_leader_commands(get_local_commands(mon_features));
  paxos->leader_init();
  // NOTE: tell monmap monitor first. This is important for the
  // bootstrap case to ensure that the very first paxos proposal
  // codifies the monmap. Otherwise any manner of chaos can ensue
  // when monitors are call elections or participating in a paxos
  // round without agreeing on who the participants are.
  monmon()->election_finished();
  _finish_svc_election();
  logger->inc(l_mon_election_win);
  // inject new metadata in first transaction.
  {
    // include previous metadata for missing mons (that aren't part of
    // the current quorum).
    map<int,Metadata> m = metadata;
    for (unsigned rank = 0; rank < monmap->size(); ++rank) {
      if (m.count(rank) == 0 &&
	  mon_metadata.count(rank)) {
	m[rank] = mon_metadata[rank];
      }
    }
    // FIXME: This is a bit sloppy because we aren't guaranteed to submit
    // a new transaction immediately after the election finishes.  We should
    // do that anyway for other reasons, though.
    MonitorDBStore::TransactionRef t = paxos->get_pending_transaction();
    bufferlist bl;
    encode(m, bl);
    t->put(MONITOR_STORE_PREFIX, "last_metadata", bl);
  }
  finish_election();
  // leader-only periodic work is pointless for a single-mon cluster
  // or an uncommitted seed map
  if (monmap->size() > 1 &&
      monmap->get_epoch() > 0) {
    timecheck_start();
    health_tick_start();
    // Freshen the health status before doing health_to_clog in case
    // our just-completed election changed the health
    healthmon()->wait_for_active_ctx(new LambdaContext([this](int r){
      dout(20) << "healthmon now active" << dendl;
      healthmon()->tick();
      if (healthmon()->is_proposing()) {
        dout(20) << __func__ << " healthmon proposing, waiting" << dendl;
        healthmon()->wait_for_finished_proposal(nullptr, new C_MonContext{this,
              [this](int r){
                ceph_assert(ceph_mutex_is_locked_by_me(lock));
                do_health_to_clog_interval();
              }});
      } else {
        do_health_to_clog_interval();
      }
    }));
    scrub_event_start();
  }
}
// We lost the election: become a peon of leader 'l', record quorum
// composition and feature set, and initialize paxos/services for the
// peon role.
void Monitor::lose_election(epoch_t epoch, set<int> &q, int l,
			    uint64_t features,
                            const mon_feature_t& mon_features,
			    ceph_release_t min_mon_release)
{
  state = STATE_PEON;
  // leader_since only applies to the leader; clear it
  leader_since = utime_t();
  quorum_since = mono_clock::now();
  leader = l;
  quorum = q;
  outside_quorum.clear();
  quorum_con_features = features;
  quorum_mon_features = mon_features;
  quorum_min_mon_release = min_mon_release;
  dout(10) << "lose_election, epoch " << epoch << " leader is mon" << leader
	   << " quorum is " << quorum << " features are " << quorum_con_features
           << " mon_features are " << quorum_mon_features
	   << " min_mon_release " << min_mon_release
           << dendl;
  paxos->peon_init();
  _finish_svc_election();
  logger->inc(l_mon_election_lose);
  finish_election();
}
namespace {
// Render the build's supported compression algorithm names as a
// comma-separated list (used in the mon metadata).
std::string collect_compression_algorithms()
{
  ostringstream os;
  const char *sep = "";
  for (const auto& [name, key] : Compressor::compression_algorithms) {
    std::ignore = key;
    os << sep << name;
    sep = ", ";
  }
  return os.str();
}
}
// Fill 'm' with this monitor's metadata: system info, bound addresses,
// supported compression algorithms, and metadata about the raw devices
// backing the mon store.  Device-metadata failures are logged but not
// fatal.
void Monitor::collect_metadata(Metadata *m)
{
  collect_sys_info(m, g_ceph_context);
  (*m)["addrs"] = stringify(messenger->get_myaddrs());
  (*m)["compression_algorithms"] = collect_compression_algorithms();
  // infer storage device
  string devname = store->get_devname();
  set<string> devnames;
  get_raw_devices(devname, &devnames);
  map<string,string> errs;
  get_device_metadata(devnames, m, &errs);
  for (auto& i : errs) {
    dout(1) << __func__ << " " << i.first << ": " << i.second << dendl;
  }
}
// Common post-election work for both leader and peon: update compatset
// features, wake waiters, re-enable auth, and verify our own monmap
// entry (name and crush location) — requesting a rename/move via
// MMonJoin if it does not match.
void Monitor::finish_election()
{
  apply_quorum_to_compatset_features();
  apply_monmap_to_compatset_features();
  timecheck_finish();
  exited_quorum = utime_t();
  finish_contexts(g_ceph_context, waitfor_quorum);
  finish_contexts(g_ceph_context, maybe_wait_for_quorum);
  resend_routed_requests();
  update_logger();
  register_cluster_logger();
  // enable authentication
  {
    std::lock_guard l(auth_lock);
    authmon()->_set_mon_num_rank(monmap->size(), rank);
  }
  // am i named and located properly?
  string cur_name = monmap->get_name(messenger->get_myaddrs());
  // NOTE(review): my_infop is dereferenced without checking against
  // mon_info.end(); this relies on our address always being present in
  // the monmap after an election — confirm that invariant holds.
  const auto my_infop = monmap->mon_info.find(cur_name);
  const map<string,string>& map_crush_loc = my_infop->second.crush_loc;
  if (cur_name != name ||
      (need_set_crush_loc && map_crush_loc != crush_loc)) {
    dout(10) << " renaming/moving myself from " << cur_name << "/"
	     << map_crush_loc <<" -> " << name << "/" << crush_loc << dendl;
    send_mon_message(new MMonJoin(monmap->fsid, name, messenger->get_myaddrs(),
				  crush_loc, need_set_crush_loc),
		     leader);
    return;
  }
  do_stretch_mode_election_work();
}
// Persist 'new_features' as our on-disk compatset if it differs from
// the current one, then recompute the quorum feature requirements.
void Monitor::_apply_compatset_features(CompatSet &new_features)
{
  if (new_features.compare(features) == 0)
    return;
  CompatSet diff = features.unsupported(new_features);
  dout(1) << __func__ << " enabling new quorum features: " << diff << dendl;
  features = new_features;
  auto t = std::make_shared<MonitorDBStore::Transaction>();
  write_features(t);
  store->apply_transaction(t);
  calc_quorum_requirements();
}
void Monitor::apply_quorum_to_compatset_features()
{
CompatSet new_features(features);
new_features.incompat.insert(CEPH_MON_FEATURE_INCOMPAT_OSD_ERASURE_CODES);
if (quorum_con_features & CEPH_FEATURE_OSDMAP_ENC) {
new_features.incompat.insert(CEPH_MON_FEATURE_INCOMPAT_OSDMAP_ENC);
}
new_features.incompat.insert(CEPH_MON_FEATURE_INCOMPAT_ERASURE_CODE_PLUGINS_V2);
new_features.incompat.insert(CEPH_MON_FEATURE_INCOMPAT_ERASURE_CODE_PLUGINS_V3);
dout(5) << __func__ << dendl;
_apply_compatset_features(new_features);
}
// Fold the monmap's *persistent* required features into the persisted
// CompatSet.  Each release feature follows the same pattern: assert it
// really is a persistent feature and that the quorum supports the
// corresponding release, then record the incompat bit.
void Monitor::apply_monmap_to_compatset_features()
{
  CompatSet new_features(features);
  mon_feature_t monmap_features = monmap->get_required_features();

  /* persistent monmap features may go into the compatset.
   * optional monmap features may not - why?
   * because optional monmap features may be set/unset by the admin,
   * and possibly by other means that haven't yet been thought out,
   * so we can't make the monitor enforce them on start - because they
   * may go away.
   * this, of course, does not invalidate setting a compatset feature
   * for an optional feature - as long as you make sure to clean it up
   * once you unset it.
   */
  if (monmap_features.contains_all(ceph::features::mon::FEATURE_KRAKEN)) {
    ceph_assert(ceph::features::mon::get_persistent().contains_all(
           ceph::features::mon::FEATURE_KRAKEN));
    // this feature should only ever be set if the quorum supports it.
    ceph_assert(HAVE_FEATURE(quorum_con_features, SERVER_KRAKEN));
    new_features.incompat.insert(CEPH_MON_FEATURE_INCOMPAT_KRAKEN);
  }
  if (monmap_features.contains_all(ceph::features::mon::FEATURE_LUMINOUS)) {
    ceph_assert(ceph::features::mon::get_persistent().contains_all(
           ceph::features::mon::FEATURE_LUMINOUS));
    // this feature should only ever be set if the quorum supports it.
    ceph_assert(HAVE_FEATURE(quorum_con_features, SERVER_LUMINOUS));
    new_features.incompat.insert(CEPH_MON_FEATURE_INCOMPAT_LUMINOUS);
  }
  if (monmap_features.contains_all(ceph::features::mon::FEATURE_MIMIC)) {
    ceph_assert(ceph::features::mon::get_persistent().contains_all(
           ceph::features::mon::FEATURE_MIMIC));
    // this feature should only ever be set if the quorum supports it.
    ceph_assert(HAVE_FEATURE(quorum_con_features, SERVER_MIMIC));
    new_features.incompat.insert(CEPH_MON_FEATURE_INCOMPAT_MIMIC);
  }
  if (monmap_features.contains_all(ceph::features::mon::FEATURE_NAUTILUS)) {
    ceph_assert(ceph::features::mon::get_persistent().contains_all(
           ceph::features::mon::FEATURE_NAUTILUS));
    // this feature should only ever be set if the quorum supports it.
    ceph_assert(HAVE_FEATURE(quorum_con_features, SERVER_NAUTILUS));
    new_features.incompat.insert(CEPH_MON_FEATURE_INCOMPAT_NAUTILUS);
  }
  if (monmap_features.contains_all(ceph::features::mon::FEATURE_OCTOPUS)) {
    ceph_assert(ceph::features::mon::get_persistent().contains_all(
           ceph::features::mon::FEATURE_OCTOPUS));
    // this feature should only ever be set if the quorum supports it.
    ceph_assert(HAVE_FEATURE(quorum_con_features, SERVER_OCTOPUS));
    new_features.incompat.insert(CEPH_MON_FEATURE_INCOMPAT_OCTOPUS);
  }
  if (monmap_features.contains_all(ceph::features::mon::FEATURE_PACIFIC)) {
    ceph_assert(ceph::features::mon::get_persistent().contains_all(
           ceph::features::mon::FEATURE_PACIFIC));
    // this feature should only ever be set if the quorum supports it.
    ceph_assert(HAVE_FEATURE(quorum_con_features, SERVER_PACIFIC));
    new_features.incompat.insert(CEPH_MON_FEATURE_INCOMPAT_PACIFIC);
  }
  if (monmap_features.contains_all(ceph::features::mon::FEATURE_QUINCY)) {
    ceph_assert(ceph::features::mon::get_persistent().contains_all(
           ceph::features::mon::FEATURE_QUINCY));
    // this feature should only ever be set if the quorum supports it.
    ceph_assert(HAVE_FEATURE(quorum_con_features, SERVER_QUINCY));
    new_features.incompat.insert(CEPH_MON_FEATURE_INCOMPAT_QUINCY);
  }
  if (monmap_features.contains_all(ceph::features::mon::FEATURE_REEF)) {
    ceph_assert(ceph::features::mon::get_persistent().contains_all(
           ceph::features::mon::FEATURE_REEF));
    // this feature should only ever be set if the quorum supports it.
    ceph_assert(HAVE_FEATURE(quorum_con_features, SERVER_REEF));
    new_features.incompat.insert(CEPH_MON_FEATURE_INCOMPAT_REEF);
  }

  dout(5) << __func__ << dendl;
  _apply_compatset_features(new_features);
}
// Recompute required_features — the connection feature bits a peer must
// advertise before we will talk to it.  Derived from two sources: the
// persisted CompatSet (quorum-driven) and the monmap's required features.
void Monitor::calc_quorum_requirements()
{
  required_features = 0;

  // compatset
  if (features.incompat.contains(CEPH_MON_FEATURE_INCOMPAT_OSDMAP_ENC)) {
    required_features |= CEPH_FEATURE_OSDMAP_ENC;
  }
  if (features.incompat.contains(CEPH_MON_FEATURE_INCOMPAT_KRAKEN)) {
    required_features |= CEPH_FEATUREMASK_SERVER_KRAKEN;
  }
  if (features.incompat.contains(CEPH_MON_FEATURE_INCOMPAT_LUMINOUS)) {
    required_features |= CEPH_FEATUREMASK_SERVER_LUMINOUS;
  }
  if (features.incompat.contains(CEPH_MON_FEATURE_INCOMPAT_MIMIC)) {
    required_features |= CEPH_FEATUREMASK_SERVER_MIMIC;
  }
  if (features.incompat.contains(CEPH_MON_FEATURE_INCOMPAT_NAUTILUS)) {
    // nautilus additionally requires the v2 cephx protocol
    required_features |= CEPH_FEATUREMASK_SERVER_NAUTILUS |
      CEPH_FEATUREMASK_CEPHX_V2;
  }
  if (features.incompat.contains(CEPH_MON_FEATURE_INCOMPAT_OCTOPUS)) {
    required_features |= CEPH_FEATUREMASK_SERVER_OCTOPUS;
  }
  if (features.incompat.contains(CEPH_MON_FEATURE_INCOMPAT_PACIFIC)) {
    required_features |= CEPH_FEATUREMASK_SERVER_PACIFIC;
  }
  if (features.incompat.contains(CEPH_MON_FEATURE_INCOMPAT_QUINCY)) {
    required_features |= CEPH_FEATUREMASK_SERVER_QUINCY;
  }
  if (features.incompat.contains(CEPH_MON_FEATURE_INCOMPAT_REEF)) {
    required_features |= CEPH_FEATUREMASK_SERVER_REEF;
  }

  // monmap
  if (monmap->get_required_features().contains_all(
        ceph::features::mon::FEATURE_KRAKEN)) {
    required_features |= CEPH_FEATUREMASK_SERVER_KRAKEN;
  }
  if (monmap->get_required_features().contains_all(
        ceph::features::mon::FEATURE_LUMINOUS)) {
    required_features |= CEPH_FEATUREMASK_SERVER_LUMINOUS;
  }
  if (monmap->get_required_features().contains_all(
        ceph::features::mon::FEATURE_MIMIC)) {
    required_features |= CEPH_FEATUREMASK_SERVER_MIMIC;
  }
  if (monmap->get_required_features().contains_all(
        ceph::features::mon::FEATURE_NAUTILUS)) {
    required_features |= CEPH_FEATUREMASK_SERVER_NAUTILUS |
      CEPH_FEATUREMASK_CEPHX_V2;
  }
  dout(10) << __func__ << " required_features " << required_features << dendl;
}
// Merge into @fm the features of our local sessions plus the feature
// maps reported by every other quorum member.
void Monitor::get_combined_feature_map(FeatureMap *fm)
{
  *fm += session_map.feature_map;
  for (auto peer : quorum) {
    if (peer == rank) {
      continue;  // our own sessions are already counted above
    }
    *fm += quorum_feature_map[peer];
  }
}
// Persist a marker forcing a full store sync the next time this
// monitor starts, and report the action through @f.
void Monitor::sync_force(Formatter *f)
{
  auto t = std::make_shared<MonitorDBStore::Transaction>();
  sync_stash_critical_state(t);
  t->put("mon_sync", "force_sync", 1);
  store->apply_transaction(t);

  f->open_object_section("sync_force");
  f->dump_int("ret", 0);
  f->dump_stream("msg") << "forcing store sync the next time the monitor starts";
  f->close_section(); // sync_force
}
// Dump the quorum state (epoch, members, leader, features, monmap)
// through @f, flushing the result into @ss.  When no formatter is
// supplied we default to JSON and own its lifetime here.
void Monitor::_quorum_status(Formatter *f, ostream& ss)
{
  std::unique_ptr<Formatter> fallback;
  if (!f) {
    fallback.reset(new JSONFormatter());
    f = fallback.get();
  }

  f->open_object_section("quorum_status");
  f->dump_int("election_epoch", get_epoch());

  f->open_array_section("quorum");
  for (int member : quorum)
    f->dump_int("mon", member);
  f->close_section(); // quorum

  f->open_array_section("quorum_names");
  for (const auto& qname : get_quorum_names())
    f->dump_string("mon", qname);
  f->close_section(); // quorum_names

  f->dump_string("quorum_leader_name",
                 quorum.empty() ? string() : monmap->get_name(leader));
  if (!quorum.empty()) {
    f->dump_int("quorum_age", quorum_age());
  }

  f->open_object_section("features");
  f->dump_stream("quorum_con") << quorum_con_features;
  quorum_mon_features.dump(f, "quorum_mon");
  f->close_section();

  f->open_object_section("monmap");
  monmap->dump(f);
  f->close_section(); // monmap

  f->close_section(); // quorum_status
  f->flush(ss);
  // fallback (if any) is released automatically
}
// Dump this monitor's own status: identity, election state, feature
// requirements, probe/sync state, and the current monmap.
void Monitor::get_mon_status(Formatter *f)
{
  f->open_object_section("mon_status");
  f->dump_string("name", name);
  f->dump_int("rank", rank);
  f->dump_string("state", get_state_name());
  f->dump_int("election_epoch", get_epoch());

  f->open_array_section("quorum");
  for (int member : quorum) {
    f->dump_int("mon", member);
  }
  f->close_section(); // quorum

  if (!quorum.empty()) {
    f->dump_int("quorum_age", quorum_age());
  }

  f->open_object_section("features");
  f->dump_stream("required_con") << required_features;
  mon_feature_t required_mon = get_required_mon_features();
  required_mon.dump(f, "required_mon");
  f->dump_stream("quorum_con") << quorum_con_features;
  quorum_mon_features.dump(f, "quorum_mon");
  f->close_section(); // features

  f->open_array_section("outside_quorum");
  for (const auto& mon_name : outside_quorum)
    f->dump_string("mon", mon_name);
  f->close_section(); // outside_quorum

  f->open_array_section("extra_probe_peers");
  for (const auto& peer : extra_probe_peers) {
    f->dump_object("peer", peer);
  }
  f->close_section(); // extra_probe_peers

  f->open_array_section("sync_provider");
  for (const auto& entry : sync_providers) {
    const auto& provider = entry.second;
    f->dump_unsigned("cookie", provider.cookie);
    f->dump_object("addrs", provider.addrs);
    f->dump_stream("timeout") << provider.timeout;
    f->dump_unsigned("last_committed", provider.last_committed);
    f->dump_stream("last_key") << provider.last_key;
  }
  f->close_section();

  if (is_synchronizing()) {
    f->open_object_section("sync");
    f->dump_stream("sync_provider") << sync_provider;
    f->dump_unsigned("sync_cookie", sync_cookie);
    f->dump_unsigned("sync_start_version", sync_start_version);
    f->close_section();
  }

  // debug knobs: only reported when set
  if (g_conf()->mon_sync_provider_kill_at > 0)
    f->dump_int("provider_kill_at", g_conf()->mon_sync_provider_kill_at);
  if (g_conf()->mon_sync_requester_kill_at > 0)
    f->dump_int("requester_kill_at", g_conf()->mon_sync_requester_kill_at);

  f->open_object_section("monmap");
  monmap->dump(f);
  f->close_section();

  f->dump_object("feature_map", session_map.feature_map);
  f->dump_bool("stretch_mode", stretch_mode_engaged);
  f->close_section(); // mon_status
}
// health status to clog
// (Re)arm the periodic health-to-clog tick.  The scheduled callback
// re-invokes this function, so ticking continues until stopped or the
// option is disabled.
void Monitor::health_tick_start()
{
  if (!cct->_conf->mon_health_to_clog ||
      cct->_conf->mon_health_to_clog_tick_interval <= 0)
    return;

  dout(15) << __func__ << dendl;
  health_tick_stop();
  health_tick_event = timer.add_event_after(
    cct->_conf->mon_health_to_clog_tick_interval,
    new C_MonContext{this, [this](int r) {
        if (r >= 0)
          health_tick_start();  // re-arm for the next tick
      }});
}
// Cancel the pending health tick event, if any.
void Monitor::health_tick_stop()
{
  dout(15) << __func__ << dendl;
  if (!health_tick_event) {
    return;
  }
  timer.cancel_event(health_tick_event);
  health_tick_event = NULL;
}
// Compute when the next interval-based health update should fire:
// "now" rounded up to the next multiple of the configured interval, so
// updates land on aligned wall-clock boundaries.
ceph::real_clock::time_point Monitor::health_interval_calc_next_update()
{
  const auto now = ceph::real_clock::now();
  const auto since_epoch =
    std::chrono::duration_cast<std::chrono::seconds>(now.time_since_epoch());
  const int interval = cct->_conf->mon_health_to_clog_interval;
  // seconds until the next aligned boundary (a full interval if we are
  // exactly on one)
  const int until_next = interval - (since_epoch.count() % interval);
  const auto next = since_epoch + std::chrono::seconds(until_next);
  dout(20) << __func__
           << " now: " << now << ","
           << " next: " << next << ","
           << " interval: " << interval
           << dendl;
  return ceph::real_clock::time_point{next};
}
// Schedule the next interval-based health-to-clog update at an aligned
// wall-clock time.
void Monitor::health_interval_start()
{
  dout(15) << __func__ << dendl;
  if (!cct->_conf->mon_health_to_clog ||
      cct->_conf->mon_health_to_clog_interval <= 0) {
    return;
  }

  health_interval_stop();
  auto when = health_interval_calc_next_update();
  health_interval_event = new C_MonContext{this, [this](int r) {
      if (r >= 0)
        do_health_to_clog_interval();
    }};
  if (!timer.add_event_at(when, health_interval_event)) {
    // the timer rejected the event (presumably it is shutting down —
    // TODO confirm add_event_at's failure semantics)
    health_interval_event = nullptr;
  }
}
// Cancel the pending interval health update, if any.
void Monitor::health_interval_stop()
{
  dout(15) << __func__ << dendl;
  if (health_interval_event != nullptr) {
    timer.cancel_event(health_interval_event);
    health_interval_event = nullptr;
  }
}
// Tear down both health timers and drop the cached health summary.
void Monitor::health_events_cleanup()
{
  health_interval_stop();
  health_tick_stop();
  health_status_cache.reset();
}
// React to configuration changes affecting health-to-clog reporting:
// start/stop the tick and interval timers to match the new settings.
void Monitor::health_to_clog_update_conf(const std::set<std::string> &changed)
{
  dout(20) << __func__ << dendl;

  if (changed.count("mon_health_to_clog")) {
    if (!cct->_conf->mon_health_to_clog) {
      // feature turned off: tear everything down and stop here
      health_events_cleanup();
      return;
    }
    // feature turned on: arm whichever timers are not yet running
    if (!health_tick_event) {
      health_tick_start();
    }
    if (!health_interval_event) {
      health_interval_start();
    }
  }

  if (changed.count("mon_health_to_clog_interval")) {
    if (cct->_conf->mon_health_to_clog_interval <= 0) {
      health_interval_stop();
    } else {
      health_interval_start();
    }
  }

  if (changed.count("mon_health_to_clog_tick_interval")) {
    if (cct->_conf->mon_health_to_clog_tick_interval <= 0) {
      health_tick_stop();
    } else {
      health_tick_start();
    }
  }
}
// Timer callback for the interval-based health update: force an update
// to the cluster log, then schedule the next one.
void Monitor::do_health_to_clog_interval()
{
  // the option may have been disabled since this event was scheduled
  if (!cct->_conf->mon_health_to_clog ||
      cct->_conf->mon_health_to_clog_interval <= 0) {
    return;
  }

  dout(10) << __func__ << dendl;
  do_health_to_clog(true);
  health_interval_start();
}
// Emit the current health summary to the cluster log.  Unless @force
// is set, nothing is logged when the status matches what we last
// logged (health_status_cache).
void Monitor::do_health_to_clog(bool force)
{
  // the option may have been disabled since this was scheduled
  if (!cct->_conf->mon_health_to_clog ||
      cct->_conf->mon_health_to_clog_interval <= 0)
    return;

  dout(10) << __func__ << (force ? " (force)" : "") << dendl;

  string summary;
  health_status_t level = healthmon()->get_health_status(false, nullptr, &summary);
  const bool unchanged = (summary == health_status_cache.summary &&
                          level == health_status_cache.overall);
  if (unchanged && !force)
    return;

  if (g_conf()->mon_health_detail_to_clog &&
      summary != health_status_cache.summary &&
      level != HEALTH_OK) {
    // configured for detail and the summary actually changed: log the
    // detailed health report instead
    string details;
    level = healthmon()->get_health_status(true, nullptr, &details);
    clog->health(level) << "Health detail: " << details;
  } else {
    clog->health(level) << "overall " << summary;
  }

  // remember what we logged so an unchanged status is not repeated
  health_status_cache.summary = summary;
  health_status_cache.overall = level;
}
// Emit cluster-log messages for health checks that were raised, changed,
// or cleared between @previous and @updated, rate-limiting repeated
// same-severity updates via health_check_log_times.
//
// @param updated   the new set of health checks
// @param previous  the prior set of health checks
// @param t         transaction of the update (see FIXME below; clog is
//                  used instead of logging through @t)
void Monitor::log_health(
  const health_check_map_t& updated,
  const health_check_map_t& previous,
  MonitorDBStore::TransactionRef t)
{
  if (!g_conf()->mon_health_to_clog) {
    return;
  }

  const utime_t now = ceph_clock_now();

  // FIXME: log atomically as part of @t instead of using clog.
  dout(10) << __func__ << " updated " << updated.checks.size()
           << " previous " << previous.checks.size()
           << dendl;
  const auto min_log_period = g_conf().get_val<int64_t>(
      "mon_health_log_update_period");
  // pass 1: checks that are new or whose summary/severity changed
  for (auto& p : updated.checks) {
    auto q = previous.checks.find(p.first);
    bool logged = false;
    if (q == previous.checks.end()) {
      // new
      ostringstream ss;
      ss << "Health check failed: " << p.second.summary << " ("
         << p.first << ")";
      clog->health(p.second.severity) << ss.str();
      logged = true;
    } else {
      if (p.second.summary != q->second.summary ||
          p.second.severity != q->second.severity) {
        auto status_iter = health_check_log_times.find(p.first);
        if (status_iter != health_check_log_times.end()) {
          if (p.second.severity == q->second.severity &&
              now - status_iter->second.updated_at < min_log_period) {
            // We already logged this recently and the severity is unchanged,
            // so skip emitting an update of the summary string.
            // We'll get an update out of tick() later if the check
            // is still failing.
            continue;
          }
        }
        // summary or severity changed (ignore detail changes at this level)
        ostringstream ss;
        ss << "Health check update: " << p.second.summary << " (" << p.first << ")";
        clog->health(p.second.severity) << ss.str();
        logged = true;
      }
    }
    // Record the time at which we last logged, so that we can check this
    // when considering whether/when to print update messages.
    if (logged) {
      auto iter = health_check_log_times.find(p.first);
      if (iter == health_check_log_times.end()) {
        health_check_log_times.emplace(p.first, HealthCheckLogStatus(
          p.second.severity, p.second.summary, now));
      } else {
        iter->second = HealthCheckLogStatus(
          p.second.severity, p.second.summary, now);
      }
    }
  }
  // pass 2: checks that were present before but are gone now
  for (auto& p : previous.checks) {
    if (!updated.checks.count(p.first)) {
      // cleared
      ostringstream ss;
      if (p.first == "DEGRADED_OBJECTS") {
        clog->info() << "All degraded objects recovered";
      } else if (p.first == "OSD_FLAGS") {
        clog->info() << "OSD flags cleared";
      } else {
        clog->info() << "Health check cleared: " << p.first << " (was: "
                     << p.second.summary << ")";
      }
      if (health_check_log_times.count(p.first)) {
        health_check_log_times.erase(p.first);
      }
    }
  }

  if (previous.checks.size() && updated.checks.size() == 0) {
    // We might be going into a fully healthy state, check
    // other subsystems
    bool any_checks = false;
    for (auto& svc : paxos_service) {
      if (&(svc->get_health_checks()) == &(previous)) {
        // Ignore the ones we're clearing right now
        continue;
      }
      if (svc->get_health_checks().checks.size() > 0) {
        any_checks = true;
        break;
      }
    }
    if (!any_checks) {
      clog->info() << "Cluster is now healthy";
    }
  }
}
// Collect the metadata we would report right now and push it to the
// mgr only if the short ceph version string changed (e.g. after an
// upgrade), avoiding redundant updates.
//
// Note: the original code looked up mon_metadata[rank] twice and used
// current.compare(0, current.size(), pending), which — with the length
// equal to the whole string — is just a full-string comparison; this
// is the equivalent direct form with a single lookup.
void Monitor::update_pending_metadata()
{
  Metadata metadata;
  collect_metadata(&metadata);
  // operator[] inserts an empty entry if we have none for our rank yet,
  // in which case any non-empty pending version triggers an update.
  const std::string& current_version = mon_metadata[rank]["ceph_version_short"];
  const std::string& pending_version = metadata["ceph_version_short"];

  if (current_version != pending_version) {
    mgr_client.update_daemon_metadata("mon", name, metadata);
  }
}
// Render "ceph status": the structured form via @f when a formatter is
// supplied, otherwise the human-readable multi-line text into @ss.
// @session is used to filter the fsmap down to the filesystems the
// caller is allowed to see.
void Monitor::get_cluster_status(stringstream &ss, Formatter *f,
                                 MonSession *session)
{
  if (f)
    f->open_object_section("status");

  // restrict filesystem output to what this session may see
  const auto&& fs_names = session->get_allowed_fs_names();

  if (f) {
    f->dump_stream("fsid") << monmap->get_fsid();
    healthmon()->get_health_status(false, f, nullptr);
    f->dump_unsigned("election_epoch", get_epoch());
    {
      f->open_array_section("quorum");
      for (set<int>::iterator p = quorum.begin(); p != quorum.end(); ++p)
        f->dump_int("rank", *p);
      f->close_section();
      f->open_array_section("quorum_names");
      for (set<int>::iterator p = quorum.begin(); p != quorum.end(); ++p)
        f->dump_string("id", monmap->get_name(*p));
      f->close_section();
      f->dump_int(
        "quorum_age",
        quorum_age());
    }
    f->open_object_section("monmap");
    monmap->dump_summary(f);
    f->close_section();
    f->open_object_section("osdmap");
    osdmon()->osdmap.print_summary(f, cout, string(12, ' '));
    f->close_section();
    f->open_object_section("pgmap");
    mgrstatmon()->print_summary(f, NULL);
    f->close_section();
    f->open_object_section("fsmap");

    // work on a filtered copy of the fsmap
    FSMap fsmap_copy = mdsmon()->get_fsmap();
    if (!fs_names.empty()) {
      fsmap_copy.filter(fs_names);
    }
    const FSMap *fsmapp = &fsmap_copy;

    fsmapp->print_summary(f, NULL);
    f->close_section();
    f->open_object_section("mgrmap");
    mgrmon()->get_map().print_summary(f, nullptr);
    f->close_section();

    f->dump_object("servicemap", mgrstatmon()->get_service_map());

    f->open_object_section("progress_events");
    for (auto& i : mgrstatmon()->get_progress_events()) {
      f->dump_object(i.first.c_str(), i.second);
    }
    f->close_section();

    f->close_section();
  } else {
    ss << " cluster:\n";
    ss << " id: " << monmap->get_fsid() << "\n";

    string health;
    healthmon()->get_health_status(false, nullptr, &health,
                                   "\n ", "\n ");
    ss << " health: " << health << "\n";

    ss << "\n \n services:\n";
    {
      // align service values on the widest service name ("mon" = 3)
      size_t maxlen = 3;
      auto& service_map = mgrstatmon()->get_service_map();
      for (auto& p : service_map.services) {
        maxlen = std::max(maxlen, p.first.size());
      }
      string spacing(maxlen - 3, ' ');
      const auto quorum_names = get_quorum_names();
      const auto mon_count = monmap->mon_info.size();
      auto mnow = ceph::mono_clock::now();
      ss << " mon: " << spacing << mon_count << " daemons, quorum "
         << quorum_names << " (age " << timespan_str(mnow - quorum_since) << ")";
      if (quorum_names.size() != mon_count) {
        // list the monitors currently outside the quorum
        std::list<std::string> out_of_q;
        for (size_t i = 0; i < monmap->ranks.size(); ++i) {
          if (quorum.count(i) == 0) {
            out_of_q.push_back(monmap->ranks[i]);
          }
        }
        ss << ", out of quorum: " << joinify(out_of_q.begin(),
                                             out_of_q.end(), std::string(", "));
      }
      ss << "\n";
      if (mgrmon()->in_use()) {
        ss << " mgr: " << spacing;
        mgrmon()->get_map().print_summary(nullptr, &ss);
        ss << "\n";
      }

      FSMap fsmap_copy = mdsmon()->get_fsmap();
      if (!fs_names.empty()) {
        fsmap_copy.filter(fs_names);
      }
      const FSMap *fsmapp = &fsmap_copy;

      if (fsmapp->filesystem_count() > 0 and mdsmon()->should_print_status()){
        ss << " mds: " << spacing;
        fsmapp->print_daemon_summary(ss);
        ss << "\n";
      }

      ss << " osd: " << spacing;
      osdmon()->osdmap.print_summary(NULL, ss, string(maxlen + 6, ' '));
      ss << "\n";
      // non-ceph services registered in the service map
      for (auto& p : service_map.services) {
        const std::string &service = p.first;
        // filter out normal ceph entity types
        if (ServiceMap::is_normal_ceph_entity(service)) {
          continue;
        }
        ss << " " << p.first << ": " << string(maxlen - p.first.size(), ' ')
           << p.second.get_summary() << "\n";
      }
    }

    // only print a task-status section if some service has running tasks
    if (auto& service_map = mgrstatmon()->get_service_map();
        std::any_of(service_map.services.begin(),
                    service_map.services.end(),
                    [](auto& service) {
                      return service.second.has_running_tasks();
                    })) {
      ss << "\n \n task status:\n";
      for (auto& [name, service] : service_map.services) {
        ss << service.get_task_summary(name);
      }
    }

    ss << "\n \n data:\n";
    mdsmon()->print_fs_summary(ss);
    mgrstatmon()->print_summary(NULL, &ss);

    auto& pem = mgrstatmon()->get_progress_events();
    if (!pem.empty()) {
      ss << "\n \n progress:\n";
      for (auto& i : pem) {
        if (i.second.add_to_ceph_s){
          ss << " " << i.second.message << "\n";
        }
      }
    }
    ss << "\n ";
  }
}
void Monitor::_generate_command_map(cmdmap_t& cmdmap,
map<string,string> ¶m_str_map)
{
for (auto p = cmdmap.begin(); p != cmdmap.end(); ++p) {
if (p->first == "prefix")
continue;
if (p->first == "caps") {
vector<string> cv;
if (cmd_getval(cmdmap, "caps", cv) &&
cv.size() % 2 == 0) {
for (unsigned i = 0; i < cv.size(); i += 2) {
string k = string("caps_") + cv[i];
param_str_map[k] = cv[i + 1];
}
continue;
}
}
param_str_map[p->first] = cmd_vartype_stringify(p->second);
}
}
// Return the first command in @cmds whose cmdstring begins with
// @cmd_prefix, or nullptr when none matches.
const MonCommand *Monitor::_get_moncommand(
  const string &cmd_prefix,
  const vector<MonCommand>& cmds)
{
  for (const auto& candidate : cmds) {
    const bool prefix_matches =
      candidate.cmdstring.compare(0, cmd_prefix.size(), cmd_prefix) == 0;
    if (prefix_matches) {
      return &candidate;
    }
  }
  return nullptr;
}
// Check whether session @s has the caps required to run @this_cmd
// against @module/@prefix with the given parameters.
bool Monitor::_allowed_command(MonSession *s, const string &module,
                               const string &prefix, const cmdmap_t& cmdmap,
                               const map<string,string>& param_str_map,
                               const MonCommand *this_cmd) {
  // permissions the command itself declares
  const bool need_r = this_cmd->requires_perm('r');
  const bool need_w = this_cmd->requires_perm('w');
  const bool need_x = this_cmd->requires_perm('x');

  const bool capable = s->caps.is_capable(
    g_ceph_context,
    s->entity_name,
    module, prefix, param_str_map,
    need_r, need_w, need_x,
    s->get_peer_socket_addr());

  dout(10) << __func__ << " " << (capable ? "" : "not ") << "capable" << dendl;
  return capable;
}
// Serialize @commands as a "command_descriptions" object via @f into
// @rdata, one cmdNNN section per command (zero-padded index order).
void Monitor::format_command_descriptions(const std::vector<MonCommand> &commands,
                                          Formatter *f,
                                          uint64_t features,
                                          bufferlist *rdata)
{
  f->open_object_section("command_descriptions");
  int idx = 0;
  for (const auto &cmd : commands) {
    // keep the historical narrowing to unsigned for the flags argument
    const unsigned flags = cmd.flags;
    ostringstream secname;
    secname << "cmd" << setfill('0') << std::setw(3) << idx;
    dump_cmddesc_to_json(f, features, secname.str(),
                         cmd.cmdstring, cmd.helpstring, cmd.module,
                         cmd.req_perms, flags);
    ++idx;
  }
  f->close_section(); // command_descriptions

  f->flush(*rdata);
}
bool Monitor::is_keyring_required()
{
return auth_cluster_required.is_supported_auth(CEPH_AUTH_CEPHX) ||
auth_service_required.is_supported_auth(CEPH_AUTH_CEPHX) ||
auth_cluster_required.is_supported_auth(CEPH_AUTH_GSS) ||
auth_service_required.is_supported_auth(CEPH_AUTH_GSS);
}
// Completion for a command proxied to the mgr: sends the mgr's reply
// back to the original requester and releases the bytes this command
// reserved against the monitor's proxy budget (mgr_proxy_bytes).
struct C_MgrProxyCommand : public Context {
  Monitor *mon;
  MonOpRequestRef op;  // the original client command request
  uint64_t size;       // bytes accounted against mon->mgr_proxy_bytes
  bufferlist outbl;    // reply payload, filled in by the mgr client
  string outs;         // reply status string, filled in by the mgr client
  C_MgrProxyCommand(Monitor *mon, MonOpRequestRef op, uint64_t s)
    : mon(mon), op(op), size(s) { }
  void finish(int r) {
    // take the monitor lock: we adjust shared accounting and reply
    std::lock_guard l(mon->lock);
    mon->mgr_proxy_bytes -= size;
    mon->reply_command(op, r, outs, outbl, 0);
  }
};
// Handle an MCommand ("tell") addressed directly to this monitor.
// Validates the fsid, session, command syntax, obsolescence and caps,
// then hands the message to the admin socket's tell machinery.
void Monitor::handle_tell_command(MonOpRequestRef op)
{
  ceph_assert(op->is_type_command());
  MCommand *m = static_cast<MCommand*>(op->get_req());
  if (m->fsid != monmap->fsid) {
    dout(0) << "handle_command on fsid " << m->fsid << " != " << monmap->fsid << dendl;
    return reply_tell_command(op, -EACCES, "wrong fsid");
  }
  MonSession *session = op->get_session();
  if (!session) {
    // no session means nothing to authenticate against; drop silently
    dout(5) << __func__ << " dropping stray message " << *m << dendl;
    return;
  }
  cmdmap_t cmdmap;
  // parse the JSON command vector; ss collects the parse error, if any
  if (stringstream ss; !cmdmap_from_json(m->cmd, &cmdmap, ss)) {
    return reply_tell_command(op, -EINVAL, ss.str());
  }
  map<string,string> param_str_map;
  _generate_command_map(cmdmap, param_str_map);
  string prefix;
  if (!cmd_getval(cmdmap, "prefix", prefix)) {
    return reply_tell_command(op, -EINVAL, "no prefix");
  }
  // refuse obsolete commands (and deprecated ones when configured to
  // treat deprecated as obsolete)
  if (auto cmd = _get_moncommand(prefix,
                                 get_local_commands(quorum_mon_features));
      cmd) {
    if (cmd->is_obsolete() ||
        (cct->_conf->mon_debug_deprecated_as_obsolete &&
         cmd->is_deprecated())) {
      return reply_tell_command(op, -ENOTSUP,
                                "command is obsolete; "
                                "please check usage and/or man page");
    }
  }
  // see if command is allowed: tell commands require full r/w/x caps
  // on the mon service
  if (!session->caps.is_capable(
        g_ceph_context,
        session->entity_name,
        "mon", prefix, param_str_map,
        true, true, true,
        session->get_peer_socket_addr())) {
    return reply_tell_command(op, -EACCES, "insufficient caps");
  }
  // pass it to asok
  cct->get_admin_socket()->queue_tell_command(m);
}
void Monitor::handle_command(MonOpRequestRef op)
{
ceph_assert(op->is_type_command());
auto m = op->get_req<MMonCommand>();
if (m->fsid != monmap->fsid) {
dout(0) << "handle_command on fsid " << m->fsid << " != " << monmap->fsid
<< dendl;
reply_command(op, -EPERM, "wrong fsid", 0);
return;
}
MonSession *session = op->get_session();
if (!session) {
dout(5) << __func__ << " dropping stray message " << *m << dendl;
return;
}
if (m->cmd.empty()) {
reply_command(op, -EINVAL, "no command specified", 0);
return;
}
string prefix;
vector<string> fullcmd;
cmdmap_t cmdmap;
stringstream ss, ds;
bufferlist rdata;
string rs;
int r = -EINVAL;
rs = "unrecognized command";
if (!cmdmap_from_json(m->cmd, &cmdmap, ss)) {
// ss has reason for failure
r = -EINVAL;
rs = ss.str();
if (!m->get_source().is_mon()) // don't reply to mon->mon commands
reply_command(op, r, rs, 0);
return;
}
// check return value. If no prefix parameter provided,
// return value will be false, then return error info.
if (!cmd_getval(cmdmap, "prefix", prefix)) {
reply_command(op, -EINVAL, "command prefix not found", 0);
return;
}
// check prefix is empty
if (prefix.empty()) {
reply_command(op, -EINVAL, "command prefix must not be empty", 0);
return;
}
if (prefix == "get_command_descriptions") {
bufferlist rdata;
Formatter *f = Formatter::create("json");
std::vector<MonCommand> commands = static_cast<MgrMonitor*>(
paxos_service[PAXOS_MGR].get())->get_command_descs();
for (auto& c : leader_mon_commands) {
commands.push_back(c);
}
auto features = m->get_connection()->get_features();
format_command_descriptions(commands, f, features, &rdata);
delete f;
reply_command(op, 0, "", rdata, 0);
return;
}
dout(0) << "handle_command " << *m << dendl;
string format = cmd_getval_or<string>(cmdmap, "format", "plain");
boost::scoped_ptr<Formatter> f(Formatter::create(format));
get_str_vec(prefix, fullcmd);
// make sure fullcmd is not empty.
// invalid prefix will cause empty vector fullcmd.
// such as, prefix=";,,;"
if (fullcmd.empty()) {
reply_command(op, -EINVAL, "command requires a prefix to be valid", 0);
return;
}
std::string_view module = fullcmd[0];
// validate command is in leader map
const MonCommand *leader_cmd;
const auto& mgr_cmds = mgrmon()->get_command_descs();
const MonCommand *mgr_cmd = nullptr;
if (!mgr_cmds.empty()) {
mgr_cmd = _get_moncommand(prefix, mgr_cmds);
}
leader_cmd = _get_moncommand(prefix, leader_mon_commands);
if (!leader_cmd) {
leader_cmd = mgr_cmd;
if (!leader_cmd) {
reply_command(op, -EINVAL, "command not known", 0);
return;
}
}
// validate command is in our map & matches, or forward if it is allowed
const MonCommand *mon_cmd = _get_moncommand(
prefix,
get_local_commands(quorum_mon_features));
if (!mon_cmd) {
mon_cmd = mgr_cmd;
}
if (!is_leader()) {
if (!mon_cmd) {
if (leader_cmd->is_noforward()) {
reply_command(op, -EINVAL,
"command not locally supported and not allowed to forward",
0);
return;
}
dout(10) << "Command not locally supported, forwarding request "
<< m << dendl;
forward_request_leader(op);
return;
} else if (!mon_cmd->is_compat(leader_cmd)) {
if (mon_cmd->is_noforward()) {
reply_command(op, -EINVAL,
"command not compatible with leader and not allowed to forward",
0);
return;
}
dout(10) << "Command not compatible with leader, forwarding request "
<< m << dendl;
forward_request_leader(op);
return;
}
}
if (mon_cmd->is_obsolete() ||
(cct->_conf->mon_debug_deprecated_as_obsolete
&& mon_cmd->is_deprecated())) {
reply_command(op, -ENOTSUP,
"command is obsolete; please check usage and/or man page",
0);
return;
}
if (session->proxy_con && mon_cmd->is_noforward()) {
dout(10) << "Got forward for noforward command " << m << dendl;
reply_command(op, -EINVAL, "forward for noforward command", rdata, 0);
return;
}
/* what we perceive as being the service the command falls under */
string service(mon_cmd->module);
dout(25) << __func__ << " prefix='" << prefix
<< "' module='" << module
<< "' service='" << service << "'" << dendl;
bool cmd_is_rw =
(mon_cmd->requires_perm('w') || mon_cmd->requires_perm('x'));
// validate user's permissions for requested command
map<string,string> param_str_map;
// Catch bad_cmd_get exception if _generate_command_map() throws it
try {
_generate_command_map(cmdmap, param_str_map);
} catch (const bad_cmd_get& e) {
reply_command(op, -EINVAL, e.what(), 0);
return;
}
if (!_allowed_command(session, service, prefix, cmdmap,
param_str_map, mon_cmd)) {
dout(1) << __func__ << " access denied" << dendl;
if (prefix != "config set" && prefix != "config-key set")
(cmd_is_rw ? audit_clog->info() : audit_clog->debug())
<< "from='" << session->name << " " << session->addrs << "' "
<< "entity='" << session->entity_name << "' "
<< "cmd=" << m->cmd << ": access denied";
reply_command(op, -EACCES, "access denied", 0);
return;
}
if (prefix != "config set" && prefix != "config-key set")
(cmd_is_rw ? audit_clog->info() : audit_clog->debug())
<< "from='" << session->name << " " << session->addrs << "' "
<< "entity='" << session->entity_name << "' "
<< "cmd=" << m->cmd << ": dispatch";
// compat kludge for legacy clients trying to tell commands that are
// new. see bottom of MonCommands.h. we need to handle both (1)
// pre-octopus clients and (2) octopus clients with a mix of pre-octopus
// and octopus mons.
if ((!HAVE_FEATURE(m->get_connection()->get_features(), SERVER_OCTOPUS) ||
monmap->min_mon_release < ceph_release_t::octopus) &&
(prefix == "injectargs" ||
prefix == "smart" ||
prefix == "mon_status" ||
prefix == "heap")) {
if (m->get_connection()->get_messenger() == 0) {
// Prior to octopus, monitors might forward these messages
// around. that was broken at baseline, and if we try to process
// this message now, it will assert out when we try to send a
// message in reply from the asok/tell worker (see
// AnonConnection). Just reply with an error.
dout(5) << __func__ << " failing forwarded command from a (presumably) "
<< "pre-octopus peer" << dendl;
reply_command(
op, -EBUSY,
"failing forwarded tell command in mixed-version mon cluster", 0);
return;
}
dout(5) << __func__ << " passing command to tell/asok" << dendl;
cct->get_admin_socket()->queue_tell_command(m);
return;
}
if (mon_cmd->is_mgr()) {
const auto& hdr = m->get_header();
uint64_t size = hdr.front_len + hdr.middle_len + hdr.data_len;
uint64_t max = g_conf().get_val<Option::size_t>("mon_client_bytes")
* g_conf().get_val<double>("mon_mgr_proxy_client_bytes_ratio");
if (mgr_proxy_bytes + size > max) {
dout(10) << __func__ << " current mgr proxy bytes " << mgr_proxy_bytes
<< " + " << size << " > max " << max << dendl;
reply_command(op, -EAGAIN, "hit limit on proxied mgr commands", rdata, 0);
return;
}
mgr_proxy_bytes += size;
dout(10) << __func__ << " proxying mgr command (+" << size
<< " -> " << mgr_proxy_bytes << ")" << dendl;
C_MgrProxyCommand *fin = new C_MgrProxyCommand(this, op, size);
mgr_client.start_command(m->cmd,
m->get_data(),
&fin->outbl,
&fin->outs,
new C_OnFinisher(fin, &finisher));
return;
}
if ((module == "mds" || module == "fs") &&
prefix != "fs authorize") {
mdsmon()->dispatch(op);
return;
}
if ((module == "osd" ||
prefix == "pg map" ||
prefix == "pg repeer") &&
prefix != "osd last-stat-seq") {
osdmon()->dispatch(op);
return;
}
if (module == "config") {
configmon()->dispatch(op);
return;
}
if (module == "mon" &&
/* Let the Monitor class handle the following commands:
* 'mon scrub'
*/
prefix != "mon scrub" &&
prefix != "mon metadata" &&
prefix != "mon versions" &&
prefix != "mon count-metadata" &&
prefix != "mon ok-to-stop" &&
prefix != "mon ok-to-add-offline" &&
prefix != "mon ok-to-rm") {
monmon()->dispatch(op);
return;
}
if (module == "health" && prefix != "health") {
healthmon()->dispatch(op);
return;
}
if (module == "auth" || prefix == "fs authorize") {
authmon()->dispatch(op);
return;
}
if (module == "log") {
logmon()->dispatch(op);
return;
}
if (module == "config-key") {
kvmon()->dispatch(op);
return;
}
if (module == "mgr") {
mgrmon()->dispatch(op);
return;
}
if (prefix == "fsid") {
if (f) {
f->open_object_section("fsid");
f->dump_stream("fsid") << monmap->fsid;
f->close_section();
f->flush(rdata);
} else {
ds << monmap->fsid;
rdata.append(ds);
}
reply_command(op, 0, "", rdata, 0);
return;
}
if (prefix == "mon scrub") {
wait_for_paxos_write();
if (is_leader()) {
int r = scrub_start();
reply_command(op, r, "", rdata, 0);
} else if (is_peon()) {
forward_request_leader(op);
} else {
reply_command(op, -EAGAIN, "no quorum", rdata, 0);
}
return;
}
if (prefix == "time-sync-status") {
if (!f)
f.reset(Formatter::create("json-pretty"));
f->open_object_section("time_sync");
if (!timecheck_skews.empty()) {
f->open_object_section("time_skew_status");
for (auto& i : timecheck_skews) {
double skew = i.second;
double latency = timecheck_latencies[i.first];
string name = monmap->get_name(i.first);
ostringstream tcss;
health_status_t tcstatus = timecheck_status(tcss, skew, latency);
f->open_object_section(name.c_str());
f->dump_float("skew", skew);
f->dump_float("latency", latency);
f->dump_stream("health") << tcstatus;
if (tcstatus != HEALTH_OK) {
f->dump_stream("details") << tcss.str();
}
f->close_section();
}
f->close_section();
}
f->open_object_section("timechecks");
f->dump_unsigned("epoch", get_epoch());
f->dump_int("round", timecheck_round);
f->dump_stream("round_status") << ((timecheck_round%2) ?
"on-going" : "finished");
f->close_section();
f->close_section();
f->flush(rdata);
r = 0;
rs = "";
} else if (prefix == "status" ||
prefix == "health" ||
prefix == "df") {
string detail;
cmd_getval(cmdmap, "detail", detail);
if (prefix == "status") {
// get_cluster_status handles f == NULL
get_cluster_status(ds, f.get(), session);
if (f) {
f->flush(ds);
ds << '\n';
}
rdata.append(ds);
} else if (prefix == "health") {
string plain;
healthmon()->get_health_status(detail == "detail", f.get(), f ? nullptr : &plain);
if (f) {
f->flush(ds);
rdata.append(ds);
} else {
rdata.append(plain);
}
} else if (prefix == "df") {
bool verbose = (detail == "detail");
if (f)
f->open_object_section("stats");
mgrstatmon()->dump_cluster_stats(&ds, f.get(), verbose);
if (!f) {
ds << "\n \n";
}
mgrstatmon()->dump_pool_stats(osdmon()->osdmap, &ds, f.get(), verbose);
if (f) {
f->close_section();
f->flush(ds);
ds << '\n';
}
} else {
ceph_abort_msg("We should never get here!");
return;
}
rdata.append(ds);
rs = "";
r = 0;
} else if (prefix == "report") {
// some of the report data is only known by leader, e.g. osdmap_clean_epochs
if (!is_leader() && !is_peon()) {
dout(10) << " waiting for quorum" << dendl;
waitfor_quorum.push_back(new C_RetryMessage(this, op));
return;
}
if (!is_leader()) {
forward_request_leader(op);
return;
}
// this must be formatted, in its current form
if (!f)
f.reset(Formatter::create("json-pretty"));
f->open_object_section("report");
f->dump_stream("cluster_fingerprint") << fingerprint;
f->dump_string("version", ceph_version_to_str());
f->dump_string("commit", git_version_to_str());
f->dump_stream("timestamp") << ceph_clock_now();
vector<string> tagsvec;
cmd_getval(cmdmap, "tags", tagsvec);
string tagstr = str_join(tagsvec, " ");
if (!tagstr.empty())
tagstr = tagstr.substr(0, tagstr.find_last_of(' '));
f->dump_string("tag", tagstr);
healthmon()->get_health_status(true, f.get(), nullptr);
monmon()->dump_info(f.get());
osdmon()->dump_info(f.get());
mdsmon()->dump_info(f.get());
authmon()->dump_info(f.get());
mgrstatmon()->dump_info(f.get());
logmon()->dump_info(f.get());
paxos->dump_info(f.get());
f->close_section();
f->flush(rdata);
ostringstream ss2;
ss2 << "report " << rdata.crc32c(CEPH_MON_PORT_LEGACY);
rs = ss2.str();
r = 0;
} else if (prefix == "osd last-stat-seq") {
int64_t osd = 0;
cmd_getval(cmdmap, "id", osd);
uint64_t seq = mgrstatmon()->get_last_osd_stat_seq(osd);
if (f) {
f->dump_unsigned("seq", seq);
f->flush(ds);
} else {
ds << seq;
rdata.append(ds);
}
rs = "";
r = 0;
} else if (prefix == "node ls") {
string node_type("all");
cmd_getval(cmdmap, "type", node_type);
if (!f)
f.reset(Formatter::create("json-pretty"));
if (node_type == "all") {
f->open_object_section("nodes");
print_nodes(f.get(), ds);
osdmon()->print_nodes(f.get());
mdsmon()->print_nodes(f.get());
mgrmon()->print_nodes(f.get());
f->close_section();
} else if (node_type == "mon") {
print_nodes(f.get(), ds);
} else if (node_type == "osd") {
osdmon()->print_nodes(f.get());
} else if (node_type == "mds") {
mdsmon()->print_nodes(f.get());
} else if (node_type == "mgr") {
mgrmon()->print_nodes(f.get());
}
f->flush(ds);
rdata.append(ds);
rs = "";
r = 0;
} else if (prefix == "features") {
if (!is_leader() && !is_peon()) {
dout(10) << " waiting for quorum" << dendl;
waitfor_quorum.push_back(new C_RetryMessage(this, op));
return;
}
if (!is_leader()) {
forward_request_leader(op);
return;
}
if (!f)
f.reset(Formatter::create("json-pretty"));
FeatureMap fm;
get_combined_feature_map(&fm);
f->dump_object("features", fm);
f->flush(rdata);
rs = "";
r = 0;
} else if (prefix == "mon metadata") {
if (!f)
f.reset(Formatter::create("json-pretty"));
string name;
bool all = !cmd_getval(cmdmap, "id", name);
if (!all) {
// Dump a single mon's metadata
int mon = monmap->get_rank(name);
if (mon < 0) {
rs = "requested mon not found";
r = -ENOENT;
goto out;
}
f->open_object_section("mon_metadata");
r = get_mon_metadata(mon, f.get(), ds);
f->close_section();
} else {
// Dump all mons' metadata
r = 0;
f->open_array_section("mon_metadata");
for (unsigned int rank = 0; rank < monmap->size(); ++rank) {
std::ostringstream get_err;
f->open_object_section("mon");
f->dump_string("name", monmap->get_name(rank));
r = get_mon_metadata(rank, f.get(), get_err);
f->close_section();
if (r == -ENOENT || r == -EINVAL) {
dout(1) << get_err.str() << dendl;
// Drop error, list what metadata we do have
r = 0;
} else if (r != 0) {
derr << "Unexpected error from get_mon_metadata: "
<< cpp_strerror(r) << dendl;
ds << get_err.str();
break;
}
}
f->close_section();
}
f->flush(ds);
rdata.append(ds);
rs = "";
} else if (prefix == "mon versions") {
if (!f)
f.reset(Formatter::create("json-pretty"));
count_metadata("ceph_version", f.get());
f->flush(ds);
rdata.append(ds);
rs = "";
r = 0;
} else if (prefix == "mon count-metadata") {
if (!f)
f.reset(Formatter::create("json-pretty"));
string field;
cmd_getval(cmdmap, "property", field);
count_metadata(field, f.get());
f->flush(ds);
rdata.append(ds);
rs = "";
r = 0;
} else if (prefix == "quorum_status") {
// make sure our map is readable and up to date
if (!is_leader() && !is_peon()) {
dout(10) << " waiting for quorum" << dendl;
waitfor_quorum.push_back(new C_RetryMessage(this, op));
return;
}
_quorum_status(f.get(), ds);
rdata.append(ds);
rs = "";
r = 0;
} else if (prefix == "mon ok-to-stop") {
vector<string> ids, invalid_ids;
if (!cmd_getval(cmdmap, "ids", ids)) {
r = -EINVAL;
goto out;
}
set<string> wouldbe;
for (auto rank : quorum) {
wouldbe.insert(monmap->get_name(rank));
}
for (auto& n : ids) {
if (monmap->contains(n)) {
wouldbe.erase(n);
} else {
invalid_ids.push_back(n);
}
}
if (!invalid_ids.empty()) {
r = 0;
rs = "invalid mon(s) specified: " + stringify(invalid_ids);
goto out;
}
if (wouldbe.size() < monmap->min_quorum_size()) {
r = -EBUSY;
rs = "not enough monitors would be available (" + stringify(wouldbe) +
") after stopping mons " + stringify(ids);
goto out;
}
r = 0;
rs = "quorum should be preserved (" + stringify(wouldbe) +
") after stopping " + stringify(ids);
} else if (prefix == "mon ok-to-add-offline") {
if (quorum.size() < monmap->min_quorum_size(monmap->size() + 1)) {
rs = "adding a monitor may break quorum (until that monitor starts)";
r = -EBUSY;
goto out;
}
rs = "adding another mon that is not yet online will not break quorum";
r = 0;
} else if (prefix == "mon ok-to-rm") {
string id;
if (!cmd_getval(cmdmap, "id", id)) {
r = -EINVAL;
rs = "must specify a monitor id";
goto out;
}
if (!monmap->contains(id)) {
r = 0;
rs = "mon." + id + " does not exist";
goto out;
}
int rank = monmap->get_rank(id);
if (quorum.count(rank) &&
quorum.size() - 1 < monmap->min_quorum_size(monmap->size() - 1)) {
r = -EBUSY;
rs = "removing mon." + id + " would break quorum";
goto out;
}
r = 0;
rs = "safe to remove mon." + id;
} else if (prefix == "version") {
if (f) {
f->open_object_section("version");
f->dump_string("version", pretty_version_to_str());
f->close_section();
f->flush(ds);
} else {
ds << pretty_version_to_str();
}
rdata.append(ds);
rs = "";
r = 0;
} else if (prefix == "versions") {
if (!f)
f.reset(Formatter::create("json-pretty"));
map<string,int> overall;
f->open_object_section("version");
map<string,int> mon, mgr, osd, mds;
count_metadata("ceph_version", &mon);
f->open_object_section("mon");
for (auto& p : mon) {
f->dump_int(p.first.c_str(), p.second);
overall[p.first] += p.second;
}
f->close_section();
mgrmon()->count_metadata("ceph_version", &mgr);
if (!mgr.empty()) {
f->open_object_section("mgr");
for (auto& p : mgr) {
f->dump_int(p.first.c_str(), p.second);
overall[p.first] += p.second;
}
f->close_section();
}
osdmon()->count_metadata("ceph_version", &osd);
if (!osd.empty()) {
f->open_object_section("osd");
for (auto& p : osd) {
f->dump_int(p.first.c_str(), p.second);
overall[p.first] += p.second;
}
f->close_section();
}
mdsmon()->count_metadata("ceph_version", &mds);
if (!mds.empty()) {
f->open_object_section("mds");
for (auto& p : mds) {
f->dump_int(p.first.c_str(), p.second);
overall[p.first] += p.second;
}
f->close_section();
}
for (auto& p : mgrstatmon()->get_service_map().services) {
auto &service = p.first;
if (ServiceMap::is_normal_ceph_entity(service)) {
continue;
}
f->open_object_section(service.c_str());
map<string,int> m;
p.second.count_metadata("ceph_version", &m);
for (auto& q : m) {
f->dump_int(q.first.c_str(), q.second);
overall[q.first] += q.second;
}
f->close_section();
}
f->open_object_section("overall");
for (auto& p : overall) {
f->dump_int(p.first.c_str(), p.second);
}
f->close_section();
f->close_section();
f->flush(rdata);
rs = "";
r = 0;
}
out:
if (!m->get_source().is_mon()) // don't reply to mon->mon commands
reply_command(op, r, rs, rdata, 0);
}
/**
 * Convenience overload: acknowledge a MMonCommand with a status code and
 * status string but no payload data.
 */
void Monitor::reply_command(MonOpRequestRef op, int rc, const string &rs, version_t version)
{
  bufferlist empty_data;
  reply_command(op, rc, rs, empty_data, version);
}
/**
 * Acknowledge a MMonCommand: build a MMonCommandAck echoing the original
 * command vector, attach the payload, and send it back along the op's
 * session (possibly routed via a proxy connection, see send_reply()).
 */
void Monitor::reply_command(MonOpRequestRef op, int rc, const string &rs,
                            bufferlist& rdata, version_t version)
{
  auto cmd = op->get_req<MMonCommand>();
  ceph_assert(cmd->get_type() == MSG_MON_COMMAND);
  auto *ack = new MMonCommandAck(cmd->cmd, rc, rs, version);
  ack->set_tid(cmd->get_tid());
  ack->set_data(rdata);
  send_reply(op, ack);
}
/**
 * Acknowledge a "tell"-style MCommand directly on its connection.
 * Unlike reply_command(), this does not go through send_reply()/MRoute.
 */
void Monitor::reply_tell_command(
  MonOpRequestRef op, int rc, const string &rs)
{
  auto *cmd = static_cast<MCommand*>(op->get_req());
  ceph_assert(cmd->get_type() == MSG_COMMAND);
  auto *ack = new MCommandReply(rc, rs);
  ack->set_tid(cmd->get_tid());
  cmd->get_connection()->send_message(ack);
}
// ------------------------
// request/reply routing
//
// a client/mds/osd will connect to a random monitor. we need to forward any
// messages requiring state updates to the leader, and then route any replies
// back via the correct monitor and back to them. (the monitor will not
// initiate any connections.)

/**
 * Forward an op that requires the leader (state update) to the current
 * leader, recording it in routed_requests so the eventual reply can be
 * routed back to the originating client via handle_route().
 *
 * Drops the request (with a debug log) when it is a non-local mon->mon
 * request, when it is already a proxied request, or when the session is
 * closed.
 */
void Monitor::forward_request_leader(MonOpRequestRef op)
{
  op->mark_event(__func__);

  int mon = get_leader();
  MonSession *session = op->get_session();
  PaxosServiceMessage *req = op->get_req<PaxosServiceMessage>();

  if (req->get_source().is_mon() && req->get_source_addrs() != messenger->get_myaddrs()) {
    // another monitor's request landed here; not ours to forward
    dout(10) << "forward_request won't forward (non-local) mon request " << *req << dendl;
  } else if (session->proxy_con) {
    // already a forwarded request; never forward twice
    dout(10) << "forward_request won't double fwd request " << *req << dendl;
  } else if (!session->closed) {
    // bookkeeping entry so a later MRoute (or a leader change) can find
    // this request again; rr keeps its own session ref.
    RoutedRequest *rr = new RoutedRequest;
    rr->tid = ++routed_request_tid;
    rr->con = req->get_connection();
    rr->con_features = rr->con->get_features();
    encode_message(req, CEPH_FEATURES_ALL, rr->request_bl);   // for my use only; use all features
    rr->session = static_cast<MonSession *>(session->get());
    rr->op = op;
    routed_requests[rr->tid] = rr;
    session->routed_request_tids.insert(rr->tid);

    dout(10) << "forward_request " << rr->tid << " request " << *req
             << " features " << rr->con_features << dendl;

    MForward *forward = new MForward(rr->tid,
                                     req,
                                     rr->con_features,
                                     rr->session->caps);
    forward->set_priority(req->get_priority());
    if (session->auth_handler) {
      // authenticated client: carry its entity name to the leader
      forward->entity_name = session->entity_name;
    } else if (req->get_source().is_mon()) {
      forward->entity_name.set_type(CEPH_ENTITY_TYPE_MON);
    }
    send_mon_message(forward, mon);
    op->mark_forwarded();
    ceph_assert(op->get_req()->get_type() != 0);
  } else {
    dout(10) << "forward_request no session for request " << *req << dendl;
  }
}
// fake connection attached to forwarded messages
//
// It is never actually connected; attempting to send on it is a
// programming error (replies to forwarded requests must go back via the
// session's proxy connection instead).
struct AnonConnection : public Connection {
  entity_addr_t socket_addr;

  int send_message(Message *m) override {
    ceph_assert(!"send_message on anonymous connection");
    return -1;  // unreachable; avoids falling off the end of a non-void function
  }
  void send_keepalive() override {
    ceph_assert(!"send_keepalive on anonymous connection");
  }
  void mark_down() override {
    // silently ignore
  }
  void mark_disposable() override {
    // silently ignore
  }
  bool is_connected() override { return false; }
  entity_addr_t get_peer_socket_addr() const override {
    return socket_addr;
  }

private:
  FRIEND_MAKE_REF(AnonConnection);
  explicit AnonConnection(CephContext *cct, const entity_addr_t& sa)
    : Connection(cct, nullptr),
      socket_addr(sa) {}
};
// extract the original message and put it into the regular dispatch function
/**
 * Handle an MForward from a peer monitor: reconstruct the original
 * client request, wrap it in a temporary anonymous connection + session
 * that carry the client's identity/caps, and re-dispatch it locally.
 * The reply will be routed back through the forwarding monitor
 * (session->proxy_con / proxy_tid; see send_reply()).
 *
 * Requires the forwarding peer to hold mon 'x' caps; otherwise the
 * message is dropped with a log entry.
 */
void Monitor::handle_forward(MonOpRequestRef op)
{
  auto m = op->get_req<MForward>();
  dout(10) << "received forwarded message from "
           << ceph_entity_type_name(m->client_type)
           << " " << m->client_addrs
           << " via " << m->get_source_inst() << dendl;
  MonSession *session = op->get_session();
  ceph_assert(session);

  if (!session->is_capable("mon", MON_CAP_X)) {
    dout(0) << "forward from entity with insufficient caps! "
            << session->caps << dendl;
  } else {
    // see PaxosService::dispatch(); we rely on this being anon
    // (c->msgr == NULL)
    PaxosServiceMessage *req = m->claim_message();
    ceph_assert(req != NULL);

    // fake connection/session impersonating the original client
    auto c = ceph::make_ref<AnonConnection>(cct, m->client_socket_addr);
    MonSession *s = new MonSession(static_cast<Connection*>(c.get()));
    s->_ident(req->get_source(),
              req->get_source_addrs());
    c->set_priv(RefCountedPtr{s, false});
    c->set_peer_addrs(m->client_addrs);
    c->set_peer_type(m->client_type);
    c->set_features(m->con_features);

    // the forwarding mon already authenticated the client; adopt its
    // identity and caps verbatim
    s->authenticated = true;
    s->caps = m->client_caps;
    dout(10) << " caps are " << s->caps << dendl;
    s->entity_name = m->entity_name;
    dout(10) << " entity name '" << s->entity_name << "' type "
             << s->entity_name.get_type() << dendl;
    // remember where to route the reply back to
    s->proxy_con = m->get_connection();
    s->proxy_tid = m->tid;

    req->set_connection(c);

    // not super accurate, but better than nothing.
    req->set_recv_stamp(m->get_recv_stamp());

    /*
     * note which election epoch this is; we will drop the message if
     * there is a future election since our peers will resend routed
     * requests in that case.
     */
    req->rx_election_epoch = get_epoch();

    dout(10) << " mesg " << req << " from " << m->get_source_addr() << dendl;
    _ms_dispatch(req);

    // break the session <-> con ref loop by removing the con->session
    // reference, which is no longer needed once the MonOpRequest is
    // set up.
    c->set_priv(NULL);
  }
}
/**
 * Deliver a reply for an op.  If the op arrived via another monitor
 * (session->proxy_con set), wrap the reply in an MRoute so the proxying
 * monitor can forward it to the original client; otherwise send it
 * directly on the session's connection.  Drops the reply when there is
 * no usable connection left.
 */
void Monitor::send_reply(MonOpRequestRef op, Message *reply)
{
  op->mark_event(__func__);

  MonSession *session = op->get_session();
  ceph_assert(session);

  Message *req = op->get_req();
  ConnectionRef con = op->get_connection();

  reply->set_cct(g_ceph_context);
  dout(2) << __func__ << " " << op << " " << reply << " " << *reply << dendl;

  // no op connection, or session lost both its direct and proxy
  // connections: nowhere to deliver the reply.
  if (!con || (!session->con && !session->proxy_con)) {
    dout(2) << "send_reply no connection, dropping reply " << *reply
            << " to " << req << " " << *req << dendl;
    reply->put();
    op->mark_event("reply: no connection");
    return;
  }

  if (session->proxy_con) {
    dout(15) << "send_reply routing reply to " << con->get_peer_addr()
             << " via " << session->proxy_con->get_peer_addr()
             << " for request " << *req << dendl;
    session->proxy_con->send_message(new MRoute(session->proxy_tid, reply));
    op->mark_event("reply: send routed request");
  } else {
    session->con->send_message(reply);
    op->mark_event("reply: send");
  }
}
/**
 * Signal that an op gets no reply.  For a proxied request we still send
 * an empty MRoute back to the forwarding monitor so it can clean up its
 * routed-request bookkeeping; a direct request just gets logged.
 */
void Monitor::no_reply(MonOpRequestRef op)
{
  MonSession *sess = op->get_session();
  Message *request = op->get_req();

  if (!sess->proxy_con) {
    dout(10) << "no_reply to " << request->get_source_inst()
             << " " << *request << dendl;
    op->mark_event("no_reply");
    return;
  }

  dout(10) << "no_reply to " << request->get_source_inst()
           << " via " << sess->proxy_con->get_peer_addr()
           << " for request " << *request << dendl;
  sess->proxy_con->send_message(new MRoute(sess->proxy_tid, NULL));
  op->mark_event("no_reply: send routed request");
}
/**
 * Handle an MRoute from the leader: look up the routed request we
 * recorded in forward_request_leader(), deliver the embedded reply (if
 * any) to the original client connection, optionally push osdmap
 * incrementals first, and retire the bookkeeping entry.
 *
 * Requires mon 'x' caps on the sending session (only monitors may
 * route replies).
 */
void Monitor::handle_route(MonOpRequestRef op)
{
  auto m = op->get_req<MRoute>();
  MonSession *session = op->get_session();
  //check privileges
  if (!session->is_capable("mon", MON_CAP_X)) {
    dout(0) << "MRoute received from entity without appropriate perms! "
            << dendl;
    return;
  }
  if (m->msg)
    dout(10) << "handle_route tid " << m->session_mon_tid << " " << *m->msg
             << dendl;
  else
    dout(10) << "handle_route tid " << m->session_mon_tid << " null" << dendl;

  // look it up
  if (!m->session_mon_tid) {
    dout(10) << " not a routed request, ignoring" << dendl;
    return;
  }

  auto found = routed_requests.find(m->session_mon_tid);
  if (found == routed_requests.end()) {
    // e.g. the request was dropped when the session went away
    dout(10) << " don't have routed request tid " << m->session_mon_tid << dendl;
    return;
  }

  // take ownership; rr is deleted when this scope exits
  std::unique_ptr<RoutedRequest> rr{found->second};
  // reset payload, in case encoding is dependent on target features
  if (m->msg) {
    m->msg->clear_payload();
    rr->con->send_message(m->msg);
    m->msg = NULL;  // ownership passed to send_message
  }
  if (m->send_osdmap_first) {
    // leader asked us to ship osdmap incrementals before the reply
    dout(10) << " sending osdmaps from " << m->send_osdmap_first << dendl;
    osdmon()->send_incremental(m->send_osdmap_first, rr->session,
                               true, MonOpRequestRef());
  }
  ceph_assert(rr->tid == m->session_mon_tid && rr->session->routed_request_tids.count(m->session_mon_tid));
  routed_requests.erase(found);
  rr->session->routed_request_tids.erase(m->session_mon_tid);
}
/**
 * After an election, re-drive every outstanding routed request.
 *
 * If we became the leader ourselves, requeue each op for local
 * processing (via C_RetryMessage) and drop the routing bookkeeping;
 * otherwise re-decode the saved request bytes and forward them to the
 * new leader as fresh MForward messages.
 */
void Monitor::resend_routed_requests()
{
  dout(10) << "resend_routed_requests" << dendl;
  int mon = get_leader();
  list<Context*> retry;
  for (map<uint64_t, RoutedRequest*>::iterator p = routed_requests.begin();
       p != routed_requests.end();
       ++p) {
    RoutedRequest *rr = p->second;

    if (mon == rank) {
      // we are the leader now; handle the op locally
      dout(10) << " requeue for self tid " << rr->tid << dendl;
      rr->op->mark_event("retry routed request");
      retry.push_back(new C_RetryMessage(this, rr->op));
      if (rr->session) {
        ceph_assert(rr->session->routed_request_tids.count(p->first));
        rr->session->routed_request_tids.erase(p->first);
      }
      delete rr;
    } else {
      // rebuild the original request from the bytes we stashed in
      // forward_request_leader() and forward to the new leader
      auto q = rr->request_bl.cbegin();
      PaxosServiceMessage *req =
        (PaxosServiceMessage *)decode_message(cct, 0, q);
      rr->op->mark_event("resend forwarded message to leader");
      dout(10) << " resend to mon." << mon << " tid " << rr->tid << " " << *req
               << dendl;
      MForward *forward = new MForward(rr->tid,
                                       req,
                                       rr->con_features,
                                       rr->session->caps);
      req->put();  // forward takes its own ref; drop ours.
      forward->client_type = rr->con->get_peer_type();
      forward->client_addrs = rr->con->get_peer_addrs();
      forward->client_socket_addr = rr->con->get_peer_socket_addr();
      forward->set_priority(req->get_priority());
      send_mon_message(forward, mon);
    }
  }
  if (mon == rank) {
    // all entries were either requeued above; clear the map and fire
    // the retry contexts
    routed_requests.clear();
    finish_contexts(g_ceph_context, retry);
  }
}
/**
 * Tear down a session: drop every routed request issued on its behalf,
 * detach it from its connection, remove it from the session map, and
 * update the session perf counters.  The session must still be open and
 * have a connection.
 */
void Monitor::remove_session(MonSession *s)
{
  dout(10) << "remove_session " << s << " " << s->name << " " << s->addrs
           << " features 0x" << std::hex << s->con_features << std::dec << dendl;
  ceph_assert(s->con);
  ceph_assert(!s->closed);

  // drop every routed request that belongs to this session
  for (auto tid : s->routed_request_tids) {
    auto it = routed_requests.find(tid);
    ceph_assert(it != routed_requests.end());
    RoutedRequest *rr = it->second;
    dout(10) << " dropping routed request " << rr->tid << dendl;
    delete rr;
    routed_requests.erase(it);
  }
  s->routed_request_tids.clear();

  // detach from the connection and the session map
  s->con->set_priv(nullptr);
  session_map.remove_session(s);

  logger->set(l_mon_num_sessions, session_map.get_size());
  logger->inc(l_mon_session_rm);
}
/**
 * Remove every registered session.
 *
 * remove_session() already increments l_mon_session_rm for each session
 * it tears down, so we must not bump the counter again here (the old
 * code double-counted removals).
 */
void Monitor::remove_all_sessions()
{
  std::lock_guard l(session_map_lock);
  while (!session_map.sessions.empty()) {
    MonSession *s = session_map.sessions.front();
    remove_session(s);  // updates l_mon_session_rm and l_mon_num_sessions
  }
  if (logger)
    logger->set(l_mon_num_sessions, session_map.get_size());
}
/**
 * Send a message to the monitor with the given rank, resolving its
 * addresses from the current monmap.
 */
void Monitor::send_mon_message(Message *m, int rank)
{
  const auto& target = monmap->get_addrs(rank);
  messenger->send_to_mon(m, target);
}
/**
 * Park a message from a (new-ish) session until we are back in quorum,
 * or drop it and push the client elsewhere.
 *
 * tick() periodically re-dispatches the waitlist, so if we do not
 * rejoin quorum soon the client can be redirected to another monitor.
 *
 * A few message sorts bypass this entirely (handled by our callers):
 *  1) monitors can talk to us at any time;
 *  2) auth messages - we may have just dropped out of quorum and still
 *     want to make authentication progress;
 *  3) command messages, which we accept under all circumstances.
 */
void Monitor::waitlist_or_zap_client(MonOpRequestRef op)
{
  Message *msg = op->get_req();
  MonSession *sess = op->get_session();
  ConnectionRef conn = op->get_connection();

  // anything received within the last mon_lease is considered fresh
  utime_t cutoff = ceph_clock_now();
  cutoff -= g_ceph_context->_conf->mon_lease;
  const bool fresh = msg->get_recv_stamp() > cutoff;

  if (fresh && conn->is_connected()) {
    dout(5) << "waitlisting message " << *msg << dendl;
    maybe_wait_for_quorum.push_back(new C_RetryMessage(this, op));
    op->mark_wait_for_quorum();
    return;
  }

  dout(5) << "discarding message " << *msg << " and sending client elsewhere" << dendl;
  conn->mark_down();
  // proxied sessions aren't registered and don't have a con; don't remove
  // those.
  if (!sess->proxy_con) {
    std::lock_guard l(session_map_lock);
    remove_session(sess);
  }
  op->mark_zap();
}
/**
 * Entry point for every incoming message (consumes one ref of m via the
 * op tracker).  Establishes or refreshes the MonSession for the sender,
 * refreshes its identity from the auth handler, and then either
 * waitlists the op (no quorum / unauthenticated sender) or hands it to
 * dispatch_op().
 */
void Monitor::_ms_dispatch(Message *m)
{
  if (is_shutdown()) {
    m->put();
    return;
  }

  MonOpRequestRef op = op_tracker.create_request<MonOpRequest>(m);
  bool src_is_mon = op->is_src_mon();
  op->mark_event("mon:_ms_dispatch");
  MonSession *s = op->get_session();
  if (s && s->closed) {
    // session already torn down; drop the message
    return;
  }

  if (src_is_mon && s) {
    ConnectionRef con = m->get_connection();
    if (con->get_messenger() && con->get_features() != s->con_features) {
      // only update features if this is a non-anonymous connection
      dout(10) << __func__ << " feature change for " << m->get_source_inst()
               << " (was " << s->con_features
               << ", now " << con->get_features() << ")" << dendl;
      // connection features changed - recreate session.
      if (s->con && s->con != con) {
        dout(10) << __func__ << " connection for " << m->get_source_inst()
                 << " changed from session; mark down and replace" << dendl;
        s->con->mark_down();
      }
      if (s->item.is_on_list()) {
        // forwarded messages' sessions are not in the sessions map and
        // exist only while the op is being handled.
        std::lock_guard l(session_map_lock);
        remove_session(s);
      }
      // fall through below and create a fresh session
      s = nullptr;
    }
  }

  if (!s) {
    // if the sender is not a monitor, make sure their first message for a
    // session is an MAuth. If it is not, assume it's a stray message,
    // and considering that we are creating a new session it is safe to
    // assume that the sender hasn't authenticated yet, so we have no way
    // of assessing whether we should handle it or not.
    if (!src_is_mon && (m->get_type() != CEPH_MSG_AUTH &&
                        m->get_type() != CEPH_MSG_MON_GET_MAP &&
                        m->get_type() != CEPH_MSG_PING)) {
      dout(1) << __func__ << " dropping stray message " << *m
              << " from " << m->get_source_inst() << dendl;
      return;
    }

    ConnectionRef con = m->get_connection();
    {
      std::lock_guard l(session_map_lock);
      s = session_map.new_session(m->get_source(),
                                  m->get_source_addrs(),
                                  con.get());
    }
    ceph_assert(s);
    con->set_priv(RefCountedPtr{s, false});
    dout(10) << __func__ << " new session " << s << " " << *s
             << " features 0x" << std::hex
             << s->con_features << std::dec << dendl;
    op->set_session(s);

    logger->set(l_mon_num_sessions, session_map.get_size());
    logger->inc(l_mon_session_add);

    if (src_is_mon) {
      // give it monitor caps; the peer type has been authenticated
      dout(5) << __func__ << " setting monitor caps on this connection" << dendl;
      if (!s->caps.is_allow_all()) // but no need to repeatedly copy
        s->caps = mon_caps;
      s->authenticated = true;
    }
  } else {
    dout(20) << __func__ << " existing session " << s << " for " << s->name
             << dendl;
  }

  ceph_assert(s);

  // every message keeps the session alive a bit longer
  s->session_timeout = ceph_clock_now();
  s->session_timeout += g_conf()->mon_session_timeout;

  if (s->auth_handler) {
    // refresh identity from the auth handler on every message
    s->entity_name = s->auth_handler->get_entity_name();
    s->global_id = s->auth_handler->get_global_id();
    s->global_id_status = s->auth_handler->get_global_id_status();
  }
  dout(20) << " entity_name " << s->entity_name
           << " global_id " << s->global_id
           << " (" << s->global_id_status
           << ") caps " << s->caps.get_str() << dendl;

  if (!session_stretch_allowed(s, op)) {
    return;
  }
  // out of quorum (or sender not authenticated shortly after losing
  // quorum): park non-mon, non-ping messages on the waitlist
  if ((is_synchronizing() ||
       (!s->authenticated && !exited_quorum.is_zero())) &&
      !src_is_mon &&
      m->get_type() != CEPH_MSG_PING) {
    waitlist_or_zap_client(op);
  } else {
    dispatch_op(op);
  }
  return;
}
/**
 * Route an op to the component that handles it, in increasing order of
 * required trust:
 *   1. messages that need no caps (auth, ping, tell commands);
 *   2. after the authenticated check, config/map fetches and subscribes;
 *   3. service messages whose caps are checked by the service itself;
 *   4. messages needing mon 'r' caps (get_version);
 *   5. messages only another monitor may send (route/probe/sync/paxos/
 *      election/forward/timecheck/health).
 * Anything that falls through every switch is dropped with a log entry.
 *
 * (Also fixes a stray empty statement -- 'return;;' -- in the election
 * branch of the original.)
 */
void Monitor::dispatch_op(MonOpRequestRef op)
{
  op->mark_event("mon:dispatch_op");
  MonSession *s = op->get_session();
  ceph_assert(s);
  if (s->closed) {
    dout(10) << " session closed, dropping " << op->get_req() << dendl;
    return;
  }

  /* we will consider the default type as being 'monitor' until proven wrong */
  op->set_type_monitor();
  /* deal with all messages that do not necessarily need caps */
  switch (op->get_req()->get_type()) {
    // auth
    case MSG_MON_GLOBAL_ID:
    case MSG_MON_USED_PENDING_KEYS:
    case CEPH_MSG_AUTH:
      op->set_type_service();
      /* no need to check caps here */
      paxos_service[PAXOS_AUTH]->dispatch(op);
      return;

    case CEPH_MSG_PING:
      handle_ping(op);
      return;
    case MSG_COMMAND:
      op->set_type_command();
      handle_tell_command(op);
      return;
  }

  if (!op->get_session()->authenticated) {
    dout(5) << __func__ << " " << op->get_req()->get_source_inst()
            << " is not authenticated, dropping " << *(op->get_req())
            << dendl;
    return;
  }

  // global_id_status == NONE: all sessions for auth_none and krb,
  // mon <-> mon sessions (including proxied sessions) for cephx
  ceph_assert(s->global_id_status == global_id_status_t::NONE ||
              s->global_id_status == global_id_status_t::NEW_OK ||
              s->global_id_status == global_id_status_t::NEW_NOT_EXPOSED ||
              s->global_id_status == global_id_status_t::RECLAIM_OK ||
              s->global_id_status == global_id_status_t::RECLAIM_INSECURE);

  // let mon_getmap through for "ping" (which doesn't reconnect)
  // and "tell" (which reconnects but doesn't attempt to preserve
  // its global_id and stays in NEW_NOT_EXPOSED, retrying until
  // ->send_attempts reaches 0)
  if (cct->_conf->auth_expose_insecure_global_id_reclaim &&
      s->global_id_status == global_id_status_t::NEW_NOT_EXPOSED &&
      op->get_req()->get_type() != CEPH_MSG_MON_GET_MAP) {
    dout(5) << __func__ << " " << op->get_req()->get_source_inst()
            << " may omit old_ticket on reconnects, discarding "
            << *op->get_req() << " and forcing reconnect" << dendl;
    ceph_assert(s->con && !s->proxy_con);
    s->con->mark_down();
    {
      std::lock_guard l(session_map_lock);
      remove_session(s);
    }
    op->mark_zap();
    return;
  }

  switch (op->get_req()->get_type()) {
    case CEPH_MSG_MON_GET_MAP:
      handle_mon_get_map(op);
      return;

    case MSG_GET_CONFIG:
      configmon()->handle_get_config(op);
      return;

    case CEPH_MSG_MON_SUBSCRIBE:
      /* FIXME: check what's being subscribed, filter accordingly */
      handle_subscribe(op);
      return;
  }

  /* well, maybe the op belongs to a service... */
  op->set_type_service();
  /* deal with all messages which caps should be checked somewhere else */
  switch (op->get_req()->get_type()) {

    // OSDs
    case CEPH_MSG_MON_GET_OSDMAP:
    case CEPH_MSG_POOLOP:
    case MSG_OSD_BEACON:
    case MSG_OSD_MARK_ME_DOWN:
    case MSG_OSD_MARK_ME_DEAD:
    case MSG_OSD_FULL:
    case MSG_OSD_FAILURE:
    case MSG_OSD_BOOT:
    case MSG_OSD_ALIVE:
    case MSG_OSD_PGTEMP:
    case MSG_OSD_PG_CREATED:
    case MSG_REMOVE_SNAPS:
    case MSG_MON_GET_PURGED_SNAPS:
    case MSG_OSD_PG_READY_TO_MERGE:
      paxos_service[PAXOS_OSDMAP]->dispatch(op);
      return;

    // MDSs
    case MSG_MDS_BEACON:
    case MSG_MDS_OFFLOAD_TARGETS:
      paxos_service[PAXOS_MDSMAP]->dispatch(op);
      return;

    // Mgrs
    case MSG_MGR_BEACON:
      paxos_service[PAXOS_MGR]->dispatch(op);
      return;

    // MgrStat
    case MSG_MON_MGR_REPORT:
    case CEPH_MSG_STATFS:
    case MSG_GETPOOLSTATS:
      paxos_service[PAXOS_MGRSTAT]->dispatch(op);
      return;

    // log
    case MSG_LOG:
      paxos_service[PAXOS_LOG]->dispatch(op);
      return;

    // handle_command() does its own caps checking
    case MSG_MON_COMMAND:
      op->set_type_command();
      handle_command(op);
      return;
  }

  /* nop, looks like it's not a service message; revert back to monitor */
  op->set_type_monitor();

  /* messages we, the Monitor class, need to deal with
   * but may be sent by clients. */

  if (!op->get_session()->is_capable("mon", MON_CAP_R)) {
    dout(5) << __func__ << " " << op->get_req()->get_source_inst()
            << " not enough caps for " << *(op->get_req()) << " -- dropping"
            << dendl;
    return;
  }

  switch (op->get_req()->get_type()) {
    // misc
    case CEPH_MSG_MON_GET_VERSION:
      handle_get_version(op);
      return;
  }

  if (!op->is_src_mon()) {
    dout(1) << __func__ << " unexpected monitor message from"
            << " non-monitor entity " << op->get_req()->get_source_inst()
            << " " << *(op->get_req()) << " -- dropping" << dendl;
    return;
  }

  /* messages that should only be sent by another monitor */
  switch (op->get_req()->get_type()) {

    case MSG_ROUTE:
      handle_route(op);
      return;

    case MSG_MON_PROBE:
      handle_probe(op);
      return;

    // Sync (i.e., the new slurp, but on steroids)
    case MSG_MON_SYNC:
      handle_sync(op);
      return;
    case MSG_MON_SCRUB:
      handle_scrub(op);
      return;

    /* log acks are sent from a monitor we sent the MLog to, and are
       never sent by clients to us. */
    case MSG_LOGACK:
      log_client.handle_log_ack((MLogAck*)op->get_req());
      return;

    // monmap
    case MSG_MON_JOIN:
      op->set_type_service();
      paxos_service[PAXOS_MONMAP]->dispatch(op);
      return;

    // paxos
    case MSG_MON_PAXOS:
      {
        op->set_type_paxos();
        auto pm = op->get_req<MMonPaxos>();
        if (!op->get_session()->is_capable("mon", MON_CAP_X)) {
          //can't send these!
          return;
        }

        if (state == STATE_SYNCHRONIZING) {
          // we are synchronizing. These messages would do us no
          // good, thus just drop them and ignore them.
          dout(10) << __func__ << " ignore paxos msg from "
                   << pm->get_source_inst() << dendl;
          return;
        }

        // sanitize: a paxos message from a future epoch means we are
        // behind; restart probing instead of processing it.
        if (pm->epoch > get_epoch()) {
          bootstrap();
          return;
        }
        if (pm->epoch != get_epoch()) {
          return;
        }

        paxos->dispatch(op);
      }
      return;

    // elector messages
    case MSG_MON_ELECTION:
      op->set_type_election_or_ping();
      //check privileges here for simplicity
      if (!op->get_session()->is_capable("mon", MON_CAP_X)) {
        dout(0) << "MMonElection received from entity without enough caps!"
                << op->get_session()->caps << dendl;
        return;
      }
      if (!is_probing() && !is_synchronizing()) {
        elector.dispatch(op);
      }
      return;

    case MSG_MON_PING:
      op->set_type_election_or_ping();
      elector.dispatch(op);
      return;

    case MSG_FORWARD:
      handle_forward(op);
      return;

    case MSG_TIMECHECK:
      dout(5) << __func__ << " ignoring " << op << dendl;
      return;
    case MSG_TIMECHECK2:
      handle_timecheck(op);
      return;

    case MSG_MON_HEALTH:
      // deprecated; break (not return) so the drop is also logged below
      dout(5) << __func__ << " dropping deprecated message: "
              << *op->get_req() << dendl;
      break;
    case MSG_MON_HEALTH_CHECKS:
      op->set_type_service();
      paxos_service[PAXOS_HEALTH]->dispatch(op);
      return;
  }
  dout(1) << "dropping unexpected " << *(op->get_req()) << dendl;
  return;
}
// Reply to an MPing with a pong whose payload is a JSON object holding
// the cluster health summary plus this monitor's own status.  The reply
// goes straight back over the originating connection; no quorum needed.
void Monitor::handle_ping(MonOpRequestRef op)
{
  auto m = op->get_req<MPing>();
  dout(10) << __func__ << " " << *m << dendl;
  MPing *reply = new MPing;
  bufferlist payload;
  boost::scoped_ptr<Formatter> f(new JSONFormatter(true));
  f->open_object_section("pong");
  // health first, then mon status, both nested under "pong"
  healthmon()->get_health_status(false, f.get(), nullptr);
  get_mon_status(f.get());
  f->close_section();
  // flatten the JSON into a string and encode it as the message payload
  stringstream ss;
  f->flush(ss);
  encode(ss.str(), payload);
  reply->set_payload(payload);
  dout(10) << __func__ << " reply payload len " << reply->get_payload().length() << dendl;
  m->get_connection()->send_message(reply);
}
// Bring up the timecheck subsystem: drop any stale round state, and if
// the whole quorum supports the nautilus feature set, start a round.
void Monitor::timecheck_start()
{
  dout(10) << __func__ << dendl;
  timecheck_cleanup();
  if (!get_quorum_mon_features().contains_all(
        ceph::features::mon::FEATURE_NAUTILUS)) {
    // pre-nautilus quorum member present: do not run timechecks
    return;
  }
  timecheck_start_round();
}
// Tear down the timecheck subsystem (e.g. when leaving the leader
// role); discards all per-round state and cancels any pending event.
void Monitor::timecheck_finish()
{
  dout(10) << __func__ << dendl;
  timecheck_cleanup();
}
// Leader-only: begin a new timecheck round.  Round numbers are odd
// while a round is in flight and even once finished.  If a round is
// still running we leave it alone unless it has lasted longer than
// three timecheck intervals, in which case it is cancelled and a fresh
// round started.
void Monitor::timecheck_start_round()
{
  dout(10) << __func__ << " curr " << timecheck_round << dendl;
  ceph_assert(is_leader());
  if (monmap->size() == 1) {
    ceph_abort_msg("We are alone; this shouldn't have been scheduled!");
    return;
  }
  if (timecheck_round % 2) {
    // odd round number: a round is still in progress
    dout(10) << __func__ << " there's a timecheck going on" << dendl;
    utime_t curr_time = ceph_clock_now();
    double max = g_conf()->mon_timecheck_interval*3;
    if (curr_time - timecheck_round_start < max) {
      dout(10) << __func__ << " keep current round going" << dendl;
      goto out;
    } else {
      dout(10) << __func__
               << " finish current timecheck and start new" << dendl;
      timecheck_cancel_round();
    }
  }
  ceph_assert(timecheck_round % 2 == 0);
  timecheck_acks = 0;
  timecheck_round ++;
  timecheck_round_start = ceph_clock_now();
  dout(10) << __func__ << " new " << timecheck_round << dendl;
  // send the OP_PING batch for this round
  timecheck();
out:
  dout(10) << __func__ << " setting up next event" << dendl;
  timecheck_reset_event();
}
// Close the current (odd) round by bumping the round number back to
// even.  On success every quorum member acked, so report the collected
// skews/latencies and re-evaluate skew state; on failure (cancel) just
// drop whichever peers were still pending.
void Monitor::timecheck_finish_round(bool success)
{
  dout(10) << __func__ << " curr " << timecheck_round << dendl;
  ceph_assert(timecheck_round % 2);
  timecheck_round ++;
  timecheck_round_start = utime_t();
  if (success) {
    ceph_assert(timecheck_waiting.empty());
    ceph_assert(timecheck_acks == quorum.size());
    timecheck_report();
    timecheck_check_skews();
    return;
  }
  dout(10) << __func__ << " " << timecheck_waiting.size()
           << " peers still waiting:";
  for (auto& p : timecheck_waiting) {
    *_dout << " mon." << p.first;
  }
  *_dout << dendl;
  timecheck_waiting.clear();
  dout(10) << __func__ << " finished to " << timecheck_round << dendl;
}
// Abort the in-flight round without reporting any results.
void Monitor::timecheck_cancel_round()
{
  timecheck_finish_round(false);
}
// Reset all timecheck state: round counters, the scheduled timer event,
// per-peer pending pings, and the accumulated skew/latency maps.
void Monitor::timecheck_cleanup()
{
  timecheck_round = 0;
  timecheck_acks = 0;
  timecheck_round_start = utime_t();
  if (timecheck_event) {
    timer.cancel_event(timecheck_event);
    timecheck_event = NULL;
  }
  timecheck_waiting.clear();
  timecheck_skews.clear();
  timecheck_latencies.clear();
  timecheck_rounds_since_clean = 0;
}
// (Re)schedule the next timecheck round.  While skews are being
// observed we check more frequently: the delay grows with the number of
// consecutive skewed rounds (skew_interval * rounds), falling back to
// the normal mon_timecheck_interval once it hits zero or exceeds it.
void Monitor::timecheck_reset_event()
{
  if (timecheck_event) {
    timer.cancel_event(timecheck_event);
    timecheck_event = NULL;
  }
  double delay =
    cct->_conf->mon_timecheck_skew_interval * timecheck_rounds_since_clean;
  if (delay <= 0 || delay > cct->_conf->mon_timecheck_interval) {
    delay = cct->_conf->mon_timecheck_interval;
  }
  dout(10) << __func__ << " delay " << delay
           << " rounds_since_clean " << timecheck_rounds_since_clean
           << dendl;
  timecheck_event = timer.add_event_after(
    delay,
    new C_MonContext{this, [this](int) {
        timecheck_start_round();
      }});
}
// Leader-only, run after a successful round: scan the collected skews
// and decide whether to keep polling at the faster "skew" cadence
// (tracked by timecheck_rounds_since_clean).
void Monitor::timecheck_check_skews()
{
  dout(10) << __func__ << dendl;
  ceph_assert(is_leader());
  ceph_assert((timecheck_round % 2) == 0);
  if (monmap->size() == 1) {
    ceph_abort_msg("We are alone; we shouldn't have gotten here!");
    return;
  }
  ceph_assert(timecheck_latencies.size() == timecheck_skews.size());
  bool found_skew = false;
  for (auto& p : timecheck_skews) {
    double abs_skew;
    if (timecheck_has_skew(p.second, &abs_skew)) {
      dout(10) << __func__
               << " " << p.first << " skew " << abs_skew << dendl;
      found_skew = true;
    }
  }
  if (found_skew) {
    ++timecheck_rounds_since_clean;
    timecheck_reset_event();
  } else if (timecheck_rounds_since_clean > 0) {
    dout(1) << __func__
            << " no clock skews found after " << timecheck_rounds_since_clean
            << " rounds" << dendl;
    // make sure the skews are really gone and not just a transient success
    // this will run just once if not in the presence of skews again.
    // (pretend one skewed round remains so reset_event schedules a single
    // fast follow-up check, then mark ourselves clean again)
    timecheck_rounds_since_clean = 1;
    timecheck_reset_event();
    timecheck_rounds_since_clean = 0;
  }
}
// Leader-only: after a successful round, send the full skew/latency
// tables to every other quorum member via OP_REPORT messages.
void Monitor::timecheck_report()
{
  dout(10) << __func__ << dendl;
  ceph_assert(is_leader());
  ceph_assert((timecheck_round % 2) == 0);
  if (monmap->size() == 1) {
    ceph_abort_msg("We are alone; we shouldn't have gotten here!");
    return;
  }
  ceph_assert(timecheck_latencies.size() == timecheck_skews.size());
  bool log_details = true; // dump the per-peer numbers only once
  for (int peer : quorum) {
    if (monmap->get_name(peer) == name)
      continue;
    auto *report = new MTimeCheck2(MTimeCheck2::OP_REPORT);
    report->epoch = get_epoch();
    report->round = timecheck_round;
    // copy the full skew/latency tables into the message
    for (auto& [other, skew] : timecheck_skews) {
      double latency = timecheck_latencies[other];
      report->skews[other] = skew;
      report->latencies[other] = latency;
      if (log_details) {
        dout(25) << __func__ << " mon." << other
                 << " latency " << latency
                 << " skew " << skew << dendl;
      }
    }
    log_details = false;
    dout(10) << __func__ << " send report to mon." << peer << dendl;
    send_mon_message(report, peer);
  }
}
void Monitor::timecheck()
{
dout(10) << __func__ << dendl;
ceph_assert(is_leader());
if (monmap->size() == 1) {
ceph_abort_msg("We are alone; we shouldn't have gotten here!");
return;
}
ceph_assert(timecheck_round % 2 != 0);
timecheck_acks = 1; // we ack ourselves
dout(10) << __func__ << " start timecheck epoch " << get_epoch()
<< " round " << timecheck_round << dendl;
// we are at the eye of the storm; the point of reference
timecheck_skews[rank] = 0.0;
timecheck_latencies[rank] = 0.0;
for (set<int>::iterator it = quorum.begin(); it != quorum.end(); ++it) {
if (monmap->get_name(*it) == name)
continue;
utime_t curr_time = ceph_clock_now();
timecheck_waiting[*it] = curr_time;
MTimeCheck2 *m = new MTimeCheck2(MTimeCheck2::OP_PING);
m->epoch = get_epoch();
m->round = timecheck_round;
dout(10) << __func__ << " send " << *m << " to mon." << *it << dendl;
send_mon_message(m, *it);
}
}
// Map a bounded skew estimate to a health status: HEALTH_WARN (with a
// human-readable message appended to ss) when the skew exceeds
// mon_clock_drift_allowed, HEALTH_OK otherwise.
health_status_t Monitor::timecheck_status(ostringstream &ss,
                                          const double skew_bound,
                                          const double latency)
{
  ceph_assert(latency >= 0);
  double abs_skew;
  if (!timecheck_has_skew(skew_bound, &abs_skew)) {
    return HEALTH_OK;
  }
  ss << "clock skew " << abs_skew << "s"
     << " > max " << g_conf()->mon_clock_drift_allowed << "s";
  return HEALTH_WARN;
}
// Leader-only: process an OP_PONG from a peon.  Validates the epoch and
// round, updates that peer's smoothed latency and bounded skew, and
// finishes the round once every quorum member has answered.
void Monitor::handle_timecheck_leader(MonOpRequestRef op)
{
  auto m = op->get_req<MTimeCheck2>();
  dout(10) << __func__ << " " << *m << dendl;
  /* handles PONG's */
  ceph_assert(m->op == MTimeCheck2::OP_PONG);
  int other = m->get_source().num();
  if (m->epoch < get_epoch()) {
    dout(1) << __func__ << " got old timecheck epoch " << m->epoch
            << " from " << other
            << " curr " << get_epoch()
            << " -- severely lagged? discard" << dendl;
    return;
  }
  ceph_assert(m->epoch == get_epoch());
  if (m->round < timecheck_round) {
    dout(1) << __func__ << " got old round " << m->round
            << " from " << other
            << " curr " << timecheck_round << " -- discard" << dendl;
    return;
  }
  utime_t curr_time = ceph_clock_now();
  ceph_assert(timecheck_waiting.count(other) > 0);
  utime_t timecheck_sent = timecheck_waiting[other];
  timecheck_waiting.erase(other);
  if (curr_time < timecheck_sent) {
    // our clock was readjusted -- drop everything until it all makes sense.
    dout(1) << __func__ << " our clock was readjusted --"
            << " bump round and drop current check"
            << dendl;
    timecheck_cancel_round();
    return;
  }
  /* update peer latencies */
  double latency = (double)(curr_time - timecheck_sent);
  // exponentially weighted moving average: 80% history, 20% new sample
  if (timecheck_latencies.count(other) == 0)
    timecheck_latencies[other] = latency;
  else {
    double avg_latency = ((timecheck_latencies[other]*0.8)+(latency*0.2));
    timecheck_latencies[other] = avg_latency;
  }
  /*
   * update skews
   *
   * some nasty thing goes on if we were to do 'a - b' between two utime_t,
   * and 'a' happens to be lower than 'b'; so we use double instead.
   *
   * latency is always expected to be >= 0.
   *
   * delta, the difference between theirs timestamp and ours, may either be
   * lower or higher than 0; will hardly ever be 0.
   *
   * The absolute skew is the absolute delta minus the latency, which is
   * taken as a whole instead of an rtt given that there is some queueing
   * and dispatch times involved and it's hard to assess how long exactly
   * it took for the message to travel to the other side and be handled. So
   * we call it a bounded skew, the worst case scenario.
   *
   * Now, to math!
   *
   * Given that the latency is always positive, we can establish that the
   * bounded skew will be:
   *
   * 1. positive if the absolute delta is higher than the latency and
   * delta is positive
   * 2. negative if the absolute delta is higher than the latency and
   * delta is negative.
   * 3. zero if the absolute delta is lower than the latency.
   *
   * On 3. we make a judgement call and treat the skew as non-existent.
   * This is because that, if the absolute delta is lower than the
   * latency, then the apparently existing skew is nothing more than a
   * side-effect of the high latency at work.
   *
   * This may not be entirely true though, as a severely skewed clock
   * may be masked by an even higher latency, but with high latencies
   * we probably have worse issues to deal with than just skewed clocks.
   */
  ceph_assert(latency >= 0);
  double delta = ((double) m->timestamp) - ((double) curr_time);
  double abs_delta = (delta > 0 ? delta : -delta);
  double skew_bound = abs_delta - latency;
  if (skew_bound < 0)
    skew_bound = 0;
  else if (delta < 0)
    skew_bound = -skew_bound;
  ostringstream ss;
  health_status_t status = timecheck_status(ss, skew_bound, latency);
  if (status != HEALTH_OK) {
    clog->health(status) << other << " " << ss.str();
  }
  dout(10) << __func__ << " from " << other << " ts " << m->timestamp
           << " delta " << delta << " skew_bound " << skew_bound
           << " latency " << latency << dendl;
  timecheck_skews[other] = skew_bound;
  timecheck_acks++;
  if (timecheck_acks == quorum.size()) {
    dout(10) << __func__ << " got pongs from everybody ("
             << timecheck_acks << " total)" << dendl;
    ceph_assert(timecheck_skews.size() == timecheck_acks);
    ceph_assert(timecheck_waiting.empty());
    // everyone has acked, so bump the round to finish it.
    timecheck_finish_round();
  }
}
// Peon-only: handle OP_PING / OP_REPORT from the leader.  A PING is
// answered with an OP_PONG carrying our current wall-clock timestamp;
// a REPORT simply replaces our local copies of the quorum's skew and
// latency tables.  Stale epochs and rounds are discarded.
//
// Fix: the debug line before sending logged the received message (*m)
// instead of the reply actually being sent (*reply).
void Monitor::handle_timecheck_peon(MonOpRequestRef op)
{
  auto m = op->get_req<MTimeCheck2>();
  dout(10) << __func__ << " " << *m << dendl;
  ceph_assert(is_peon());
  ceph_assert(m->op == MTimeCheck2::OP_PING || m->op == MTimeCheck2::OP_REPORT);
  if (m->epoch != get_epoch()) {
    dout(1) << __func__ << " got wrong epoch "
            << "(ours " << get_epoch()
            << " theirs: " << m->epoch << ") -- discarding" << dendl;
    return;
  }
  if (m->round < timecheck_round) {
    dout(1) << __func__ << " got old round " << m->round
            << " current " << timecheck_round
            << " (epoch " << get_epoch() << ") -- discarding" << dendl;
    return;
  }
  timecheck_round = m->round;
  if (m->op == MTimeCheck2::OP_REPORT) {
    // finished round: adopt the leader's consolidated tables
    ceph_assert((timecheck_round % 2) == 0);
    timecheck_latencies.swap(m->latencies);
    timecheck_skews.swap(m->skews);
    return;
  }
  // in-flight round: answer the ping with our current timestamp
  ceph_assert((timecheck_round % 2) != 0);
  MTimeCheck2 *reply = new MTimeCheck2(MTimeCheck2::OP_PONG);
  utime_t curr_time = ceph_clock_now();
  reply->timestamp = curr_time;
  reply->epoch = m->epoch;
  reply->round = m->round;
  dout(10) << __func__ << " send " << *reply
           << " to " << m->get_source_inst() << dendl;
  m->get_connection()->send_message(reply);
}
// Route an incoming MTimeCheck2 to the leader- or peon-side handler,
// dropping ops that do not match our current role (leaders only accept
// PONGs; peons only accept PINGs and REPORTs).
void Monitor::handle_timecheck(MonOpRequestRef op)
{
  auto m = op->get_req<MTimeCheck2>();
  dout(10) << __func__ << " " << *m << dendl;
  if (is_leader()) {
    if (m->op == MTimeCheck2::OP_PONG) {
      handle_timecheck_leader(op);
    } else {
      dout(1) << __func__ << " drop unexpected msg (not pong)" << dendl;
    }
    return;
  }
  if (is_peon()) {
    if (m->op == MTimeCheck2::OP_PING || m->op == MTimeCheck2::OP_REPORT) {
      handle_timecheck_peon(op);
    } else {
      dout(1) << __func__ << " drop unexpected msg (not ping or report)" << dendl;
    }
    return;
  }
  // neither leader nor peon (probing/electing/synchronizing): ignore
  dout(1) << __func__ << " drop unexpected msg" << dendl;
}
// Register the client's subscriptions (monmap, osdmap, log-*, mgrmap,
// etc.), enforce per-service read caps, and immediately feed each new
// sub to its owning service so pending updates are sent right away.
// An ack is only sent to pre-STATEFUL_SUB clients, which still renew.
void Monitor::handle_subscribe(MonOpRequestRef op)
{
  auto m = op->get_req<MMonSubscribe>();
  dout(10) << "handle_subscribe " << *m << dendl;
  bool reply = false;
  MonSession *s = op->get_session();
  ceph_assert(s);
  if (m->hostname.size()) {
    s->remote_host = m->hostname;
  }
  for (map<string,ceph_mon_subscribe_item>::iterator p = m->what.begin();
       p != m->what.end();
       ++p) {
    if (p->first == "monmap" || p->first == "config") {
      // these require no caps
    } else if (!s->is_capable("mon", MON_CAP_R)) {
      dout(5) << __func__ << " " << op->get_req()->get_source_inst()
              << " not enough caps for " << *(op->get_req()) << " -- dropping"
              << dendl;
      continue;
    }
    // if there are any non-onetime subscriptions, we need to reply to start the resubscribe timer
    if ((p->second.flags & CEPH_SUBSCRIBE_ONETIME) == 0)
      reply = true;
    // remove conflicting subscribes
    // (a client may only hold one log-* subscription at a time)
    if (logmon()->sub_name_to_id(p->first) >= 0) {
      for (map<string, Subscription*>::iterator it = s->sub_map.begin();
           it != s->sub_map.end(); ) {
        if (it->first != p->first && logmon()->sub_name_to_id(it->first) >= 0) {
          std::lock_guard l(session_map_lock);
          session_map.remove_sub((it++)->second);
        } else {
          ++it;
        }
      }
    }
    {
      std::lock_guard l(session_map_lock);
      session_map.add_update_sub(s, p->first, p->second.start,
                                 p->second.flags & CEPH_SUBSCRIBE_ONETIME,
                                 m->get_connection()->has_feature(CEPH_FEATURE_INCSUBOSDMAP));
    }
    // dispatch the fresh sub to the service that owns it, re-checking
    // the service-specific cap where one applies
    if (p->first.compare(0, 6, "mdsmap") == 0 || p->first.compare(0, 5, "fsmap") == 0) {
      dout(10) << __func__ << ": MDS sub '" << p->first << "'" << dendl;
      if ((int)s->is_capable("mds", MON_CAP_R)) {
        Subscription *sub = s->sub_map[p->first];
        ceph_assert(sub != nullptr);
        mdsmon()->check_sub(sub);
      }
    } else if (p->first == "osdmap") {
      if ((int)s->is_capable("osd", MON_CAP_R)) {
        if (s->osd_epoch > p->second.start) {
          // client needs earlier osdmaps on purpose, so reset the sent epoch
          s->osd_epoch = 0;
        }
        osdmon()->check_osdmap_sub(s->sub_map["osdmap"]);
      }
    } else if (p->first == "osd_pg_creates") {
      if ((int)s->is_capable("osd", MON_CAP_W)) {
        osdmon()->check_pg_creates_sub(s->sub_map["osd_pg_creates"]);
      }
    } else if (p->first == "monmap") {
      monmon()->check_sub(s->sub_map[p->first]);
    } else if (logmon()->sub_name_to_id(p->first) >= 0) {
      logmon()->check_sub(s->sub_map[p->first]);
    } else if (p->first == "mgrmap" || p->first == "mgrdigest") {
      mgrmon()->check_sub(s->sub_map[p->first]);
    } else if (p->first == "servicemap") {
      mgrstatmon()->check_sub(s->sub_map[p->first]);
    } else if (p->first == "config") {
      configmon()->check_sub(s);
    } else if (p->first.find("kv:") == 0) {
      kvmon()->check_sub(s->sub_map[p->first]);
    }
  }
  if (reply) {
    // we only need to reply if the client is old enough to think it
    // has to send renewals.
    ConnectionRef con = m->get_connection();
    if (!con->has_feature(CEPH_FEATURE_MON_STATEFUL_SUB))
      m->get_connection()->send_message(new MMonSubscribeAck(
        monmap->get_fsid(), (int)g_conf()->mon_subscribe_interval));
  }
}
// Answer an MMonGetVersion query with the last/first committed version
// of the named map's paxos service.  The op is re-queued (not dropped)
// if we have no quorum yet or the service is not currently readable;
// unknown map names are logged and ignored.
void Monitor::handle_get_version(MonOpRequestRef op)
{
  auto m = op->get_req<MMonGetVersion>();
  dout(10) << "handle_get_version " << *m << dendl;
  MonSession *s = op->get_session();
  ceph_assert(s);
  if (!is_leader() && !is_peon()) {
    dout(10) << " waiting for quorum" << dendl;
    waitfor_quorum.push_back(new C_RetryMessage(this, op));
    return;
  }
  // both mdsmap and fsmap versions come from the mds monitor
  PaxosService *svc = nullptr;
  if (m->what == "mdsmap" || m->what == "fsmap") {
    svc = mdsmon();
  } else if (m->what == "osdmap") {
    svc = osdmon();
  } else if (m->what == "monmap") {
    svc = monmon();
  } else {
    derr << "invalid map type " << m->what << dendl;
  }
  if (!svc) {
    return;
  }
  if (!svc->is_readable()) {
    svc->wait_for_readable(op, new C_RetryMessage(this, op));
    return;
  }
  auto *reply = new MMonGetVersionReply();
  reply->handle = m->handle;
  reply->version = svc->get_last_committed();
  reply->oldest_version = svc->get_first_committed();
  reply->set_tid(m->get_tid());
  m->get_connection()->send_message(reply);
}
// Messenger callback: a connection dropped.  Mon-to-mon sessions are
// lossless and ignored here; for client sessions we break the
// connection<->session reference cycle and remove the session.
bool Monitor::ms_handle_reset(Connection *con)
{
  dout(10) << "ms_handle_reset " << con << " " << con->get_peer_addr() << dendl;
  // ignore lossless monitor sessions
  if (con->get_peer_type() == CEPH_ENTITY_TYPE_MON)
    return false;
  auto priv = con->get_priv();
  auto s = static_cast<MonSession*>(priv.get());
  if (!s)
    return false;
  // break any con <-> session ref cycle
  s->con->set_priv(nullptr);
  // don't take the monitor lock once we are shutting down
  if (is_shutdown())
    return false;
  std::lock_guard l(lock);
  dout(10) << "reset/close on session " << s->name << " " << s->addrs << dendl;
  if (!s->closed && s->item.is_on_list()) {
    std::lock_guard l(session_map_lock);
    remove_session(s);
  }
  return true;
}
// Messenger callback: peer refused our connection.  No action taken;
// we just log it.
bool Monitor::ms_handle_refused(Connection *con)
{
  // just log for now...
  dout(10) << "ms_handle_refused " << con << " " << con->get_peer_addr() << dendl;
  return false;
}
// -----
// Send our current monmap to the peer, encoded with the connection's
// negotiated feature bits so older clients can decode it.
void Monitor::send_latest_monmap(Connection *con)
{
  bufferlist bl;
  monmap->encode(bl, con->get_features());
  con->send_message(new MMonMap(bl));
}
// Handle an explicit monmap request by replying with the latest monmap.
void Monitor::handle_mon_get_map(MonOpRequestRef op)
{
  auto m = op->get_req<MMonGetMap>();
  dout(10) << "handle_mon_get_map" << dendl;
  send_latest_monmap(m->get_connection().get());
}
// Load the persisted per-monitor metadata map from the store and seed
// pending_metadata with it.  Returns the store error (nonzero) if the
// "last_metadata" key could not be read.
int Monitor::load_metadata()
{
  bufferlist bl;
  int r = store->get(MONITOR_STORE_PREFIX, "last_metadata", bl);
  if (r)
    return r;
  auto it = bl.cbegin();
  decode(mon_metadata, it);
  pending_metadata = mon_metadata;
  return 0;
}
// Dump the metadata key/value pairs of the given monitor rank into the
// formatter.  Returns 0 on success, -EINVAL (with a message appended to
// err) if we hold no metadata for that rank.
//
// Fix: the original did a double map lookup (count() followed by
// operator[]); a single find() avoids the second traversal and the
// non-const operator[].
int Monitor::get_mon_metadata(int mon, Formatter *f, ostream& err)
{
  ceph_assert(f);
  auto found = mon_metadata.find(mon);
  if (found == mon_metadata.end()) {
    err << "mon." << mon << " not found";
    return -EINVAL;
  }
  for (const auto& [key, value] : found->second) {
    f->dump_string(key.c_str(), value);
  }
  return 0;
}
// Tally how many monitors report each distinct value for the given
// metadata field; monitors missing the field count under "unknown".
void Monitor::count_metadata(const string& field, map<string,int> *out)
{
  for (const auto& [rank, metadata] : mon_metadata) {
    auto found = metadata.find(field);
    if (found == metadata.end()) {
      ++(*out)["unknown"];
    } else {
      ++(*out)[found->second];
    }
  }
}
void Monitor::count_metadata(const string& field, Formatter *f)
{
map<string,int> by_val;
count_metadata(field, &by_val);
f->open_object_section(field.c_str());
for (auto& p : by_val) {
f->dump_int(p.first.c_str(), p.second);
}
f->close_section();
}
// Collect the "ceph version -> daemon names" mapping across every
// daemon class the monitor tracks (mon, osd, mgr, mds).
void Monitor::get_all_versions(std::map<string, list<string> > &versions)
{
  // mon
  get_versions(versions);
  // osd
  osdmon()->get_versions(versions);
  // mgr
  mgrmon()->get_versions(versions);
  // mds
  mdsmon()->get_versions(versions);
  dout(20) << __func__ << " all versions=" << versions << dendl;
}
// Append this cluster's monitors to the version map, keyed by their
// reported short ceph version; monitors without that metadata field
// are skipped.
void Monitor::get_versions(std::map<string, list<string> > &versions)
{
  for (const auto& [rank, metadata] : mon_metadata) {
    auto found = metadata.find("ceph_version_short");
    if (found == metadata.end()) {
      // not likely
      continue;
    }
    versions[found->second].push_back(string("mon.") + monmap->get_name(rank));
  }
}
// Dump a hostname -> monitor-names mapping ("mon" services) into the
// formatter.  Monitors lacking a "hostname" metadata entry are skipped.
// Always returns 0.
int Monitor::print_nodes(Formatter *f, ostream& err)
{
  map<string, list<string> > mons;  // hostname => mon
  for (const auto& [rank, metadata] : mon_metadata) {
    auto hostname = metadata.find("hostname");
    if (hostname == metadata.end()) {
      // not likely though
      continue;
    }
    mons[hostname->second].push_back(monmap->get_name(rank));
  }
  dump_services(f, mons, "mon");
  return 0;
}
// ----------------------------------------------
// scrub
// Leader-only: start a full-store scrub across the quorum.  Refuses
// with -EBUSY while results from a previous scrub are still being
// collected.
int Monitor::scrub_start()
{
  dout(10) << __func__ << dendl;
  ceph_assert(is_leader());
  if (!scrub_result.empty()) {
    clog->info() << "scrub already in progress";
    return -EBUSY;
  }
  scrub_event_cancel();
  scrub_result.clear();
  scrub_state.reset(new ScrubState);
  scrub();
  return 0;
}
// Leader-only: run one scrub chunk.  Sends OP_SCRUB for the next
// mon_scrub_max_keys keys (all keys if we are a quorum of one) to every
// peer, scrubs the same range locally, then arms the timeout that
// restarts the scrub if peers never answer.  Always returns 0.
int Monitor::scrub()
{
  ceph_assert(is_leader());
  ceph_assert(scrub_state);
  scrub_cancel_timeout();
  wait_for_paxos_write();
  scrub_version = paxos->get_version();
  // scrub all keys if we're the only monitor in the quorum
  int32_t num_keys =
    (quorum.size() == 1 ? -1 : cct->_conf->mon_scrub_max_keys);
  for (set<int>::iterator p = quorum.begin();
       p != quorum.end();
       ++p) {
    if (*p == rank)
      continue;
    MMonScrub *r = new MMonScrub(MMonScrub::OP_SCRUB, scrub_version,
                                 num_keys);
    r->key = scrub_state->last_key;
    send_mon_message(r, *p);
  }
  // scrub my keys
  bool r = _scrub(&scrub_result[rank],
                  &scrub_state->last_key,
                  &num_keys);
  scrub_state->finished = !r;
  // only after we got our scrub results do we really care whether the
  // other monitors are late on their results. Also, this way we avoid
  // triggering the timeout if we end up getting stuck in _scrub() for
  // longer than the duration of the timeout.
  scrub_reset_timeout();
  if (quorum.size() == 1) {
    ceph_assert(scrub_state->finished == true);
    scrub_finish();
  }
  return 0;
}
// Handle an MMonScrub.  OP_SCRUB (peon side): scrub the requested key
// range and send back the per-prefix key counts and CRCs.  OP_RESULT
// (leader side): record the peer's result; once everyone in the quorum
// has answered, compare results and either finish or scrub the next
// chunk.  Stale versions are silently ignored.
void Monitor::handle_scrub(MonOpRequestRef op)
{
  auto m = op->get_req<MMonScrub>();
  dout(10) << __func__ << " " << *m << dendl;
  switch (m->op) {
  case MMonScrub::OP_SCRUB:
    {
      if (!is_peon())
        break;
      wait_for_paxos_write();
      if (m->version != paxos->get_version())
        break;
      MMonScrub *reply = new MMonScrub(MMonScrub::OP_RESULT,
                                       m->version,
                                       m->num_keys);
      reply->key = m->key;
      _scrub(&reply->result, &reply->key, &reply->num_keys);
      m->get_connection()->send_message(reply);
    }
    break;
  case MMonScrub::OP_RESULT:
    {
      if (!is_leader())
        break;
      if (m->version != scrub_version)
        break;
      // reset the timeout each time we get a result
      scrub_reset_timeout();
      int from = m->get_source().num();
      ceph_assert(scrub_result.count(from) == 0);
      scrub_result[from] = m->result;
      if (scrub_result.size() == quorum.size()) {
        scrub_check_results();
        scrub_result.clear();
        if (scrub_state->finished)
          scrub_finish();
        else
          scrub();
      }
    }
    break;
  }
}
// Scrub up to *num_keys store keys starting after *start, accumulating
// a per-prefix key count and rolling CRC into *r.  On return *start
// holds the last key visited and *num_keys the number actually
// scrubbed.  Returns true if more chunks remain.  The two
// mon_scrub_inject_* options deliberately corrupt the result for
// testing.
bool Monitor::_scrub(ScrubResult *r,
                     pair<string,string> *start,
                     int *num_keys)
{
  ceph_assert(r != NULL);
  ceph_assert(start != NULL);
  ceph_assert(num_keys != NULL);
  set<string> prefixes = get_sync_targets_names();
  prefixes.erase("paxos");  // exclude paxos, as this one may have extra states for proposals, etc.
  dout(10) << __func__ << " start (" << *start << ")"
           << " num_keys " << *num_keys << dendl;
  MonitorDBStore::Synchronizer it = store->get_synchronizer(*start, prefixes);
  int scrubbed_keys = 0;
  pair<string,string> last_key;
  while (it->has_next_chunk()) {
    // num_keys < 0 means "no limit" (single-monitor quorum)
    if (*num_keys > 0 && scrubbed_keys == *num_keys)
      break;
    pair<string,string> k = it->get_next_key();
    if (prefixes.count(k.first) == 0)
      continue;
    if (cct->_conf->mon_scrub_inject_missing_keys > 0.0 &&
        (rand() % 10000 < cct->_conf->mon_scrub_inject_missing_keys*10000.0)) {
      dout(10) << __func__ << " inject missing key, skipping (" << k << ")"
               << dendl;
      continue;
    }
    bufferlist bl;
    int err = store->get(k.first, k.second, bl);
    ceph_assert(err == 0);
    uint32_t key_crc = bl.crc32c(0);
    dout(30) << __func__ << " " << k << " bl " << bl.length() << " bytes"
             << " crc " << key_crc << dendl;
    r->prefix_keys[k.first]++;
    if (r->prefix_crc.count(k.first) == 0) {
      r->prefix_crc[k.first] = 0;
    }
    // chain the prefix CRC through each key's contents
    r->prefix_crc[k.first] = bl.crc32c(r->prefix_crc[k.first]);
    if (cct->_conf->mon_scrub_inject_crc_mismatch > 0.0 &&
        (rand() % 10000 < cct->_conf->mon_scrub_inject_crc_mismatch*10000.0)) {
      dout(10) << __func__ << " inject failure at (" << k << ")" << dendl;
      r->prefix_crc[k.first] += 1;
    }
    ++scrubbed_keys;
    last_key = k;
  }
  dout(20) << __func__ << " last_key (" << last_key << ")"
           << " scrubbed_keys " << scrubbed_keys
           << " has_next " << it->has_next_chunk() << dendl;
  *start = last_key;
  *num_keys = scrubbed_keys;
  return it->has_next_chunk();
}
// Leader-only: compare every peer's scrub result against our own and
// log a cluster-log error for each mismatch, or a debug "scrub ok"
// line when everything agrees.
void Monitor::scrub_check_results()
{
  dout(10) << __func__ << dendl;
  int errors = 0;
  ScrubResult& mine = scrub_result[rank];
  for (const auto& [from, result] : scrub_result) {
    if (from == rank)
      continue;
    if (result != mine) {
      ++errors;
      clog->error() << "scrub mismatch";
      clog->error() << " mon." << rank << " " << mine;
      clog->error() << " mon." << from << " " << result;
    }
  }
  if (!errors)
    clog->debug() << "scrub ok on " << quorum << ": " << mine;
}
// Fired when peers fail to return scrub results in time: abandon the
// current scrub and start over from scratch.
inline void Monitor::scrub_timeout()
{
  dout(1) << __func__ << " restarting scrub" << dendl;
  scrub_reset();
  scrub_start();
}
// Scrub completed: clear state and schedule the next periodic scrub.
void Monitor::scrub_finish()
{
  dout(10) << __func__ << dendl;
  scrub_reset();
  scrub_event_start();
}
// Drop all in-flight scrub state (timeout event, version, collected
// results and progress cursor).
void Monitor::scrub_reset()
{
  dout(10) << __func__ << dendl;
  scrub_cancel_timeout();
  scrub_version = 0;
  scrub_result.clear();
  scrub_state.reset();
}
// React to a change of mon_scrub_interval: reschedule the periodic
// scrub event with the new interval.  Only the leader schedules
// scrubs, and a scrub already in progress will pick the change up on
// its next scheduling anyway.
inline void Monitor::scrub_update_interval(ceph::timespan interval)
{
  // we don't care about changes if we are not the leader.
  // changes will be visible if we become the leader.
  if (!is_leader())
    return;
  dout(1) << __func__ << " new interval = " << interval << dendl;
  // if scrub already in progress, all changes will already be visible during
  // the next round. Nothing to do.
  if (scrub_state != NULL)
    return;
  scrub_event_cancel();
  scrub_event_start();
}
// Arm the periodic scrub timer.  A mon_scrub_interval of zero disables
// automatic scrubs entirely.
void Monitor::scrub_event_start()
{
  dout(10) << __func__ << dendl;
  if (scrub_event)
    scrub_event_cancel();
  auto scrub_interval =
    cct->_conf.get_val<std::chrono::seconds>("mon_scrub_interval");
  if (scrub_interval == std::chrono::seconds::zero()) {
    dout(1) << __func__ << " scrub event is disabled"
            << " (mon_scrub_interval = " << scrub_interval
            << ")" << dendl;
    return;
  }
  scrub_event = timer.add_event_after(
    scrub_interval,
    new C_MonContext{this, [this](int) {
      scrub_start();
      }});
}
// Cancel the periodic scrub timer, if armed.
void Monitor::scrub_event_cancel()
{
  dout(10) << __func__ << dendl;
  if (scrub_event) {
    timer.cancel_event(scrub_event);
    scrub_event = NULL;
  }
}
// Cancel the per-chunk scrub timeout, if armed.
inline void Monitor::scrub_cancel_timeout()
{
  if (scrub_timeout_event) {
    timer.cancel_event(scrub_timeout_event);
    scrub_timeout_event = NULL;
  }
}
// (Re)arm the scrub timeout; fires scrub_timeout() if peers take longer
// than mon_scrub_timeout to return their chunk results.
void Monitor::scrub_reset_timeout()
{
  dout(15) << __func__ << " reset timeout event" << dendl;
  scrub_cancel_timeout();
  scrub_timeout_event = timer.add_event_after(
    g_conf()->mon_scrub_timeout,
    new C_MonContext{this, [this](int) {
      scrub_timeout();
    }});
}
/************ TICK ***************/
// Schedule the next tick() mon_tick_interval seconds from now; tick()
// itself re-arms this, forming the monitor's periodic heartbeat.
void Monitor::new_tick()
{
  timer.add_event_after(g_conf()->mon_tick_interval, new C_MonContext{this, [this](int) {
	tick();
      }});
}
// Periodic housekeeping, run every mon_tick_interval seconds: emit
// rate-limited health-check update log lines (leader only), tick and
// trim every paxos service, expire idle client sessions, trim sync
// providers, retry queued quorum waiters, propose a cluster
// fingerprint on upgraded clusters that lack one, report health
// metrics to the mgr, and finally reschedule itself.
void Monitor::tick()
{
  // ok go.
  dout(11) << "tick" << dendl;
  const utime_t now = ceph_clock_now();
  // Check if we need to emit any delayed health check updated messages
  if (is_leader()) {
    const auto min_period = g_conf().get_val<int64_t>(
                              "mon_health_log_update_period");
    for (auto& svc : paxos_service) {
      auto health = svc->get_health_checks();
      for (const auto &i : health.checks) {
        const std::string &code = i.first;
        const std::string &summary = i.second.summary;
        const health_status_t severity = i.second.severity;
        auto status_iter = health_check_log_times.find(code);
        if (status_iter == health_check_log_times.end()) {
          continue;
        }
        auto &log_status = status_iter->second;
        bool const changed = log_status.last_message != summary
                             || log_status.severity != severity;
        // only log a check again if it changed AND enough time passed
        if (changed && now - log_status.updated_at > min_period) {
          log_status.last_message = summary;
          log_status.updated_at = now;
          log_status.severity = severity;
          ostringstream ss;
          ss << "Health check update: " << summary << " (" << code << ")";
          clog->health(severity) << ss.str();
        }
      }
    }
  }
  for (auto& svc : paxos_service) {
    svc->tick();
    svc->maybe_trim();
  }
  // trim sessions
  {
    std::lock_guard l(session_map_lock);
    auto p = session_map.sessions.begin();
    bool out_for_too_long = (!exited_quorum.is_zero() &&
			     now > (exited_quorum + 2*g_conf()->mon_lease));
    while (!p.end()) {
      MonSession *s = *p;
      ++p;
      // don't trim monitors
      if (s->name.is_mon())
	continue;
      if (s->session_timeout < now && s->con) {
	// check keepalive, too
	s->session_timeout = s->con->get_last_keepalive();
	s->session_timeout += g_conf()->mon_session_timeout;
      }
      if (s->session_timeout < now) {
	dout(10) << " trimming session " << s->con << " " << s->name
		 << " " << s->addrs
		 << " (timeout " << s->session_timeout
		 << " < now " << now << ")" << dendl;
      } else if (out_for_too_long) {
	// boot the client Session because we've taken too long getting back in
	dout(10) << " trimming session " << s->con << " " << s->name
		 << " because we've been out of quorum too long" << dendl;
      } else {
	continue;
      }
      s->con->mark_down();
      remove_session(s);
      logger->inc(l_mon_session_trim);
    }
  }
  sync_trim_providers();
  if (!maybe_wait_for_quorum.empty()) {
    finish_contexts(g_ceph_context, maybe_wait_for_quorum);
  }
  if (is_leader() && paxos->is_active() && fingerprint.is_zero()) {
    // this is only necessary on upgraded clusters.
    MonitorDBStore::TransactionRef t = paxos->get_pending_transaction();
    prepare_new_fingerprint(t);
    paxos->trigger_propose();
  }
  mgr_client.update_daemon_health(get_health_metrics());
  new_tick();
}
// Build the daemon health metrics reported to the mgr: currently a
// single SLOW_OPS metric counting tracked ops older than
// mon_op_complaint_time, with the age of the oldest such op.
vector<DaemonHealthMetric> Monitor::get_health_metrics()
{
  vector<DaemonHealthMetric> metrics;
  utime_t oldest_secs;
  const utime_t now = ceph_clock_now();
  auto too_old = now;
  too_old -= g_conf().get_val<std::chrono::seconds>("mon_op_complaint_time").count();
  int slow = 0;
  TrackedOpRef oldest_op;
  // visitor: count ops initiated before the complaint threshold and
  // remember the oldest one for logging
  auto count_slow_ops = [&](TrackedOp& op) {
    if (op.get_initiated() < too_old) {
      slow++;
      if (!oldest_op || op.get_initiated() < oldest_op->get_initiated()) {
	oldest_op = &op;
      }
      return true;
    } else {
      return false;
    }
  };
  if (op_tracker.visit_ops_in_flight(&oldest_secs, count_slow_ops)) {
    if (slow) {
      derr << __func__ << " reporting " << slow << " slow ops, oldest is "
	   << oldest_op->get_desc() << dendl;
    }
    metrics.emplace_back(daemon_metric::SLOW_OPS, slow, oldest_secs);
  } else {
    metrics.emplace_back(daemon_metric::SLOW_OPS, 0, 0);
  }
  return metrics;
}
// Generate a fresh random cluster fingerprint and stage it in the given
// transaction (proposed via paxos by the caller).
void Monitor::prepare_new_fingerprint(MonitorDBStore::TransactionRef t)
{
  uuid_d nf;
  nf.generate_random();
  dout(10) << __func__ << " proposing cluster_fingerprint " << nf << dendl;
  bufferlist bl;
  encode(nf, bl);
  t->put(MONITOR_NAME, "cluster_fingerprint", bl);
}
// Verify that the cluster uuid stored on disk matches the monmap's
// fsid.  Returns 0 on match, -ENOENT if nothing is stored yet, -EINVAL
// if the stored value does not parse, -EEXIST on a mismatch.
int Monitor::check_fsid()
{
  bufferlist ebl;
  int r = store->get(MONITOR_NAME, "cluster_uuid", ebl);
  if (r == -ENOENT)
    return r;
  ceph_assert(r == 0);
  string es(ebl.c_str(), ebl.length());
  // only keep the first line
  size_t pos = es.find_first_of('\n');
  if (pos != string::npos)
    es.resize(pos);
  dout(10) << "check_fsid cluster_uuid contains '" << es << "'" << dendl;
  uuid_d ondisk;
  if (!ondisk.parse(es.c_str())) {
    derr << "error: unable to parse uuid" << dendl;
    return -EINVAL;
  }
  if (monmap->get_fsid() != ondisk) {
    derr << "error: cluster_uuid file exists with value " << ondisk
	 << ", != our uuid " << monmap->get_fsid() << dendl;
    return -EEXIST;
  }
  return 0;
}
// Persist the monmap's fsid to the store in its own transaction;
// returns the store's apply_transaction() result.
int Monitor::write_fsid()
{
  auto t(std::make_shared<MonitorDBStore::Transaction>());
  write_fsid(t);
  int r = store->apply_transaction(t);
  return r;
}
// Stage the monmap's fsid (as a newline-terminated string) under the
// "cluster_uuid" key in the given transaction.  Always returns 0.
int Monitor::write_fsid(MonitorDBStore::TransactionRef t)
{
  ostringstream oss;
  oss << monmap->get_fsid() << "\n";
  bufferlist bl;
  bl.append(oss.str());
  t->put(MONITOR_NAME, "cluster_uuid", bl);
  return 0;
}
/*
* this is the closest thing to a traditional 'mkfs' for ceph.
* initialize the monitor state machines to their initial values.
*/
/*
 * Initialize the monitor's on-disk state: magic, feature set, fsid,
 * the seed monmap, an optional seed osdmap, and (when cephx requires
 * it) the initial keyring.  Seed data goes under the "mkfs" prefix and
 * is consumed by the state machines on first startup.  Returns 0 on
 * success or a negative errno.
 */
int Monitor::mkfs(bufferlist& osdmapbl)
{
  auto t(std::make_shared<MonitorDBStore::Transaction>());
  // verify cluster fsid
  int r = check_fsid();
  if (r < 0 && r != -ENOENT)
    return r;
  bufferlist magicbl;
  magicbl.append(CEPH_MON_ONDISK_MAGIC);
  magicbl.append("\n");
  t->put(MONITOR_NAME, "magic", magicbl);
  features = get_initial_supported_features();
  write_features(t);
  // save monmap, osdmap, keyring.
  bufferlist monmapbl;
  monmap->encode(monmapbl, CEPH_FEATURES_ALL);
  monmap->set_epoch(0); // must be 0 to avoid confusing first MonmapMonitor::update_from_paxos()
  t->put("mkfs", "monmap", monmapbl);
  if (osdmapbl.length()) {
    // make sure it's a valid osdmap
    try {
      OSDMap om;
      om.decode(osdmapbl);
    }
    catch (ceph::buffer::error& e) {
      derr << "error decoding provided osdmap: " << e.what() << dendl;
      return -EINVAL;
    }
    t->put("mkfs", "osdmap", osdmapbl);
  }
  if (is_keyring_required()) {
    KeyRing keyring;
    string keyring_filename;
    r = ceph_resolve_file_search(g_conf()->keyring, keyring_filename);
    if (r) {
      // no keyring file found; fall back to an explicit --key, if given
      if (g_conf()->key != "") {
	string keyring_plaintext = "[mon.]\n\tkey = " + g_conf()->key +
	  "\n\tcaps mon = \"allow *\"\n";
	bufferlist bl;
	bl.append(keyring_plaintext);
	try {
	  auto i = bl.cbegin();
	  keyring.decode(i);
	}
	catch (const ceph::buffer::error& e) {
	  derr << "error decoding keyring " << keyring_plaintext
	       << ": " << e.what() << dendl;
	  return -EINVAL;
	}
      } else {
	derr << "unable to find a keyring on " << g_conf()->keyring
	     << ": " << cpp_strerror(r) << dendl;
	return r;
      }
    } else {
      r = keyring.load(g_ceph_context, keyring_filename);
      if (r < 0) {
	derr << "unable to load initial keyring " << g_conf()->keyring << dendl;
	return r;
      }
    }
    // put mon. key in external keyring; seed with everything else.
    extract_save_mon_key(keyring);
    bufferlist keyringbl;
    keyring.encode_plaintext(keyringbl);
    t->put("mkfs", "keyring", keyringbl);
  }
  write_fsid(t);
  store->apply_transaction(t);
  return 0;
}
int Monitor::write_default_keyring(bufferlist& bl)
{
  // Write the given plaintext-encoded keyring to <mon_data>/keyring,
  // creating the file with mode 0600.  Returns 0 on success or a
  // negative errno from open()/write.
  ostringstream os;
  os << g_conf()->mon_data << "/keyring";

  int err = 0;
  int fd = ::open(os.str().c_str(), O_WRONLY|O_CREAT|O_CLOEXEC, 0600);
  if (fd < 0) {
    err = -errno;
    dout(0) << __func__ << " failed to open " << os.str()
	    << ": " << cpp_strerror(err) << dendl;
    return err;
  }

  err = bl.write_fd(fd);
  if (!err)
    ::fsync(fd);  // NOTE(review): fsync/close errors are ignored — best effort
  VOID_TEMP_FAILURE_RETRY(::close(fd));

  return err;
}
void Monitor::extract_save_mon_key(KeyRing& keyring)
{
  // If the seed keyring carries the shared "mon." secret, peel it off
  // into the monitor's private on-disk keyring and drop it from the
  // input, so only the remaining entries get stored with everything
  // else.
  EntityName mon_entity;
  mon_entity.set_type(CEPH_ENTITY_TYPE_MON);

  EntityAuth mon_secret;
  if (!keyring.get_auth(mon_entity, mon_secret))
    return;

  dout(10) << "extract_save_mon_key moving mon. key to separate keyring" << dendl;
  KeyRing mon_only;
  mon_only.add(mon_entity, mon_secret);

  bufferlist bl;
  mon_only.encode_plaintext(bl);
  write_default_keyring(bl);

  keyring.remove(mon_entity);
}
// AuthClient methods -- for mon <-> mon communication
int Monitor::get_auth_request(
  Connection *con,
  AuthConnectionMeta *auth_meta,
  uint32_t *method,
  vector<uint32_t> *preferred_modes,
  bufferlist *out)
{
  std::scoped_lock l(auth_lock);

  // we only initiate connections to other monitors and the mgr; refuse
  // to author an auth request for any other peer type.
  const int peer_type = con->get_peer_type();
  if (peer_type != CEPH_ENTITY_TYPE_MON &&
      peer_type != CEPH_ENTITY_TYPE_MGR) {
    return -EACCES;
  }

  AuthAuthorizer *authorizer = nullptr;
  if (!get_authorizer(peer_type, &authorizer)) {
    return -EACCES;
  }
  auth_meta->authorizer.reset(authorizer);

  auth_registry.get_supported_modes(peer_type,
				    authorizer->protocol,
				    preferred_modes);
  *method = authorizer->protocol;
  *out = authorizer->bl;
  return 0;
}
int Monitor::handle_auth_reply_more(
  Connection *con,
  AuthConnectionMeta *auth_meta,
  const bufferlist& bl,
  bufferlist *reply)
{
  std::scoped_lock l(auth_lock);

  // a "more" reply only makes sense if we previously built an
  // authorizer in get_auth_request()
  if (!auth_meta->authorizer) {
    derr << __func__ << " no authorizer?" << dendl;
    return -EACCES;
  }

  // fold the server's challenge in and resend the updated authorizer
  auth_meta->authorizer->add_challenge(cct, bl);
  *reply = auth_meta->authorizer->bl;
  return 0;
}
int Monitor::handle_auth_done(
  Connection *con,
  AuthConnectionMeta *auth_meta,
  uint64_t global_id,
  uint32_t con_mode,
  const bufferlist& bl,
  CryptoKey *session_key,
  std::string *connection_secret)
{
  std::scoped_lock l(auth_lock);
  // Guard against a missing/cleared authorizer before dereferencing it;
  // handle_auth_reply_more() performs the same check.
  if (!auth_meta->authorizer) {
    derr << __func__ << " no authorizer?" << dendl;
    return -EACCES;
  }
  // verify authorizer reply
  auto p = bl.begin();
  if (!auth_meta->authorizer->verify_reply(p, connection_secret)) {
    dout(0) << __func__ << " failed verifying authorizer reply" << dendl;
    return -EACCES;
  }
  // adopt the session key negotiated via the authorizer
  auth_meta->session_key = auth_meta->authorizer->session_key;
  return 0;
}
int Monitor::handle_auth_bad_method(
  Connection *con,
  AuthConnectionMeta *auth_meta,
  uint32_t old_auth_method,
  int result,
  const std::vector<uint32_t>& allowed_methods,
  const std::vector<uint32_t>& allowed_modes)
{
  // The peer rejected the auth method we offered; log it and give up
  // rather than renegotiating.
  derr << __func__ << " hmm, they didn't like " << old_auth_method
       << " result " << cpp_strerror(result) << dendl;
  return -EACCES;
}
bool Monitor::get_authorizer(int service_id, AuthAuthorizer **authorizer)
{
  // Build an authorizer for an outgoing mon->mon or mon->mgr
  // connection.  Fills *authorizer and returns true on success.
  dout(10) << "get_authorizer for " << ceph_entity_type_name(service_id)
	   << dendl;

  if (is_shutdown())
    return false;

  // we only connect to other monitors and mgr; every else connects to us.
  if (service_id != CEPH_ENTITY_TYPE_MON &&
      service_id != CEPH_ENTITY_TYPE_MGR)
    return false;

  if (!auth_cluster_required.is_supported_auth(CEPH_AUTH_CEPHX)) {
    // auth_none
    dout(20) << __func__ << " building auth_none authorizer" << dendl;
    AuthNoneClientHandler handler{g_ceph_context};
    handler.set_global_id(0);
    *authorizer = handler.build_authorizer(service_id);
    return true;
  }

  CephXServiceTicketInfo auth_ticket_info;
  CephXSessionAuthInfo info;
  int ret;

  // the ticket is issued in the name of the generic "mon." entity
  EntityName name;
  name.set_type(CEPH_ENTITY_TYPE_MON);
  auth_ticket_info.ticket.name = name;
  auth_ticket_info.ticket.global_id = 0;

  if (service_id == CEPH_ENTITY_TYPE_MON) {
    // mon to mon authentication uses the private monitor shared key and not the
    // rotating key
    CryptoKey secret;
    if (!keyring.get_secret(name, secret) &&
	!key_server.get_secret(name, secret)) {
      dout(0) << " couldn't get secret for mon service from keyring or keyserver"
	      << dendl;

      // dump what *is* installed, to ease diagnosis
      stringstream ss, ds;
      int err = key_server.list_secrets(ds);
      if (err < 0)
	ss << "no installed auth entries!";
      else
	ss << "installed auth entries:";
      dout(0) << ss.str() << "\n" << ds.str() << dendl;
      return false;
    }

    ret = key_server.build_session_auth_info(
      service_id, auth_ticket_info.ticket, secret, (uint64_t)-1, info);
    if (ret < 0) {
      dout(0) << __func__ << " failed to build mon session_auth_info "
	      << cpp_strerror(ret) << dendl;
      return false;
    }
  } else if (service_id == CEPH_ENTITY_TYPE_MGR) {
    // mgr
    ret = key_server.build_session_auth_info(
      service_id, auth_ticket_info.ticket, info);
    if (ret < 0) {
      derr << __func__ << " failed to build mgr service session_auth_info "
	   << cpp_strerror(ret) << dendl;
      return false;
    }
  } else {
    ceph_abort();  // see check at top of fn
  }

  // encode the session info into a ticket blob and seed a
  // CephXTicketHandler with it, which then builds the wire authorizer
  CephXTicketBlob blob;
  if (!cephx_build_service_ticket_blob(cct, info, blob)) {
    dout(0) << "get_authorizer failed to build service ticket" << dendl;
    return false;
  }
  bufferlist ticket_data;
  encode(blob, ticket_data);

  auto iter = ticket_data.cbegin();
  CephXTicketHandler handler(g_ceph_context, service_id);
  decode(handler.ticket, iter);
  handler.session_key = info.session_key;
  *authorizer = handler.build_authorizer(0);
  return true;
}
// AuthServer entry point, invoked by the messenger before the
// Connection has fully negotiated.  Handles both legacy authorizer
// payloads (AUTH_MODE_AUTHORIZER*) and full msgr2 monitor-side
// authentication (AUTH_MODE_MON*).  Returns 1 when authentication is
// complete, 0 when a challenge/partial reply was produced (more rounds
// to come), or a negative errno on failure.  *reply receives any bytes
// to send back to the peer.
int Monitor::handle_auth_request(
  Connection *con,
  AuthConnectionMeta *auth_meta,
  bool more,
  uint32_t auth_method,
  const bufferlist &payload,
  bufferlist *reply)
{
  std::scoped_lock l(auth_lock);

  // NOTE: be careful, the Connection hasn't fully negotiated yet, so
  // e.g., peer_features, peer_addrs, and others are still unknown.

  dout(10) << __func__ << " con " << con << (more ? " (more)":" (start)")
	   << " method " << auth_method
	   << " payload " << payload.length()
	   << dendl;
  if (!payload.length()) {
    if (!con->is_msgr2() &&
	con->get_peer_type() != CEPH_ENTITY_TYPE_MON) {
      // for v1 connections, we tolerate no authorizer (from
      // non-monitors), because authentication happens via MAuth
      // messages.
      return 1;
    }
    return -EACCES;
  }
  if (!more) {
    // first byte of the first payload carries the auth mode
    auth_meta->auth_mode = payload[0];
  }

  if (auth_meta->auth_mode >= AUTH_MODE_AUTHORIZER &&
      auth_meta->auth_mode <= AUTH_MODE_AUTHORIZER_MAX) {
    AuthAuthorizeHandler *ah = get_auth_authorize_handler(con->get_peer_type(),
							  auth_method);
    if (!ah) {
      lderr(cct) << __func__ << " no AuthAuthorizeHandler found for auth method "
		 << auth_method << dendl;
      return -EOPNOTSUPP;
    }
    bool was_challenge = (bool)auth_meta->authorizer_challenge;
    bool isvalid = ah->verify_authorizer(
      cct,
      keyring,
      payload,
      auth_meta->get_connection_secret_length(),
      reply,
      &con->peer_name,
      &con->peer_global_id,
      &con->peer_caps_info,
      &auth_meta->session_key,
      &auth_meta->connection_secret,
      &auth_meta->authorizer_challenge);
    if (isvalid) {
      ms_handle_authentication(con);
      return 1;
    }
    // a freshly issued challenge is not a failure; let the peer retry
    if (!more && !was_challenge && auth_meta->authorizer_challenge) {
      return 0;
    }
    dout(10) << __func__ << " bad authorizer on " << con << dendl;
    return -EACCES;
  } else if (auth_meta->auth_mode < AUTH_MODE_MON ||
	     auth_meta->auth_mode > AUTH_MODE_MON_MAX) {
    derr << __func__ << " unrecognized auth mode " << auth_meta->auth_mode
	 << dendl;
    return -EACCES;
  }

  // wait until we've formed an initial quorum on mkfs so that we have
  // the initial keys (e.g., client.admin).
  if (authmon()->get_last_committed() == 0) {
    dout(10) << __func__ << " haven't formed initial quorum, EBUSY" << dendl;
    return -EBUSY;
  }

  RefCountedPtr priv;
  MonSession *s;
  int32_t r = 0;
  auto p = payload.begin();
  if (!more) {
    if (con->get_priv()) {
      return -EACCES; // wtf
    }

    // handler?
    unique_ptr<AuthServiceHandler> auth_handler{get_auth_service_handler(
	auth_method, g_ceph_context, &key_server)};
    if (!auth_handler) {
      dout(1) << __func__ << " auth_method " << auth_method << " not supported"
	      << dendl;
      return -EOPNOTSUPP;
    }

    uint8_t mode;
    EntityName entity_name;

    try {
      decode(mode, p);
      if (mode < AUTH_MODE_MON ||
	  mode > AUTH_MODE_MON_MAX) {
	dout(1) << __func__ << " invalid mode " << (int)mode << dendl;
	return -EACCES;
      }
      assert(mode >= AUTH_MODE_MON && mode <= AUTH_MODE_MON_MAX);
      decode(entity_name, p);
      decode(con->peer_global_id, p);
    } catch (ceph::buffer::error& e) {
      dout(1) << __func__ << " failed to decode, " << e.what() << dendl;
      return -EACCES;
    }

    // supported method?
    if (entity_name.get_type() == CEPH_ENTITY_TYPE_MON ||
	entity_name.get_type() == CEPH_ENTITY_TYPE_OSD ||
	entity_name.get_type() == CEPH_ENTITY_TYPE_MDS ||
	entity_name.get_type() == CEPH_ENTITY_TYPE_MGR) {
      if (!auth_cluster_required.is_supported_auth(auth_method)) {
	dout(10) << __func__ << " entity " << entity_name << " method "
		 << auth_method << " not among supported "
		 << auth_cluster_required.get_supported_set() << dendl;
	return -EOPNOTSUPP;
      }
    } else {
      if (!auth_service_required.is_supported_auth(auth_method)) {
	// FIX: log the *service* set here; this branch previously
	// printed auth_cluster_required's set by copy-paste mistake.
	dout(10) << __func__ << " entity " << entity_name << " method "
		 << auth_method << " not among supported "
		 << auth_service_required.get_supported_set() << dendl;
	return -EOPNOTSUPP;
      }
    }

    // for msgr1 we would do some weirdness here to ensure signatures
    // are supported by the client if we require it.  for msgr2 that
    // is not necessary.

    bool is_new_global_id = false;
    if (!con->peer_global_id) {
      con->peer_global_id = authmon()->_assign_global_id();
      if (!con->peer_global_id) {
	dout(1) << __func__ << " failed to assign global_id" << dendl;
	return -EBUSY;
      }
      is_new_global_id = true;
    }

    // set up partial session
    s = new MonSession(con);
    s->auth_handler = auth_handler.release();
    con->set_priv(RefCountedPtr{s, false});

    r = s->auth_handler->start_session(
      entity_name,
      con->peer_global_id,
      is_new_global_id,
      reply,
      &con->peer_caps_info);
  } else {
    priv = con->get_priv();
    if (!priv) {
      // this can happen if the async ms_handle_reset event races with
      // the unlocked call into handle_auth_request
      return -EACCES;
    }
    s = static_cast<MonSession*>(priv.get());
    r = s->auth_handler->handle_request(
      p,
      auth_meta->get_connection_secret_length(),
      reply,
      &con->peer_caps_info,
      &auth_meta->session_key,
      &auth_meta->connection_secret);
  }
  if (r > 0 &&
      !s->authenticated) {
    ms_handle_authentication(con);
  }

  dout(30) << " r " << r << " reply:\n";
  reply->hexdump(*_dout);
  *_dout << dendl;
  return r;
}
void Monitor::ms_handle_accept(Connection *con)
{
  // Messenger callback for a newly accepted connection: if a MonSession
  // was attached during authentication, register it in the session map
  // (unless we are shutting down, in which case the con is dropped).
  auto priv = con->get_priv();
  MonSession *s = static_cast<MonSession*>(priv.get());
  if (!s) {
    // legacy protocol v1?
    dout(10) << __func__ << " con " << con << " no session" << dendl;
    return;
  }

  if (s->item.is_on_list()) {
    // already registered by an earlier accept; nothing to do
    dout(10) << __func__ << " con " << con << " session " << s
	     << " already on list" << dendl;
  } else {
    std::lock_guard l(session_map_lock);
    if (state == STATE_SHUTDOWN) {
      dout(10) << __func__ << " ignoring new con " << con << " (shutdown)" << dendl;
      con->mark_down();
      return;
    }
    dout(10) << __func__ << " con " << con << " session " << s
	     << " registering session for "
	     << con->get_peer_addrs() << dendl;
    // now that the peer is known, fix up the session's identity
    s->_ident(entity_name_t(con->get_peer_type(), con->get_peer_id()),
	      con->get_peer_addrs());
    session_map.add_session(s);
  }
}
int Monitor::ms_handle_authentication(Connection *con)
{
  // Called once a connection has authenticated: make sure it has a
  // MonSession, then derive the session's caps from the AuthCapsInfo
  // negotiated during auth.  Returns 1 when the session is
  // authenticated, 0 when no caps were supplied, -EACCES on corrupt or
  // unparseable caps.
  if (con->get_peer_type() == CEPH_ENTITY_TYPE_MON) {
    // mon <-> mon connections need no Session, and setting one up
    // creates an awkward ref cycle between Session and Connection.
    return 1;
  }

  auto priv = con->get_priv();
  MonSession *s = static_cast<MonSession*>(priv.get());
  if (!s) {
    // must be msgr2, otherwise dispatch would have set up the session.
    s = session_map.new_session(
      entity_name_t(con->get_peer_type(), -1),  // we don't know yet
      con->get_peer_addrs(),
      con);
    assert(s);
    dout(10) << __func__ << " adding session " << s << " to con " << con
	     << dendl;
    con->set_priv(s);
    logger->set(l_mon_num_sessions, session_map.get_size());
    logger->inc(l_mon_session_add);
  }
  dout(10) << __func__ << " session " << s << " con " << con
	   << " addr " << s->con->get_peer_addr()
	   << " " << *s << dendl;

  AuthCapsInfo &caps_info = con->get_peer_caps_info();
  int ret = 0;
  if (caps_info.allow_all) {
    s->caps.set_allow_all();
    s->authenticated = true;
    ret = 1;
  } else if (caps_info.caps.length()) {
    // caps arrive as an encoded string; decode then parse into MonCap
    bufferlist::const_iterator p = caps_info.caps.cbegin();
    string str;
    try {
      decode(str, p);
    } catch (const ceph::buffer::error &err) {
      derr << __func__ << " corrupt cap data for " << con->get_peer_entity_name()
	   << " in auth db" << dendl;
      str.clear();
      ret = -EACCES;
    }
    if (ret >= 0) {
      if (s->caps.parse(str, NULL)) {
	s->authenticated = true;
	ret = 1;
      } else {
	derr << __func__ << " unparseable caps '" << str << "' for "
	     << con->get_peer_entity_name() << dendl;
	ret = -EACCES;
      }
    }
  }

  return ret;
}
void Monitor::set_mon_crush_location(const string& loc)
{
if (loc.empty()) {
return;
}
vector<string> loc_vec;
loc_vec.push_back(loc);
CrushWrapper::parse_loc_map(loc_vec, &crush_loc);
need_set_crush_loc = true;
}
void Monitor::notify_new_monmap(bool can_change_external_state, bool remove_rank_elector)
{
  // React to a newly committed monmap: refresh crush-location state,
  // propagate rank removals to the elector, engage/adjust stretch mode,
  // and recompute which ranks are disallowed from leading.
  if (need_set_crush_loc) {
    // if the map now carries our crush location, stop trying to set it
    auto my_info_i = monmap->mon_info.find(name);
    if (my_info_i != monmap->mon_info.end() &&
	my_info_i->second.crush_loc == crush_loc) {
      need_set_crush_loc = false;
    }
  }
  elector.notify_strategy_maybe_changed(monmap->strategy);

  if (remove_rank_elector){
    dout(10) << __func__ << " we have " << monmap->ranks.size()<< " ranks" << dendl;
    dout(10) << __func__ << " we have " << monmap->removed_ranks.size() << " removed ranks" << dendl;
    // walk removals in reverse order (highest rank first) — presumably
    // to keep rank renumbering consistent as entries drop out; confirm
    // against Elector::notify_rank_removed's expectations
    for (auto i = monmap->removed_ranks.rbegin();
	 i != monmap->removed_ranks.rend(); ++i) {
      int remove_rank = *i;
      dout(10) << __func__ << " removing rank " << remove_rank << dendl;
      if (rank == remove_rank) {
	dout(5) << "We are removing our own rank, probably we"
		<< " are removed from monmap before we shutdown ... dropping." << dendl;
	continue;
      }
      int new_rank = monmap->get_rank(messenger->get_myaddrs());
      if (new_rank == -1) {
	dout(5) << "We no longer exists in the monmap! ... dropping." << dendl;
	continue;
      }
      elector.notify_rank_removed(remove_rank, new_rank);
    }
  }

  if (monmap->stretch_mode_enabled) {
    try_engage_stretch_mode();
  }

  if (is_stretch_mode()) {
    if (!monmap->stretch_marked_down_mons.empty()) {
      // the monmap says some stretch mons are down; reflect that in our
      // in-memory degraded state
      dout(20) << __func__ << " stretch_marked_down_mons: " << monmap->stretch_marked_down_mons << dendl;
      set_degraded_stretch_mode();
    }
  }
  set_elector_disallowed_leaders(can_change_external_state);
}
void Monitor::set_elector_disallowed_leaders(bool allow_election)
{
set<int> dl;
for (auto name : monmap->disallowed_leaders) {
dl.insert(monmap->get_rank(name));
}
if (is_stretch_mode()) {
for (auto name : monmap->stretch_marked_down_mons) {
dl.insert(monmap->get_rank(name));
}
dl.insert(monmap->get_rank(monmap->tiebreaker_mon));
}
bool disallowed_changed = elector.set_disallowed_leaders(dl);
if (disallowed_changed && allow_election) {
elector.call_election();
}
}
struct CMonEnableStretchMode : public Context {
Monitor *m;
CMonEnableStretchMode(Monitor *mon) : m(mon) {}
void finish(int r) {
m->try_engage_stretch_mode();
}
};
void Monitor::try_engage_stretch_mode()
{
  // Turn on in-memory stretch mode once both the osdmap and the monmap
  // say it is enabled.  If the osdmon isn't readable yet, re-arm via a
  // CMonEnableStretchMode callback and retry later.
  dout(20) << __func__ << dendl;
  if (stretch_mode_engaged) return;
  if (!osdmon()->is_readable()) {
    dout(20) << "osdmon is not readable" << dendl;
    osdmon()->wait_for_readable_ctx(new CMonEnableStretchMode(this));
    return;
  }
  if (osdmon()->osdmap.stretch_mode_enabled &&
      monmap->stretch_mode_enabled) {
    dout(10) << "Engaging stretch mode!" << dendl;
    stretch_mode_engaged = true;
    // remember the CRUSH bucket type that divides the stretch sites
    int32_t stretch_divider_id = osdmon()->osdmap.stretch_mode_bucket;
    stretch_bucket_divider = osdmon()->osdmap.
      crush->get_type_name(stretch_divider_id);
    // drop OSD sessions whose CRUSH bucket doesn't match ours
    disconnect_disallowed_stretch_sessions();
  }
}
void Monitor::do_stretch_mode_election_work()
{
  // Leader-only post-election bookkeeping for stretch mode: recompute
  // which divider buckets have live monitors and which are fully dead,
  // then consider degrading when the dead set grew or changed.
  dout(20) << __func__ << dendl;
  if (!is_stretch_mode() ||
      !is_leader()) return;
  dout(20) << "checking for degraded stretch mode" << dendl;
  map<string, set<string>> old_dead_buckets;
  old_dead_buckets.swap(dead_mon_buckets);
  up_mon_buckets.clear();
  // identify if we've lost a CRUSH bucket, request OSDMonitor check for death
  map<string,set<string>> down_mon_buckets;
  for (unsigned i = 0; i < monmap->size(); ++i) {
    const auto &mi = monmap->mon_info[monmap->get_name(i)];
    auto ci = mi.crush_loc.find(stretch_bucket_divider);
    ceph_assert(ci != mi.crush_loc.end());
    if (quorum.count(i)) {
      up_mon_buckets.insert(ci->second);
    } else {
      down_mon_buckets[ci->second].insert(mi.name);
    }
  }
  dout(20) << "prior dead_mon_buckets: " << old_dead_buckets
	   << "; down_mon_buckets: " << down_mon_buckets
	   << "; up_mon_buckets: " << up_mon_buckets << dendl;
  // a bucket counts as dead only if *no* mon in it is in quorum
  for (const auto& di : down_mon_buckets) {
    if (!up_mon_buckets.count(di.first)) {
      dead_mon_buckets[di.first] = di.second;
    }
  }
  dout(20) << "new dead_mon_buckets " << dead_mon_buckets << dendl;

  // only escalate when the dead set changed and did not shrink
  if (dead_mon_buckets != old_dead_buckets &&
      dead_mon_buckets.size() >= old_dead_buckets.size()) {
    maybe_go_degraded_stretch_mode();
  }
}
struct CMonGoDegraded : public Context {
Monitor *m;
CMonGoDegraded(Monitor *mon) : m(mon) {}
void finish(int r) {
m->maybe_go_degraded_stretch_mode();
}
};
struct CMonGoRecovery : public Context {
Monitor *m;
CMonGoRecovery(Monitor *mon) : m(mon) {}
void finish(int r) {
m->go_recovery_stretch_mode();
}
};
void Monitor::go_recovery_stretch_mode()
{
  // Leader-only: transition from degraded stretch mode into recovery
  // once no monitor buckets remain dead.  Re-arms via CMonGoRecovery
  // callbacks until the osdmon is both readable and writeable, then
  // triggers the osdmap-side recovery transition.
  dout(20) << __func__ << dendl;
  dout(20) << "is_leader(): " << is_leader() << dendl;
  if (!is_leader()) return;
  dout(20) << "is_degraded_stretch_mode(): " << is_degraded_stretch_mode() << dendl;
  if (!is_degraded_stretch_mode()) return;
  dout(20) << "is_recovering_stretch_mode(): " << is_recovering_stretch_mode() << dendl;
  if (is_recovering_stretch_mode()) return;
  dout(20) << "dead_mon_buckets.size(): " << dead_mon_buckets.size() << dendl;
  dout(20) << "dead_mon_buckets: " << dead_mon_buckets << dendl;
  if (dead_mon_buckets.size()) {
    ceph_assert( 0 == "how did we try and do stretch recovery while we have dead monitor buckets?");
    // we can't recover if we are missing monitors in a zone!
    return;
  }

  if (!osdmon()->is_readable()) {
    dout(20) << "osdmon is not readable" << dendl;
    osdmon()->wait_for_readable_ctx(new CMonGoRecovery(this));
    return;
  }

  if (!osdmon()->is_writeable()) {
    dout(20) << "osdmon is not writeable" << dendl;
    osdmon()->wait_for_writeable_ctx(new CMonGoRecovery(this));
    return;
  }
  osdmon()->trigger_recovery_stretch_mode();
}
void Monitor::set_recovery_stretch_mode()
{
  // Recovery implies we are still degraded; mirror the state change
  // into the OSDMonitor as well.
  recovering_stretch_mode = true;
  degraded_stretch_mode = true;
  osdmon()->set_recovery_stretch_mode();
}
void Monitor::maybe_go_degraded_stretch_mode()
{
  // Leader-only: when whole monitor buckets are dead, ask the
  // OSDMonitor whether the matching CRUSH zones are dead on the OSD
  // side too, and if so trigger the degraded-stretch-mode transition.
  // Re-arms itself via CMonGoDegraded while waiting on paxos
  // readability/writeability.
  dout(20) << __func__ << dendl;
  if (is_degraded_stretch_mode()) return;
  if (!is_leader()) return;
  if (dead_mon_buckets.empty()) return;
  if (!osdmon()->is_readable()) {
    osdmon()->wait_for_readable_ctx(new CMonGoDegraded(this));
    return;
  }
  ceph_assert(monmap->contains(monmap->tiebreaker_mon));
  // filter out the tiebreaker zone and check if remaining sites are down by OSDs too
  const auto &mi = monmap->mon_info[monmap->tiebreaker_mon];
  auto ci = mi.crush_loc.find(stretch_bucket_divider);
  map<string, set<string>> filtered_dead_buckets = dead_mon_buckets;
  filtered_dead_buckets.erase(ci->second);

  set<int> matched_down_buckets;
  set<string> matched_down_mons;
  bool dead = osdmon()->check_for_dead_crush_zones(filtered_dead_buckets,
						   &matched_down_buckets,
						   &matched_down_mons);
  if (dead) {
    // need both osdmon and monmon writeable before committing the change
    if (!osdmon()->is_writeable()) {
      dout(20) << "osdmon is not writeable" << dendl;
      osdmon()->wait_for_writeable_ctx(new CMonGoDegraded(this));
      return;
    }
    if (!monmon()->is_writeable()) {
      dout(20) << "monmon is not writeable" << dendl;
      monmon()->wait_for_writeable_ctx(new CMonGoDegraded(this));
      return;
    }
    trigger_degraded_stretch_mode(matched_down_mons, matched_down_buckets);
  }
}
void Monitor::trigger_degraded_stretch_mode(const set<string>& dead_mons,
					    const set<int>& dead_buckets)
{
  // Commit the degraded-stretch-mode transition: tell the OSDMonitor
  // which buckets died and which zone stays live, tell the
  // MonmapMonitor which mons died, and flip our in-memory state.
  // Both paxos services must already be writeable (asserted below;
  // callers check before invoking).
  dout(20) << __func__ << dendl;
  ceph_assert(osdmon()->is_writeable());
  ceph_assert(monmon()->is_writeable());

  // figure out which OSD zone(s) remains alive by removing
  // tiebreaker mon from up_mon_buckets
  set<string> live_zones = up_mon_buckets;
  ceph_assert(monmap->contains(monmap->tiebreaker_mon));
  const auto &mi = monmap->mon_info[monmap->tiebreaker_mon];
  auto ci = mi.crush_loc.find(stretch_bucket_divider);
  live_zones.erase(ci->second);
  ceph_assert(live_zones.size() == 1); // only support 2 zones right now

  osdmon()->trigger_degraded_stretch_mode(dead_buckets, live_zones);
  monmon()->trigger_degraded_stretch_mode(dead_mons);
  set_degraded_stretch_mode();
}
void Monitor::set_degraded_stretch_mode()
{
  dout(20) << __func__ << dendl;
  // entering degraded mode cancels any in-progress recovery
  recovering_stretch_mode = false;
  degraded_stretch_mode = true;
  osdmon()->set_degraded_stretch_mode();
}
struct CMonGoHealthy : public Context {
Monitor *m;
CMonGoHealthy(Monitor *mon) : m(mon) {}
void finish(int r) {
m->trigger_healthy_stretch_mode();
}
};
void Monitor::trigger_healthy_stretch_mode()
{
  // Leader-only: once recovery has completed, commit the transition
  // back to healthy stretch mode in both the osdmap and the monmap.
  // Re-arms via CMonGoHealthy while waiting for writeability.
  dout(20) << __func__ << dendl;
  if (!is_degraded_stretch_mode()) return;
  if (!is_leader()) return;
  if (!osdmon()->is_writeable()) {
    dout(20) << "osdmon is not writeable" << dendl;
    osdmon()->wait_for_writeable_ctx(new CMonGoHealthy(this));
    return;
  }
  if (!monmon()->is_writeable()) {
    dout(20) << "monmon is not writeable" << dendl;
    monmon()->wait_for_writeable_ctx(new CMonGoHealthy(this));
    return;
  }

  // we should only get here out of the recovering state
  ceph_assert(osdmon()->osdmap.recovering_stretch_mode);
  osdmon()->trigger_healthy_stretch_mode();
  monmon()->trigger_healthy_stretch_mode();
}
void Monitor::set_healthy_stretch_mode()
{
  // back to fully healthy: clear both degraded and recovering flags
  recovering_stretch_mode = false;
  degraded_stretch_mode = false;
  osdmon()->set_healthy_stretch_mode();
}
bool Monitor::session_stretch_allowed(MonSession *s, MonOpRequestRef& op)
{
  // In stretch mode, an OSD session is only allowed on a monitor in the
  // same CRUSH divider bucket as the OSD.  Returns true when the
  // session may stay; returns false after tearing the session down (and
  // zapping the op, if any) so the OSD reconnects elsewhere.
  if (!is_stretch_mode()) return true;
  if (s->proxy_con) return true;
  if (s->validated_stretch_connection) return true;
  if (!s->con) return true;
  if (s->con->peer_is_osd()) {
    dout(20) << __func__ << "checking OSD session" << s << dendl;
    // okay, check the crush location
    int barrier_id = [&] {
      auto type_id = osdmon()->osdmap.crush->get_validated_type_id(
	stretch_bucket_divider);
      ceph_assert(type_id.has_value());
      return *type_id;
    }();
    // the OSD's ancestor bucket of the divider type...
    int osd_bucket_id = osdmon()->osdmap.crush->get_parent_of_type(s->con->peer_id,
								   barrier_id);
    // ...versus our own bucket from the monmap's crush_loc
    const auto &mi = monmap->mon_info.find(name);
    ceph_assert(mi != monmap->mon_info.end());
    auto ci = mi->second.crush_loc.find(stretch_bucket_divider);
    ceph_assert(ci != mi->second.crush_loc.end());
    int mon_bucket_id = osdmon()->osdmap.crush->get_item_id(ci->second);

    if (osd_bucket_id != mon_bucket_id) {
      dout(5) << "discarding session " << *s
	      << " and sending OSD to matched zone" << dendl;
      s->con->mark_down();
      std::lock_guard l(session_map_lock);
      remove_session(s);
      if (op) {
	op->mark_zap();
      }
      return false;
    }
  }
  // cache the verdict so the CRUSH lookup isn't repeated per message
  s->validated_stretch_connection = true;
  return true;
}
void Monitor::disconnect_disallowed_stretch_sessions()
{
dout(20) << __func__ << dendl;
MonOpRequestRef blank;
auto i = session_map.sessions.begin();
while (i != session_map.sessions.end()) {
auto j = i;
++i;
session_stretch_allowed(*j, blank);
}
}
| 208,966 | 29.097508 | 125 |
cc
|
null |
ceph-main/src/mon/Monitor.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
/*
* This is the top level monitor. It runs on each machine in the Monitor
* Cluster. The election of a leader for the paxos algorithm only happens
* once per machine via the elector. There is a separate paxos instance (state)
* kept for each of the system components: Object Store Device (OSD) Monitor,
* Placement Group (PG) Monitor, Metadata Server (MDS) Monitor, and Client Monitor.
*/
#ifndef CEPH_MONITOR_H
#define CEPH_MONITOR_H
#include <errno.h>
#include <cmath>
#include <string>
#include <array>
#include "include/types.h"
#include "include/health.h"
#include "msg/Messenger.h"
#include "common/Timer.h"
#include "health_check.h"
#include "MonMap.h"
#include "Elector.h"
#include "Paxos.h"
#include "Session.h"
#include "MonCommand.h"
#include "common/config_obs.h"
#include "common/LogClient.h"
#include "auth/AuthClient.h"
#include "auth/AuthServer.h"
#include "auth/cephx/CephxKeyServer.h"
#include "auth/AuthMethodList.h"
#include "auth/KeyRing.h"
#include "include/common_fwd.h"
#include "messages/MMonCommand.h"
#include "mon/MonitorDBStore.h"
#include "mgr/MgrClient.h"
#include "mon/MonOpRequest.h"
#include "common/WorkQueue.h"
using namespace TOPNSPC::common;
#define CEPH_MON_PROTOCOL 13 /* cluster internal */
// Perf counter ids for cluster-wide statistics; l_cluster_first and
// l_cluster_last bound the id range handed to PerfCounters.  The ids in
// between name the individual gauges (mon/osd/pg/object/byte counts).
enum {
  l_cluster_first = 555000,
  l_cluster_num_mon,
  l_cluster_num_mon_quorum,
  l_cluster_num_osd,
  l_cluster_num_osd_up,
  l_cluster_num_osd_in,
  l_cluster_osd_epoch,
  l_cluster_osd_bytes,
  l_cluster_osd_bytes_used,
  l_cluster_osd_bytes_avail,
  l_cluster_num_pool,
  l_cluster_num_pg,
  l_cluster_num_pg_active_clean,
  l_cluster_num_pg_active,
  l_cluster_num_pg_peering,
  l_cluster_num_object,
  l_cluster_num_object_degraded,
  l_cluster_num_object_misplaced,
  l_cluster_num_object_unfound,
  l_cluster_num_bytes,
  l_cluster_last,  // end-of-range marker
};
// Perf counter ids for monitor-local statistics (sessions, elections);
// l_mon_first and l_mon_last bound the id range for PerfCounters.
// l_mon_num_sessions / l_mon_session_add are updated from
// ms_handle_authentication() when new sessions are created.
enum {
  l_mon_first = 456000,
  l_mon_num_sessions,
  l_mon_session_add,
  l_mon_session_rm,
  l_mon_session_trim,
  l_mon_num_elections,
  l_mon_election_call,
  l_mon_election_win,
  l_mon_election_lose,
  l_mon_last,  // end-of-range marker
};
class PaxosService;
class AdminSocketHook;
#define COMPAT_SET_LOC "feature_set"
class Monitor : public Dispatcher,
public AuthClient,
public AuthServer,
public md_config_obs_t {
public:
int orig_argc = 0;
const char **orig_argv = nullptr;
// me
std::string name;
int rank;
Messenger *messenger;
ConnectionRef con_self;
ceph::mutex lock = ceph::make_mutex("Monitor::lock");
SafeTimer timer;
Finisher finisher;
ThreadPool cpu_tp; ///< threadpool for CPU intensive work
ceph::mutex auth_lock = ceph::make_mutex("Monitor::auth_lock");
/// true if we have ever joined a quorum. if false, we are either a
/// new cluster, a newly joining monitor, or a just-upgraded
/// monitor.
bool has_ever_joined;
PerfCounters *logger, *cluster_logger;
bool cluster_logger_registered;
void register_cluster_logger();
void unregister_cluster_logger();
MonMap *monmap;
uuid_d fingerprint;
std::set<entity_addrvec_t> extra_probe_peers;
LogClient log_client;
LogChannelRef clog;
LogChannelRef audit_clog;
KeyRing keyring;
KeyServer key_server;
AuthMethodList auth_cluster_required;
AuthMethodList auth_service_required;
CompatSet features;
std::vector<MonCommand> leader_mon_commands; // quorum leader's commands
std::vector<MonCommand> local_mon_commands; // commands i support
ceph::buffer::list local_mon_commands_bl; // encoded version of above
std::vector<MonCommand> prenautilus_local_mon_commands;
ceph::buffer::list prenautilus_local_mon_commands_bl;
Messenger *mgr_messenger;
MgrClient mgr_client;
uint64_t mgr_proxy_bytes = 0; // in-flight proxied mgr command message bytes
std::string gss_ktfile_client{};
private:
void new_tick();
// -- local storage --
public:
MonitorDBStore *store;
static const std::string MONITOR_NAME;
static const std::string MONITOR_STORE_PREFIX;
// -- monitor state --
private:
enum {
STATE_INIT = 1,
STATE_PROBING,
STATE_SYNCHRONIZING,
STATE_ELECTING,
STATE_LEADER,
STATE_PEON,
STATE_SHUTDOWN
};
int state = STATE_INIT;
public:
static const char *get_state_name(int s) {
switch (s) {
case STATE_PROBING: return "probing";
case STATE_SYNCHRONIZING: return "synchronizing";
case STATE_ELECTING: return "electing";
case STATE_LEADER: return "leader";
case STATE_PEON: return "peon";
case STATE_SHUTDOWN: return "shutdown";
default: return "???";
}
}
const char *get_state_name() const {
return get_state_name(state);
}
bool is_init() const { return state == STATE_INIT; }
bool is_shutdown() const { return state == STATE_SHUTDOWN; }
bool is_probing() const { return state == STATE_PROBING; }
bool is_synchronizing() const { return state == STATE_SYNCHRONIZING; }
bool is_electing() const { return state == STATE_ELECTING; }
bool is_leader() const { return state == STATE_LEADER; }
bool is_peon() const { return state == STATE_PEON; }
const utime_t &get_leader_since() const;
void prepare_new_fingerprint(MonitorDBStore::TransactionRef t);
std::vector<DaemonHealthMetric> get_health_metrics();
int quorum_age() const {
auto age = std::chrono::duration_cast<std::chrono::seconds>(
ceph::mono_clock::now() - quorum_since);
return age.count();
}
bool is_mon_down() const {
int max = monmap->size();
int actual = get_quorum().size();
auto now = ceph::real_clock::now();
return actual < max && now > monmap->created.to_real_time();
}
// -- elector --
private:
std::unique_ptr<Paxos> paxos;
Elector elector;
friend class Elector;
/// features we require of peers (based on on-disk compatset)
uint64_t required_features;
int leader; // current leader (to best of knowledge)
std::set<int> quorum; // current active set of monitors (if !starting)
ceph::mono_clock::time_point quorum_since; // when quorum formed
utime_t leader_since; // when this monitor became the leader, if it is the leader
  utime_t exited_quorum; // time detected as not in quorum; 0 if in
  // map of counts of connected clients, by type and features, for
  // each quorum mon
  std::map<int,FeatureMap> quorum_feature_map;
  /**
   * Intersection of quorum member's connection feature bits.
   */
  uint64_t quorum_con_features;
  /**
   * Intersection of quorum members mon-specific feature bits
   */
  mon_feature_t quorum_mon_features;
  /// lowest ceph release common to the quorum; unknown until an election completes
  ceph_release_t quorum_min_mon_release{ceph_release_t::unknown};
  /// monitors we have heard from but which are not in the quorum
  std::set<std::string> outside_quorum;

  // ---- stretch mode state ----
  bool stretch_mode_engaged{false};    ///< stretch mode is active
  bool degraded_stretch_mode{false};   ///< stretch cluster is running degraded
  bool recovering_stretch_mode{false}; ///< recovering from degraded back to healthy
  /// CRUSH bucket type dividing the cluster into stretch sites
  std::string stretch_bucket_divider;
  std::map<std::string, std::set<std::string>> dead_mon_buckets; // bucket->mon ranks, locations with no live mons
  std::set<std::string> up_mon_buckets; // locations with a live mon
  void do_stretch_mode_election_work();
  bool session_stretch_allowed(MonSession *s, MonOpRequestRef& op);
  void disconnect_disallowed_stretch_sessions();
  void set_elector_disallowed_leaders(bool allow_election);
  /// this monitor's own CRUSH location; applied when need_set_crush_loc is true
  std::map<std::string,std::string> crush_loc;
  bool need_set_crush_loc{false};
public:
  bool is_stretch_mode() { return stretch_mode_engaged; }
  bool is_degraded_stretch_mode() { return degraded_stretch_mode; }
  bool is_recovering_stretch_mode() { return recovering_stretch_mode; }
  /**
   * This set of functions maintains the in-memory stretch state
   * and sets up transitions of the map states by calling in to
   * MonmapMonitor and OSDMonitor.
   *
   * The [maybe_]go_* functions are called on the leader to
   * decide if transitions should happen; the trigger_* functions
   * set up the map transitions; and the set_* functions actually
   * change the memory state -- but these are only called
   * via OSDMonitor::update_from_paxos, to guarantee consistent
   * updates across the entire cluster.
   */
  void try_engage_stretch_mode();
  void maybe_go_degraded_stretch_mode();
  void trigger_degraded_stretch_mode(const std::set<std::string>& dead_mons,
                                     const std::set<int>& dead_buckets);
  void set_degraded_stretch_mode();
  void go_recovery_stretch_mode();
  void set_recovery_stretch_mode();
  void trigger_healthy_stretch_mode();
  void set_healthy_stretch_mode();
  void enable_stretch_mode();
  void set_mon_crush_location(const std::string& loc);
private:
  /**
   * @defgroup Monitor_h_scrub
   * @{
   */
  version_t scrub_version;                 ///< paxos version we are scrubbing
  std::map<int,ScrubResult> scrub_result;  ///< results so far

  /**
   * trigger a cross-mon scrub
   *
   * Verify all mons are storing identical content
   */
  int scrub_start();
  int scrub();
  void handle_scrub(MonOpRequestRef op);
  bool _scrub(ScrubResult *r,
              std::pair<std::string,std::string> *start,
              int *num_keys);
  void scrub_check_results();
  void scrub_timeout();
  void scrub_finish();
  void scrub_reset();
  void scrub_update_interval(ceph::timespan interval);

  Context *scrub_event;          ///< periodic event to trigger scrub (leader)
  Context *scrub_timeout_event;  ///< scrub round timeout (leader)
  void scrub_event_start();
  void scrub_event_cancel();
  void scrub_reset_timeout();
  void scrub_cancel_timeout();

  /// progress of the scrub round currently in flight
  struct ScrubState {
    std::pair<std::string,std::string> last_key; ///< last scrubbed key
    bool finished;                               ///< true once the key walk has completed
    ScrubState() : finished(false) { }
    virtual ~ScrubState() { }
  };
  std::shared_ptr<ScrubState> scrub_state; ///< keeps track of current scrub
  /**
   * @defgroup Monitor_h_sync Synchronization
   * @{
   */
  /**
   * @} // provider state
   */
  /// per-peer state held while another monitor syncs its store from us
  struct SyncProvider {
    entity_addrvec_t addrs;      ///< address of the syncing peer
    uint64_t cookie;             ///< unique cookie for this sync attempt
    utime_t timeout;             ///< when we give up and expire this attempt
    version_t last_committed;    ///< last paxos version on peer
    std::pair<std::string,std::string> last_key; ///< last key sent to (or on) peer
    bool full;                   ///< full scan?
    MonitorDBStore::Synchronizer synchronizer;   ///< iterator

    SyncProvider() : cookie(0), last_committed(0), full(false) {}

    /// push the expiry out by 'grace' seconds from now
    void reset_timeout(CephContext *cct, int grace) {
      timeout = ceph_clock_now();
      timeout += grace;
    }
  };

  std::map<std::uint64_t, SyncProvider> sync_providers;  ///< cookie -> SyncProvider for those syncing from us
  uint64_t sync_provider_count;  ///< counter for issued cookies to keep them unique

  /**
   * @} // requester state
   */
  entity_addrvec_t sync_provider;  ///< who we are syncing from
  uint64_t sync_cookie;            ///< 0 if we are starting, non-zero otherwise
  bool sync_full;                  ///< true if we are a full sync, false for recent catch-up
  version_t sync_start_version;    ///< last_committed at sync start
  Context *sync_timeout_event;     ///< timeout event

  /**
   * floor for sync source
   *
   * When we sync we forget about our old last_committed value which
   * can be dangerous.  For example, if we have a cluster of:
   *
   *   mon.a: lc 100
   *   mon.b: lc 80
   *   mon.c: lc 100 (us)
   *
   * If something forces us to sync (say, corruption, or manual
   * intervention, or bug), we forget last_committed, and might abort.
   * If mon.a happens to be down when we come back, we will see:
   *
   *   mon.b: lc 80
   *   mon.c: lc 0 (us)
   *
   * and sync from mon.b, at which point a+b will both have lc 80 and
   * come online with a majority holding out of date commits.
   *
   * Avoid this by preserving our old last_committed value prior to
   * sync and never going backwards.
   */
  version_t sync_last_committed_floor;
  /**
   * Obtain the synchronization target prefixes in set form.
   *
   * We consider a target prefix all those that are relevant when
   * synchronizing two stores. That is, all those that hold paxos service's
   * versions, as well as paxos versions, or any control keys such as the
   * first or last committed version.
   *
   * Given the current design, this function should return the name of all and
   * any available paxos service, plus the paxos name.
   *
   * @returns a set of strings referring to the prefixes being synchronized
   */
  std::set<std::string> get_sync_targets_names();

  /**
   * Reset the monitor's sync-related data structures for syncing *from* a peer
   */
  void sync_reset_requester();

  /**
   * Reset sync state related to allowing others to sync from us
   */
  void sync_reset_provider();

  /**
   * Called when a sync attempt times out (requester-side)
   */
  void sync_timeout();

  /**
   * Get the latest monmap for backup purposes during sync
   */
  void sync_obtain_latest_monmap(ceph::buffer::list &bl);

  /**
   * Start sync process
   *
   * Start pulling committed state from another monitor.
   *
   * @param entity where to pull committed state from
   * @param full whether to do a full sync or just catch up on recent paxos
   */
  void sync_start(entity_addrvec_t &addrs, bool full);

public:
  /**
   * force a sync on next mon restart
   */
  void sync_force(ceph::Formatter *f);

private:
  /**
   * store critical state for safekeeping during sync
   *
   * We store a few things on the side that we don't want to get clobbered by sync.  This
   * includes the latest monmap and a lower bound on last_committed.
   */
  void sync_stash_critical_state(MonitorDBStore::TransactionRef tx);

  /**
   * reset the sync timeout
   *
   * This is used on the client to restart if things aren't progressing
   */
  void sync_reset_timeout();

  /**
   * trim stale sync provider state
   *
   * If someone is syncing from us and hasn't talked to us recently, expire their state.
   */
  void sync_trim_providers();

  /**
   * Complete a sync
   *
   * Finish up a sync after we've gotten all of the chunks.
   *
   * @param last_committed final last_committed value from provider
   */
  void sync_finish(version_t last_committed);

  /**
   * request the next chunk from the provider
   */
  void sync_get_next_chunk();

  /**
   * handle sync message
   *
   * @param m Sync message with operation type MMonSync::OP_START_CHUNKS
   */
  void handle_sync(MonOpRequestRef op);

  // sync protocol handlers, dispatched by message sub-operation
  void _sync_reply_no_cookie(MonOpRequestRef op);
  void handle_sync_get_cookie(MonOpRequestRef op);
  void handle_sync_get_chunk(MonOpRequestRef op);
  void handle_sync_finish(MonOpRequestRef op);
  void handle_sync_cookie(MonOpRequestRef op);
  void handle_sync_forward(MonOpRequestRef op);
  void handle_sync_chunk(MonOpRequestRef op);
  void handle_sync_no_cookie(MonOpRequestRef op);

  /**
   * @} // Synchronization
   */

  std::list<Context*> waitfor_quorum;        ///< contexts to run once we have a quorum
  std::list<Context*> maybe_wait_for_quorum; ///< contexts that may be re-queued for quorum
  /**
   * @defgroup Monitor_h_TimeCheck Monitor Clock Drift Early Warning System
   * @{
   *
   * We use time checks to keep track of any clock drifting going on in the
   * cluster. This is accomplished by periodically ping each monitor in the
   * quorum and register its response time on a map, assessing how much its
   * clock has drifted. We also take this opportunity to assess the latency
   * on response.
   *
   * This mechanism works as follows:
   *
   *  - Leader sends out a 'PING' message to each other monitor in the quorum.
   *    The message is timestamped with the leader's current time. The leader's
   *    current time is recorded in a map, associated with each peon's
   *    instance.
   *  - The peon replies to the leader with a timestamped 'PONG' message.
   *  - The leader calculates a delta between the peon's timestamp and its
   *    current time and stashes it.
   *  - The leader also calculates the time it took to receive the 'PONG'
   *    since the 'PING' was sent, and stashes an approximate latency estimate.
   *  - Once all the quorum members have pong'ed, the leader will share the
   *    clock skew and latency maps with all the monitors in the quorum.
   */
  std::map<int, utime_t> timecheck_waiting;   ///< rank -> time we sent the PING still awaiting a PONG
  std::map<int, double> timecheck_skews;      ///< rank -> measured clock skew (seconds)
  std::map<int, double> timecheck_latencies;  ///< rank -> estimated round-trip latency (seconds)
  // odd value means we are mid-round; even value means the round has
  // finished.
  version_t timecheck_round;
  unsigned int timecheck_acks;      ///< PONGs received this round
  utime_t timecheck_round_start;    ///< when the current round began
  friend class HealthMonitor;
  /* When we hit a skew we will start a new round based off of
   * 'mon_timecheck_skew_interval'. Each new round will be backed off
   * until we hit 'mon_timecheck_interval' -- which is the typical
   * interval when not in the presence of a skew.
   *
   * This variable tracks the number of rounds with skews since last clean
   * so that we can report to the user and properly adjust the backoff.
   */
  uint64_t timecheck_rounds_since_clean;
  /**
   * Time Check event.
   */
  Context *timecheck_event;

  void timecheck_start();
  void timecheck_finish();
  void timecheck_start_round();
  void timecheck_finish_round(bool success = true);
  void timecheck_cancel_round();
  void timecheck_cleanup();
  void timecheck_reset_event();
  void timecheck_check_skews();
  void timecheck_report();
  void timecheck();
  health_status_t timecheck_status(std::ostringstream &ss,
                                   const double skew_bound,
                                   const double latency);
  void handle_timecheck_leader(MonOpRequestRef op);
  void handle_timecheck_peon(MonOpRequestRef op);
  void handle_timecheck(MonOpRequestRef op);

  /**
   * Returns 'true' if this is considered to be a skew; 'false' otherwise.
   *
   * A skew is any absolute offset larger than mon_clock_drift_allowed.
   * If @p abs is non-null it receives the absolute skew value.
   */
  bool timecheck_has_skew(const double skew_bound, double *abs) const {
    double abs_skew = std::fabs(skew_bound);
    if (abs)
      *abs = abs_skew;
    return (abs_skew > g_conf()->mon_clock_drift_allowed);
  }

  /**
   * @}
   */
  /**
   * Handle ping messages from others.
   */
  void handle_ping(MonOpRequestRef op);

  Context *probe_timeout_event = nullptr;  // for probing
  void reset_probe_timeout();
  void cancel_probe_timeout();
  void probe_timeout(int r);

  void _apply_compatset_features(CompatSet &new_features);
public:
  epoch_t get_epoch();
  int get_leader() const { return leader; }
  /// name of the current leader, or "" when there is no quorum
  std::string get_leader_name() {
    return quorum.empty() ? std::string() : monmap->get_name(leader);
  }
  const std::set<int>& get_quorum() const { return quorum; }
  /// resolve quorum ranks to monitor names via the monmap
  std::list<std::string> get_quorum_names() {
    std::list<std::string> q;
    for (auto p = quorum.begin(); p != quorum.end(); ++p)
      q.push_back(monmap->get_name(*p));
    return q;
  }
  uint64_t get_quorum_con_features() const {
    return quorum_con_features;
  }
  mon_feature_t get_quorum_mon_features() const {
    return quorum_mon_features;
  }
  uint64_t get_required_features() const {
    return required_features;
  }
  mon_feature_t get_required_mon_features() const {
    return monmap->get_required_features();
  }
  void apply_quorum_to_compatset_features();
  void apply_monmap_to_compatset_features();
  void calc_quorum_requirements();

  void get_combined_feature_map(FeatureMap *fm);

private:
  void _reset();   ///< called from bootstrap, start_, or join_election
  void wait_for_paxos_write();
  void _finish_svc_election(); ///< called by {win,lose}_election
  void respawn();

public:
  void bootstrap();
  void join_election();
  void start_election();
  void win_standalone_election();
  // end election (called by Elector)
  void win_election(epoch_t epoch, const std::set<int>& q,
                    uint64_t features,
                    const mon_feature_t& mon_features,
                    ceph_release_t min_mon_release,
                    const std::map<int,Metadata>& metadata);
  void lose_election(epoch_t epoch, std::set<int>& q, int l,
                     uint64_t features,
                     const mon_feature_t& mon_features,
                     ceph_release_t min_mon_release);
  // end election (called by Elector)
  void finish_election();

  void update_logger();

  /**
   * Vector holding the Services serviced by this Monitor.
   */
  std::array<std::unique_ptr<PaxosService>, PAXOS_NUM> paxos_service;
  // Typed accessors for each paxos service slot.  The C-style casts are
  // deliberate: the concrete service classes are only forward-declared
  // here, so a static_cast through the class hierarchy is not possible.
  class MDSMonitor *mdsmon() {
    return (class MDSMonitor *)paxos_service[PAXOS_MDSMAP].get();
  }

  class MonmapMonitor *monmon() {
    return (class MonmapMonitor *)paxos_service[PAXOS_MONMAP].get();
  }

  class OSDMonitor *osdmon() {
    return (class OSDMonitor *)paxos_service[PAXOS_OSDMAP].get();
  }

  class AuthMonitor *authmon() {
    return (class AuthMonitor *)paxos_service[PAXOS_AUTH].get();
  }

  class LogMonitor *logmon() {
    return (class LogMonitor*) paxos_service[PAXOS_LOG].get();
  }

  class MgrMonitor *mgrmon() {
    return (class MgrMonitor*) paxos_service[PAXOS_MGR].get();
  }

  class MgrStatMonitor *mgrstatmon() {
    return (class MgrStatMonitor*) paxos_service[PAXOS_MGRSTAT].get();
  }

  class HealthMonitor *healthmon() {
    return (class HealthMonitor*) paxos_service[PAXOS_HEALTH].get();
  }

  class ConfigMonitor *configmon() {
    return (class ConfigMonitor*) paxos_service[PAXOS_CONFIG].get();
  }

  class KVMonitor *kvmon() {
    return (class KVMonitor*) paxos_service[PAXOS_KV].get();
  }
  friend class Paxos;
  friend class OSDMonitor;
  friend class MDSMonitor;
  friend class MonmapMonitor;
  friend class LogMonitor;
  friend class KVMonitor;

  // -- sessions --
  MonSessionMap session_map;  ///< all client/daemon sessions; guarded by session_map_lock
  ceph::mutex session_map_lock = ceph::make_mutex("Monitor::session_map_lock");
  AdminSocketHook *admin_hook;

  /// run @p func with the session map while holding session_map_lock
  template<typename Func, typename...Args>
  void with_session_map(Func&& func) {
    std::lock_guard l(session_map_lock);
    std::forward<Func>(func)(session_map);
  }
  void send_latest_monmap(Connection *con);

  // messages
  void handle_get_version(MonOpRequestRef op);
  void handle_subscribe(MonOpRequestRef op);
  void handle_mon_get_map(MonOpRequestRef op);

  // -- command parsing / authorization helpers --
  static void _generate_command_map(cmdmap_t& cmdmap,
                                    std::map<std::string,std::string> &param_str_map);
  static const MonCommand *_get_moncommand(
    const std::string &cmd_prefix,
    const std::vector<MonCommand>& cmds);
  bool _allowed_command(MonSession *s, const std::string& module,
                        const std::string& prefix,
                        const cmdmap_t& cmdmap,
                        const std::map<std::string,std::string>& param_str_map,
                        const MonCommand *this_cmd);
  void get_mon_status(ceph::Formatter *f);
  void _quorum_status(ceph::Formatter *f, std::ostream& ss);
  bool _add_bootstrap_peer_hint(std::string_view cmd, const cmdmap_t& cmdmap,
                                std::ostream& ss);
  void handle_tell_command(MonOpRequestRef op);
  void handle_command(MonOpRequestRef op);
  void handle_route(MonOpRequestRef op);

  int get_mon_metadata(int mon, ceph::Formatter *f, std::ostream& err);
  int print_nodes(ceph::Formatter *f, std::ostream& err);

  // track metadata reported by win_election()
  std::map<int, Metadata> mon_metadata;
  std::map<int, Metadata> pending_metadata;
  /**
   * Cached overall health status, used when periodically reporting
   * health to the cluster log.
   */
  struct health_cache_t {
    health_status_t overall;  ///< last overall health status we reported
    std::string summary;      ///< last health summary string we reported

    void reset() {
      // health_status_t doesn't really have a NONE value and we're not
      // okay with setting something else (say, HEALTH_ERR).  so just
      // leave it be.
      summary.clear();
    }
  } health_status_cache;

  Context *health_tick_event = nullptr;      ///< short-interval health-to-clog timer
  Context *health_interval_event = nullptr;  ///< aligned-interval health-to-clog timer

  void health_tick_start();
  void health_tick_stop();
  ceph::real_clock::time_point health_interval_calc_next_update();
  void health_interval_start();
  void health_interval_stop();
  void health_events_cleanup();

  void health_to_clog_update_conf(const std::set<std::string> &changed);

  void do_health_to_clog_interval();
  void do_health_to_clog(bool force = false);

  void log_health(
    const health_check_map_t& updated,
    const health_check_map_t& previous,
    MonitorDBStore::TransactionRef t);

  void update_pending_metadata();

protected:

  /// last-logged state for one health check, used to rate-limit clog output
  class HealthCheckLogStatus {
    public:
    health_status_t severity;   ///< severity we last logged
    std::string last_message;   ///< message we last logged
    utime_t updated_at = 0;     ///< when we last logged it
    HealthCheckLogStatus(health_status_t severity_,
                         const std::string &last_message_,
                         utime_t updated_at_)
      : severity(severity_),
        last_message(last_message_),
        updated_at(updated_at_)
    {}
  };
  std::map<std::string, HealthCheckLogStatus> health_check_log_times;
public:

  void get_cluster_status(std::stringstream &ss, ceph::Formatter *f,
                          MonSession *session);

  void reply_command(MonOpRequestRef op, int rc, const std::string &rs, version_t version);
  void reply_command(MonOpRequestRef op, int rc, const std::string &rs, ceph::buffer::list& rdata, version_t version);
  void reply_tell_command(MonOpRequestRef op, int rc, const std::string &rs);


  void handle_probe(MonOpRequestRef op);
  /**
   * Handle a Probe Operation, replying with our name, quorum and known versions.
   *
   * We use the MMonProbe message class for anything and everything related with
   * Monitor probing. One of the operations relates directly with the probing
   * itself, in which we receive a probe request and to which we reply with
   * our name, our quorum and the known versions for each Paxos service. Thus the
   * redundant function name. This reply will obviously be sent to the one
   * probing/requesting these infos.
   *
   * @todo Add @pre and @post
   *
   * @param m A Probe message, with an operation of type Probe.
   */
  void handle_probe_probe(MonOpRequestRef op);
  void handle_probe_reply(MonOpRequestRef op);

  // request routing
  /// a client request forwarded to the leader; kept so we can resend or reply
  struct RoutedRequest {
    uint64_t tid;                  ///< unique routing tid
    ceph::buffer::list request_bl; ///< encoded original request, for resends
    MonSession *session;           ///< originating session (refcounted; released in dtor)
    ConnectionRef con;             ///< originating connection
    uint64_t con_features;         ///< feature bits of the originating connection
    MonOpRequestRef op;            ///< original op request

    RoutedRequest() : tid(0), session(NULL), con_features(0) {}
    ~RoutedRequest() {
      if (session)
        session->put();
    }
  };
  uint64_t routed_request_tid;
  std::map<uint64_t, RoutedRequest*> routed_requests;

  void forward_request_leader(MonOpRequestRef op);
  void handle_forward(MonOpRequestRef op);
  void send_reply(MonOpRequestRef op, Message *reply);
  void no_reply(MonOpRequestRef op);
  void resend_routed_requests();
  void remove_session(MonSession *s);
  void remove_all_sessions();
  void waitlist_or_zap_client(MonOpRequestRef op);

  void send_mon_message(Message *m, int rank);
  /** can_change_external_state if we can do things like
   *  call elections as a result of the new map.
   */
  void notify_new_monmap(bool can_change_external_state=false, bool remove_rank_elector=true);
public:
  /**
   * Completion for a monitor command: audit-logs the command and sends
   * the final reply (rc/rs/rdata/version) back to the client.
   */
  struct C_Command : public C_MonOp {
    Monitor &mon;
    int rc;                   ///< return code to send
    std::string rs;           ///< status string to send
    ceph::buffer::list rdata; ///< reply payload
    version_t version;        ///< map version to report
    C_Command(Monitor &_mm, MonOpRequestRef _op, int r, std::string s, version_t v) :
      C_MonOp(_op), mon(_mm), rc(r), rs(s), version(v){}
    C_Command(Monitor &_mm, MonOpRequestRef _op, int r, std::string s, ceph::buffer::list rd, version_t v) :
      C_MonOp(_op), mon(_mm), rc(r), rs(s), rdata(rd), version(v){}

    void _finish(int r) override {
      auto m = op->get_req<MMonCommand>();
      if (r >= 0) {
        std::ostringstream ss;
        if (!op->get_req()->get_connection()) {
          ss << "connection dropped for command ";
        } else {
          MonSession *s = op->get_session();

          // if client drops we may not have a session to draw information from.
          if (s) {
            ss << "from='" << s->name << " " << s->addrs << "' "
               << "entity='" << s->entity_name << "' ";
          } else {
            ss << "session dropped for command ";
          }
        }
        cmdmap_t cmdmap;
        std::ostringstream ds;
        std::string prefix;
        cmdmap_from_json(m->cmd, &cmdmap, ds);
        cmd_getval(cmdmap, "prefix", prefix);
        // don't log the full command for config set/config-key set: the
        // payload may contain sensitive values (e.g. keys).
        if (prefix != "config set" && prefix != "config-key set")
          ss << "cmd='" << m->cmd << "': finished";

        mon.audit_clog->info() << ss.str();
        mon.reply_command(op, rc, rs, rdata, version);
      }
      else if (r == -ECANCELED)
        return;
      else if (r == -EAGAIN)
        mon.dispatch_op(op);
      else
        ceph_abort_msg("bad C_Command return value");
    }
  };
private:
  /**
   * Completion that re-dispatches a waitlisted op once the condition it
   * was waiting on (e.g. quorum, paxos readability) is satisfied.
   */
  class C_RetryMessage : public C_MonOp {
    Monitor *mon;
   public:
    C_RetryMessage(Monitor *m, MonOpRequestRef op)
      : C_MonOp(op), mon(m) {}

    void _finish(int r) override {
      if (r == -ECANCELED)
        return;                 // op was cancelled; nothing to do
      if (r >= 0 || r == -EAGAIN) {
        mon->dispatch_op(op);   // retry the original dispatch path
        return;
      }
      ceph_abort_msg("bad C_RetryMessage return value");
    }
  };
  //ms_dispatch handles a lot of logic and we want to reuse it
  //on forwarded messages, so we create a non-locking version for this class
  void _ms_dispatch(Message *m);

  /// Messenger entry point: takes the monitor lock, then dispatches.
  bool ms_dispatch(Message *m) override {
    std::lock_guard l{lock};
    _ms_dispatch(m);
    return true;
  }
  void dispatch_op(MonOpRequestRef op);

  //mon_caps is used for un-connected messages from monitors
  MonCap mon_caps;
  bool get_authorizer(int dest_type, AuthAuthorizer **authorizer);
public: // for AuthMonitor msgr1:
  int ms_handle_authentication(Connection *con) override;
private:
  // Messenger connection lifecycle callbacks.
  void ms_handle_accept(Connection *con) override;
  bool ms_handle_reset(Connection *con) override;
  void ms_handle_remote_reset(Connection *con) override {}
  bool ms_handle_refused(Connection *con) override;

  // AuthClient — this monitor authenticating to other daemons
  int get_auth_request(
    Connection *con,
    AuthConnectionMeta *auth_meta,
    uint32_t *method,
    std::vector<uint32_t> *preferred_modes,
    ceph::buffer::list *out) override;
  int handle_auth_reply_more(
    Connection *con,
    AuthConnectionMeta *auth_meta,
    const ceph::buffer::list& bl,
    ceph::buffer::list *reply) override;
  int handle_auth_done(
    Connection *con,
    AuthConnectionMeta *auth_meta,
    uint64_t global_id,
    uint32_t con_mode,
    const ceph::buffer::list& bl,
    CryptoKey *session_key,
    std::string *connection_secret) override;
  int handle_auth_bad_method(
    Connection *con,
    AuthConnectionMeta *auth_meta,
    uint32_t old_auth_method,
    int result,
    const std::vector<uint32_t>& allowed_methods,
    const std::vector<uint32_t>& allowed_modes) override;
  // /AuthClient
  // AuthServer — clients/daemons authenticating to this monitor
  int handle_auth_request(
    Connection *con,
    AuthConnectionMeta *auth_meta,
    bool more,
    uint32_t auth_method,
    const ceph::buffer::list& bl,
    ceph::buffer::list *reply) override;
  // /AuthServer

  int write_default_keyring(ceph::buffer::list& bl);
  void extract_save_mon_key(KeyRing& keyring);

  // -- daemon metadata / version reporting --
  void collect_metadata(Metadata *m);
  int load_metadata();
  void count_metadata(const std::string& field, ceph::Formatter *f);
  void count_metadata(const std::string& field, std::map<std::string,int> *out);
  // get_all_versions() gathers version information from daemons for health check
  void get_all_versions(std::map<std::string, std::list<std::string>> &versions);
  void get_versions(std::map<std::string, std::list<std::string>> &versions);

  // features
  static CompatSet get_initial_supported_features();
  static CompatSet get_supported_features();
  static CompatSet get_legacy_features();
  /// read the ondisk features into the CompatSet pointed to by read_features
  static void read_features_off_disk(MonitorDBStore *store, CompatSet *read_features);
  void read_features();
  void write_features(MonitorDBStore::TransactionRef t);

  OpTracker op_tracker;
public:
  Monitor(CephContext *cct_, std::string nm, MonitorDBStore *s,
          Messenger *m, Messenger *mgr_m, MonMap *map);
  ~Monitor() override;

  static int check_features(MonitorDBStore *store);

  // config observer
  const char** get_tracked_conf_keys() const override;
  void handle_conf_change(const ConfigProxy& conf,
                          const std::set<std::string> &changed) override;

  void update_log_clients();
  int sanitize_options();
  int preinit();
  int init();
  void init_paxos();
  void refresh_from_paxos(bool *need_bootstrap);
  void shutdown();
  void tick();

  void handle_signal(int sig);

  int mkfs(ceph::buffer::list& osdmapbl);

  /**
   * check cluster_fsid file
   *
   * @return EEXIST if file exists and doesn't match, 0 on match, or negative error code
   */
  int check_fsid();

  /**
   * write cluster_fsid file
   *
   * @return 0 on success, or negative error code
   */
  int write_fsid();
  int write_fsid(MonitorDBStore::TransactionRef t);

  int do_admin_command(std::string_view command, const cmdmap_t& cmdmap,
                       ceph::Formatter *f,
                       std::ostream& err,
                       std::ostream& out);

private:
  // don't allow copying
  Monitor(const Monitor& rhs);
  Monitor& operator=(const Monitor &rhs);

public:
  static void format_command_descriptions(const std::vector<MonCommand> &commands,
                                          ceph::Formatter *f,
                                          uint64_t features,
                                          ceph::buffer::list *rdata);
const std::vector<MonCommand> &get_local_commands(mon_feature_t f) {
if (f.contains_all(ceph::features::mon::FEATURE_NAUTILUS)) {
return local_mon_commands;
} else {
return prenautilus_local_mon_commands;
}
}
const ceph::buffer::list& get_local_commands_bl(mon_feature_t f) {
if (f.contains_all(ceph::features::mon::FEATURE_NAUTILUS)) {
return local_mon_commands_bl;
} else {
return prenautilus_local_mon_commands_bl;
}
}
  /// Replace the command set learned from the current leader.
  void set_leader_commands(const std::vector<MonCommand>& cmds) {
    leader_mon_commands = cmds;
  }

  bool is_keyring_required();
};
// On-disk incompat features recorded in the monitor store.  Each release
// that changes the ondisk layout adds a new feature with the next id.
#define CEPH_MON_FEATURE_INCOMPAT_BASE CompatSet::Feature (1, "initial feature set (~v.18)")
#define CEPH_MON_FEATURE_INCOMPAT_GV CompatSet::Feature (2, "global version sequencing (v0.52)")
#define CEPH_MON_FEATURE_INCOMPAT_SINGLE_PAXOS CompatSet::Feature (3, "single paxos with k/v store (v0.\?)")
#define CEPH_MON_FEATURE_INCOMPAT_OSD_ERASURE_CODES CompatSet::Feature(4, "support erasure code pools")
#define CEPH_MON_FEATURE_INCOMPAT_OSDMAP_ENC CompatSet::Feature(5, "new-style osdmap encoding")
#define CEPH_MON_FEATURE_INCOMPAT_ERASURE_CODE_PLUGINS_V2 CompatSet::Feature(6, "support isa/lrc erasure code")
#define CEPH_MON_FEATURE_INCOMPAT_ERASURE_CODE_PLUGINS_V3 CompatSet::Feature(7, "support shec erasure code")
#define CEPH_MON_FEATURE_INCOMPAT_KRAKEN CompatSet::Feature(8, "support monmap features")
#define CEPH_MON_FEATURE_INCOMPAT_LUMINOUS CompatSet::Feature(9, "luminous ondisk layout")
#define CEPH_MON_FEATURE_INCOMPAT_MIMIC CompatSet::Feature(10, "mimic ondisk layout")
#define CEPH_MON_FEATURE_INCOMPAT_NAUTILUS CompatSet::Feature(11, "nautilus ondisk layout")
#define CEPH_MON_FEATURE_INCOMPAT_OCTOPUS CompatSet::Feature(12, "octopus ondisk layout")
#define CEPH_MON_FEATURE_INCOMPAT_PACIFIC CompatSet::Feature(13, "pacific ondisk layout")
#define CEPH_MON_FEATURE_INCOMPAT_QUINCY CompatSet::Feature(14, "quincy ondisk layout")
#define CEPH_MON_FEATURE_INCOMPAT_REEF CompatSet::Feature(15, "reef ondisk layout")
// make sure you add your feature to Monitor::get_supported_features
/* Callers use:
 *
 *      new C_MonContext{...}
 *
 * instead of
 *
 *      new C_MonContext(...)
 *
 * because of gcc bug [1].
 *
 * [1] https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85883
 */
/// A LambdaContext that becomes a no-op once the monitor is shutting down.
template<typename T>
class C_MonContext : public LambdaContext<T> {
public:
  C_MonContext(const Monitor* m, T&& f) :
    LambdaContext<T>(std::forward<T>(f)),
    mon(m)
  {}
  void finish(int r) override {
    // Skip the callback entirely if the monitor has shut down; the
    // resources the lambda captured may no longer be valid to use.
    if (mon->is_shutdown())
      return;
    LambdaContext<T>::finish(r);
  }
private:
  const Monitor* mon;
};
#endif
| 35,945 | 30.230235 | 118 |
h
|
null |
ceph-main/src/mon/MonitorDBStore.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2012 Inktank, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef CEPH_MONITOR_DB_STORE_H
#define CEPH_MONITOR_DB_STORE_H
#include "include/types.h"
#include "include/buffer.h"
#include <set>
#include <map>
#include <string>
#include <boost/scoped_ptr.hpp>
#include <sstream>
#include <fstream>
#include "kv/KeyValueDB.h"
#include "include/ceph_assert.h"
#include "common/Formatter.h"
#include "common/Finisher.h"
#include "common/errno.h"
#include "common/debug.h"
#include "common/safe_io.h"
#include "common/blkdev.h"
#include "common/PriorityCache.h"
#define dout_context g_ceph_context
class MonitorDBStore
{
  std::string path;                  ///< filesystem path of the backing store
  boost::scoped_ptr<KeyValueDB> db;  ///< underlying key/value database
  bool do_dump;                      ///< if true, mirror every transaction to a dump file
  int dump_fd_binary;                ///< fd for the binary transaction dump
  std::ofstream dump_fd_json;        ///< stream for the JSON transaction dump
  ceph::JSONFormatter dump_fmt;      ///< formatter used for the JSON dump
  Finisher io_work;                  ///< finisher thread for queued transactions
  bool is_open;                      ///< whether the db has been opened
public:
  /**
   * Resolve the name of the block device backing our store path.
   *
   * devname is zero-initialized, so an empty string is returned if the
   * lookup does not fill it in.
   * NOTE(review): assumes get_device_by_path() writes NUL-terminated
   * names into both buffers — confirm against common/blkdev.h.
   */
  std::string get_devname() {
    char devname[4096] = {0}, partition[4096];
    get_device_by_path(path.c_str(), partition, devname,
		       sizeof(devname));
    return devname;
  }

  /// Filesystem path this store was opened on.
  std::string get_path() {
    return path;
  }

  /// Expose the kv store's cache for the priority-cache balancer.
  std::shared_ptr<PriorityCache::PriCache> get_priority_cache() const {
    return db->get_priority_cache();
  }
  /**
   * A single operation within a Transaction (put/erase/erase-range/compact).
   * The meaning of key/endkey depends on the op type: for range ops they are
   * the [start, end) bounds; for compact they may both be empty (whole prefix).
   */
  struct Op {
    uint8_t type;              ///< one of Transaction::OP_*
    std::string prefix;        ///< kv column/prefix the op applies to
    std::string key, endkey;   ///< key (or range start) and range end
    ceph::buffer::list bl;     ///< value payload (OP_PUT only)

    Op()
      : type(0) { }
    Op(int t, const std::string& p, const std::string& k)
      : type(t), prefix(p), key(k) { }
    Op(int t, const std::string& p, const std::string& k, const ceph::buffer::list& b)
      : type(t), prefix(p), key(k), bl(b) { }
    Op(int t, const std::string& p, const std::string& start, const std::string& end)
      : type(t), prefix(p), key(start), endkey(end) { }

    void encode(ceph::buffer::list& encode_bl) const {
      // v2 appended endkey; field order must never change for compatibility.
      ENCODE_START(2, 1, encode_bl);
      encode(type, encode_bl);
      encode(prefix, encode_bl);
      encode(key, encode_bl);
      encode(bl, encode_bl);
      encode(endkey, encode_bl);
      ENCODE_FINISH(encode_bl);
    }

    void decode(ceph::buffer::list::const_iterator& decode_bl) {
      DECODE_START(2, decode_bl);
      decode(type, decode_bl);
      decode(prefix, decode_bl);
      decode(key, decode_bl);
      decode(bl, decode_bl);
      if (struct_v >= 2)
	decode(endkey, decode_bl);
      DECODE_FINISH(decode_bl);
    }

    void dump(ceph::Formatter *f) const {
      f->dump_int("type", type);
      f->dump_string("prefix", prefix);
      f->dump_string("key", key);
      if (endkey.length()) {
	f->dump_string("endkey", endkey);
      }
    }

    /// Rough encoded size estimate used for transaction accounting.
    int approx_size() const {
      return 6 + 1 +
	4 + prefix.size() +
	4 + key.size() +
	4 + endkey.size() +
	4 + bl.length();
    }

    static void generate_test_instances(std::list<Op*>& ls) {
      ls.push_back(new Op);
      // we get coverage here from the Transaction instances
    }
  };
  struct Transaction;
  typedef std::shared_ptr<Transaction> TransactionRef;

  /**
   * An ordered batch of store operations, applied atomically by
   * apply_transaction().  Tracks approximate key and byte counts for
   * throttling/accounting, and supports versioned encode/decode so
   * transactions can be shipped between monitors.
   */
  struct Transaction {
    std::list<Op> ops;       ///< operations, applied in order
    uint64_t bytes, keys;    ///< approximate accounting of payload size / key count

    Transaction() : bytes(6 + 4 + 8*2), keys(0) {}

    enum {
      OP_PUT	    = 1,
      OP_ERASE	    = 2,
      OP_COMPACT    = 3,
      OP_ERASE_RANGE = 4,
    };

    /// Queue a put of key -> bl under prefix.
    void put(const std::string& prefix, const std::string& key, const ceph::buffer::list& bl) {
      ops.push_back(Op(OP_PUT, prefix, key, bl));
      ++keys;
      bytes += ops.back().approx_size();
    }

    /// Convenience: key is a stringified version number.
    void put(const std::string& prefix, version_t ver, const ceph::buffer::list& bl) {
      std::ostringstream os;
      os << ver;
      put(prefix, os.str(), bl);
    }

    /// Convenience: value is an encoded version number.
    void put(const std::string& prefix, const std::string& key, version_t ver) {
      using ceph::encode;
      ceph::buffer::list bl;
      encode(ver, bl);
      put(prefix, key, bl);
    }

    /// Queue removal of a single key.
    void erase(const std::string& prefix, const std::string& key) {
      ops.push_back(Op(OP_ERASE, prefix, key));
      ++keys;
      bytes += ops.back().approx_size();
    }

    /// Convenience: key is a stringified version number.
    void erase(const std::string& prefix, version_t ver) {
      std::ostringstream os;
      os << ver;
      erase(prefix, os.str());
    }

    /// Queue removal of keys in [begin, end) under prefix.
    void erase_range(const std::string& prefix, const std::string& begin,
		     const std::string& end) {
      ops.push_back(Op(OP_ERASE_RANGE, prefix, begin, end));
      ++keys;
      bytes += ops.back().approx_size();
    }

    /// Queue an async compaction of the whole prefix.
    void compact_prefix(const std::string& prefix) {
      ops.push_back(Op(OP_COMPACT, prefix, {}));
    }

    /// Queue an async compaction of [start, end) under prefix.
    void compact_range(const std::string& prefix, const std::string& start,
		       const std::string& end) {
      ops.push_back(Op(OP_COMPACT, prefix, start, end));
    }

    void encode(ceph::buffer::list& bl) const {
      // v2 added the bytes/keys accounting fields.
      ENCODE_START(2, 1, bl);
      encode(ops, bl);
      encode(bytes, bl);
      encode(keys, bl);
      ENCODE_FINISH(bl);
    }

    void decode(ceph::buffer::list::const_iterator& bl) {
      DECODE_START(2, bl);
      decode(ops, bl);
      if (struct_v >= 2) {
	decode(bytes, bl);
	decode(keys, bl);
      }
      DECODE_FINISH(bl);
    }

    static void generate_test_instances(std::list<Transaction*>& ls) {
      ls.push_back(new Transaction);
      ls.push_back(new Transaction);
      ceph::buffer::list bl;
      bl.append("value");
      ls.back()->put("prefix", "key", bl);
      ls.back()->erase("prefix2", "key2");
      ls.back()->erase_range("prefix3", "key3", "key4");
      ls.back()->compact_prefix("prefix3");
      ls.back()->compact_range("prefix4", "from", "to");
    }

    /// Splice another transaction's ops onto the end of this one
    /// (the other transaction is drained).
    void append(TransactionRef other) {
      ops.splice(ops.end(), other->ops);
      keys += other->keys;
      bytes += other->bytes;
    }

    /// Decode an encoded transaction from bl and append it.
    void append_from_encoded(ceph::buffer::list& bl) {
      auto other(std::make_shared<Transaction>());
      auto it = bl.cbegin();
      other->decode(it);
      append(other);
    }

    bool empty() {
      return (size() == 0);
    }

    size_t size() const {
      return ops.size();
    }
    uint64_t get_keys() const {
      return keys;
    }
    uint64_t get_bytes() const {
      return bytes;
    }

    /// Dump the transaction for debugging; dump_val also hexdumps values.
    void dump(ceph::Formatter *f, bool dump_val=false) const {
      f->open_object_section("transaction");
      f->open_array_section("ops");
      int op_num = 0;
      for (auto it = ops.begin(); it != ops.end(); ++it) {
	const Op& op = *it;
	f->open_object_section("op");
	f->dump_int("op_num", op_num++);
	switch (op.type) {
	case OP_PUT:
	  {
	    f->dump_string("type", "PUT");
	    f->dump_string("prefix", op.prefix);
	    f->dump_string("key", op.key);
	    f->dump_unsigned("length", op.bl.length());
	    if (dump_val) {
	      std::ostringstream os;
	      op.bl.hexdump(os);
	      f->dump_string("bl", os.str());
	    }
	  }
	  break;
	case OP_ERASE:
	  {
	    f->dump_string("type", "ERASE");
	    f->dump_string("prefix", op.prefix);
	    f->dump_string("key", op.key);
	  }
	  break;
	case OP_ERASE_RANGE:
	  {
	    f->dump_string("type", "ERASE_RANGE");
	    f->dump_string("prefix", op.prefix);
	    f->dump_string("start", op.key);
	    f->dump_string("end", op.endkey);
	  }
	  break;
	case OP_COMPACT:
	  {
	    f->dump_string("type", "COMPACT");
	    f->dump_string("prefix", op.prefix);
	    f->dump_string("start", op.key);
	    f->dump_string("end", op.endkey);
	  }
	  break;
	default:
	  {
	    f->dump_string("type", "unknown");
	    f->dump_unsigned("op_code", op.type);
	    break;
	  }
	}
	f->close_section();
      }
      f->close_section();
      f->dump_unsigned("num_keys", keys);
      f->dump_unsigned("num_bytes", bytes);
      f->close_section();
    }
  };
/**
 * Synchronously apply a transaction to the backing KV store.
 *
 * Optionally dumps the transaction first (binary or JSON) when
 * transaction dumping was enabled at _open() time. Each Op is
 * translated onto a KeyValueDB transaction which is submitted
 * synchronously; COMPACT ops are collected and only kicked off
 * (asynchronously) after the write has succeeded. Aborts on unknown
 * op types and on write failure.
 *
 * @returns the KeyValueDB submit result (>= 0 on success)
 */
int apply_transaction(MonitorDBStore::TransactionRef t) {
  KeyValueDB::Transaction dbt = db->get_transaction();

  if (do_dump) {
    if (!g_conf()->mon_debug_dump_json) {
      ceph::buffer::list bl;
      t->encode(bl);
      bl.write_fd(dump_fd_binary);
    } else {
      t->dump(&dump_fmt, true);
      dump_fmt.flush(dump_fd_json);
      dump_fd_json.flush();
    }
  }

  // (prefix, (start, end)) compaction requests, deferred until after commit
  std::list<std::pair<std::string, std::pair<std::string,std::string>>> compact;
  for (auto it = t->ops.begin(); it != t->ops.end(); ++it) {
    const Op& op = *it;
    switch (op.type) {
    case Transaction::OP_PUT:
      dbt->set(op.prefix, op.key, op.bl);
      break;
    case Transaction::OP_ERASE:
      dbt->rmkey(op.prefix, op.key);
      break;
    case Transaction::OP_ERASE_RANGE:
      dbt->rm_range_keys(op.prefix, op.key, op.endkey);
      break;
    case Transaction::OP_COMPACT:
      compact.push_back(make_pair(op.prefix, make_pair(op.key, op.endkey)));
      break;
    default:
      derr << __func__ << " unknown op type " << op.type << dendl;
      ceph_abort();
      break;
    }
  }
  int r = db->submit_transaction_sync(dbt);
  if (r >= 0) {
    while (!compact.empty()) {
      // empty start+end means "compact the whole prefix"
      if (compact.front().second.first == std::string() &&
          compact.front().second.second == std::string())
        db->compact_prefix_async(compact.front().first);
      else
        db->compact_range_async(compact.front().first, compact.front().second.first, compact.front().second.second);
      compact.pop_front();
    }
  } else {
    ceph_abort_msg("failed to write to db");
  }
  return r;
}
/**
 * Finisher context that applies a queued transaction to the store and
 * then fires the caller's on-commit callback with the result.
 */
struct C_DoTransaction : public Context {
  MonitorDBStore *store;
  MonitorDBStore::TransactionRef t;
  Context *oncommit;
  C_DoTransaction(MonitorDBStore *s, MonitorDBStore::TransactionRef t,
                  Context *f)
    : store(s), t(t), oncommit(f)
  {}
  void finish(int r) override {
    /* The store serializes writes. Each transaction is handled
     * sequentially by the io_work Finisher. If a transaction takes longer
     * to apply its state to permanent storage, then no other transaction
     * will be handled meanwhile.
     *
     * We will now randomly inject random delays. We can safely sleep prior
     * to applying the transaction as it won't break the model.
     */
    double delay_prob = g_conf()->mon_inject_transaction_delay_probability;
    if (delay_prob && (rand() % 10000 < delay_prob * 10000.0)) {
      utime_t delay;
      double delay_max = g_conf()->mon_inject_transaction_delay_max;
      delay.set_from_double(delay_max * (double)(rand() % 10000) / 10000.0);
      lsubdout(g_ceph_context, mon, 1)
        << "apply_transaction will be delayed for " << delay
        << " seconds" << dendl;
      delay.sleep();
    }
    int ret = store->apply_transaction(t);
    oncommit->complete(ret);
  }
};
/**
 * queue transaction
 *
 * Queue a transaction to commit asynchronously.  Trigger a context
 * on completion (without any locks held).  The transaction is applied
 * on the io_work Finisher thread, in queue order.
 */
void queue_transaction(MonitorDBStore::TransactionRef t,
                       Context *oncommit) {
  io_work.queue(new C_DoTransaction(this, t, oncommit));
}
/**
 * block and flush all io activity
 *
 * Waits until every transaction queued via queue_transaction() has
 * been applied (and its completion fired).
 */
void flush() {
  io_work.wait_for_empty();
}
/**
 * Abstract cursor used by monitor sync to stream the store to a peer
 * in chunks.  Tracks the last key handed out and, when mon_sync_debug
 * is enabled, a running CRC of everything streamed.
 */
class StoreIteratorImpl {
protected:
  bool done;                                    // underlying iteration exhausted
  std::pair<std::string,std::string> last_key;  // (prefix, key) chunk boundary
  ceph::buffer::list crc_bl;                    // debug-only CRC accumulator

  StoreIteratorImpl() : done(false) { }
  virtual ~StoreIteratorImpl() { }

  virtual bool _is_valid() = 0;

public:
  // CRC32C of all data streamed so far; always 0 unless mon_sync_debug.
  __u32 crc() {
    if (g_conf()->mon_sync_debug)
      return crc_bl.crc32c(0);
    return 0;
  }
  std::pair<std::string,std::string> get_last_key() {
    return last_key;
  }
  virtual bool has_next_chunk() {
    return !done && _is_valid();
  }
  virtual void get_chunk_tx(TransactionRef tx, uint64_t max_bytes,
                            uint64_t max_keys) = 0;
  virtual std::pair<std::string,std::string> get_next_key() = 0;
};
typedef std::shared_ptr<StoreIteratorImpl> Synchronizer;
/**
 * StoreIteratorImpl over a whole-space KV iterator, restricted to a
 * set of interesting prefixes; keys outside those prefixes are
 * silently skipped.
 */
class WholeStoreIteratorImpl : public StoreIteratorImpl {
  KeyValueDB::WholeSpaceIterator iter;
  std::set<std::string> sync_prefixes;

public:
  WholeStoreIteratorImpl(KeyValueDB::WholeSpaceIterator iter,
                         std::set<std::string> &prefixes)
    : StoreIteratorImpl(),
      iter(iter),
      sync_prefixes(prefixes)
  { }

  ~WholeStoreIteratorImpl() override { }

  /**
   * Obtain a chunk of the store
   *
   * Appends key/value PUTs to @p tx until either the iterator is
   * exhausted (sets done) or adding the next pair would exceed
   * max_bytes/max_keys, in which case that pair becomes last_key and
   * is left for the next chunk.  The first pair always goes in, even
   * if it alone exceeds the limits (tx->empty() short-circuit).
   *
   * @param tx Transaction that will recreate the chunk
   * @param max_bytes soft byte budget for the chunk
   * @param max_keys  soft key-count budget for the chunk
   */
  void get_chunk_tx(TransactionRef tx, uint64_t max_bytes,
                    uint64_t max_keys) override {
    using ceph::encode;
    ceph_assert(done == false);
    ceph_assert(iter->valid() == true);

    while (iter->valid()) {
      std::string prefix(iter->raw_key().first);
      std::string key(iter->raw_key().second);
      if (sync_prefixes.count(prefix)) {
        ceph::buffer::list value = iter->value();
        if (tx->empty() ||
            (tx->get_bytes() + value.length() + key.size() +
             prefix.size() < max_bytes &&
             tx->get_keys() < max_keys)) {
          // NOTE: putting every key in a separate transaction is
          // questionable as far as efficiency goes
          auto tmp(std::make_shared<Transaction>());
          tmp->put(prefix, key, value);
          tx->append(tmp);
          if (g_conf()->mon_sync_debug) {
            encode(prefix, crc_bl);
            encode(key, crc_bl);
            encode(value, crc_bl);
          }
        } else {
          last_key.first = prefix;
          last_key.second = key;
          return;
        }
      }
      iter->next();
    }
    ceph_assert(iter->valid() == false);
    done = true;
  }

  // Return the current key matching a sync prefix and advance the
  // iterator past it; returns an empty pair when nothing remains.
  std::pair<std::string,std::string> get_next_key() override {
    ceph_assert(iter->valid());

    for (; iter->valid(); iter->next()) {
      std::pair<std::string,std::string> r = iter->raw_key();
      if (sync_prefixes.count(r.first) > 0) {
        iter->next();
        return r;
      }
    }
    return std::pair<std::string,std::string>();
  }

  bool _is_valid() override {
    return iter->valid();
  }
};
/**
 * Build a Synchronizer over the whole store, restricted to @p prefixes.
 *
 * If @p key holds a non-empty (prefix, key) position, iteration
 * resumes just after it; otherwise it starts from the beginning.
 */
Synchronizer get_synchronizer(std::pair<std::string,std::string> &key,
                              std::set<std::string> &prefixes) {
  KeyValueDB::WholeSpaceIterator iter;
  iter = db->get_wholespace_iterator();
  if (!key.first.empty() && !key.second.empty())
    iter->upper_bound(key.first, key.second);
  else
    iter->seek_to_first();

  // make_shared: single allocation for object + control block,
  // instead of shared_ptr(new ...)
  return std::make_shared<WholeStoreIteratorImpl>(iter, prefixes);
}
// Iterator over a single (non-empty) prefix, positioned at its first key.
KeyValueDB::Iterator get_iterator(const std::string &prefix) {
  ceph_assert(!prefix.empty());
  KeyValueDB::Iterator iter = db->get_iterator(prefix);
  iter->seek_to_first();
  return iter;
}
// Iterator over the entire store, positioned at the first key.
KeyValueDB::WholeSpaceIterator get_iterator() {
  auto whole = db->get_wholespace_iterator();
  whole->seek_to_first();
  return whole;
}
// Fetch (prefix, key) into bl; bl must be empty on entry.
// Returns the KeyValueDB result (e.g. -ENOENT when missing).
int get(const std::string& prefix, const std::string& key, ceph::buffer::list& bl) {
  ceph_assert(bl.length() == 0);
  return db->get(prefix, key, &bl);
}
// Convenience overload: look up a version number rendered as its
// decimal-string key.
int get(const std::string& prefix, const version_t ver, ceph::buffer::list& bl) {
  std::ostringstream key_os;
  key_os << ver;
  return get(prefix, key_os.str(), bl);
}
/**
 * Read (prefix, key) and decode its value as a version_t.
 *
 * A missing key reads as version 0.  Any other error aborts: this
 * accessor cannot signal failure through its return type, and the
 * callers treat the store as authoritative.
 */
version_t get(const std::string& prefix, const std::string& key) {
  using ceph::decode;
  ceph::buffer::list bl;
  int err = get(prefix, key, bl);
  if (err < 0) {
    if (err == -ENOENT) // if key doesn't exist, assume its value is 0
      return 0;
    // we're not expecting any other negative return value, and we can't
    // just return a negative value if we're returning a version_t
    generic_dout(0) << "MonitorDBStore::get() error obtaining"
                    << " (" << prefix << ":" << key << "): "
                    << cpp_strerror(err) << dendl;
    ceph_abort_msg("error obtaining key");
  }

  ceph_assert(bl.length());
  version_t ver;
  auto p = bl.cbegin();
  decode(ver, p);
  return ver;
}
// True iff (prefix, key) exists.  Implemented via lower_bound plus an
// exact-match check, since the iterator lands on the first key >= key.
bool exists(const std::string& prefix, const std::string& key) {
  KeyValueDB::Iterator it = db->get_iterator(prefix);
  int err = it->lower_bound(key);
  if (err < 0)
    return false;

  return (it->valid() && it->key() == key);
}
// Convenience overload: existence check for a version rendered as its
// decimal-string key.
bool exists(const std::string& prefix, version_t ver) {
  std::ostringstream key_os;
  key_os << ver;
  return exists(prefix, key_os.str());
}
// Join a prefix and a value with a '_' separator, e.g.
// combine_strings("paxos", "first_committed") -> "paxos_first_committed".
std::string combine_strings(const std::string& prefix, const std::string& value) {
  return prefix + '_' + value;
}
// Convenience overload: combine a prefix with a version rendered in
// decimal.
std::string combine_strings(const std::string& prefix, const version_t ver) {
  std::ostringstream ver_os;
  ver_os << ver;
  return combine_strings(prefix, ver_os.str());
}
// Drop every key under each of the given prefixes, in one synchronous
// transaction.  Aborts on write failure.
void clear(std::set<std::string>& prefixes) {
  KeyValueDB::Transaction dbt = db->get_transaction();
  for (const auto& prefix : prefixes) {
    dbt->rmkeys_by_prefix(prefix);
  }
  int r = db->submit_transaction_sync(dbt);
  ceph_assert(r >= 0);
}
/**
 * Instantiate and initialize (but not open) the KV backend.
 *
 * Builds "<path>/store.db" — trailing slashes in path are trimmed —
 * creates a KeyValueDB of the requested type and aborts if that
 * fails.  If transaction dumping is configured, also opens the binary
 * dump fd or the JSON dump stream.
 */
void _open(const std::string& kv_type) {
  // count trailing '/' characters so they can be trimmed off path
  int pos = 0;
  for (auto rit = path.rbegin(); rit != path.rend(); ++rit, ++pos) {
    if (*rit != '/')
      break;
  }
  std::ostringstream os;
  os << path.substr(0, path.size() - pos) << "/store.db";
  std::string full_path = os.str();

  KeyValueDB *db_ptr = KeyValueDB::create(g_ceph_context,
                                          kv_type,
                                          full_path);
  if (!db_ptr) {
    derr << __func__ << " error initializing "
         << kv_type << " db back storage in "
         << full_path << dendl;
    ceph_abort_msg("MonitorDBStore: error initializing keyvaluedb back storage");
  }
  db.reset(db_ptr);

  if (g_conf()->mon_debug_dump_transactions) {
    if (!g_conf()->mon_debug_dump_json) {
      dump_fd_binary = ::open(
        g_conf()->mon_debug_dump_location.c_str(),
        O_CREAT|O_APPEND|O_WRONLY|O_CLOEXEC, 0644);
      if (dump_fd_binary < 0) {
        // record -errno (negative) so the failure can be reported
        dump_fd_binary = -errno;
        derr << "Could not open log file, got "
             << cpp_strerror(dump_fd_binary) << dendl;
      }
    } else {
      dump_fmt.reset();
      dump_fmt.open_array_section("dump");
      dump_fd_json.open(g_conf()->mon_debug_dump_location.c_str());
    }
    // NOTE(review): do_dump is set even when the binary dump fd failed
    // to open above — presumably intentional best-effort; confirm.
    do_dump = true;
  }
  if (kv_type == "rocksdb")
    db->init(g_conf()->mon_rocksdb_options);
  else
    db->init();
}
/**
 * Open an existing store.
 *
 * Reads the recorded KV backend type (defaulting to — and recording —
 * rocksdb for old stores that never marked it), opens the DB, raises
 * the priority of the backend's perf counters, and starts the io
 * Finisher.
 *
 * @returns 0 on success, negative error code otherwise
 */
int open(std::ostream &out) {
  std::string kv_type;
  int r = read_meta("kv_backend", &kv_type);
  if (r < 0 || kv_type.empty()) {
    // assume old monitors that did not mark the type were RocksDB.
    kv_type = "rocksdb";
    r = write_meta("kv_backend", kv_type);
    if (r < 0)
      return r;
  }
  _open(kv_type);
  r = db->open(out);
  if (r < 0)
    return r;

  // Monitors are few in number, so the resource cost of exposing
  // very detailed stats is low: ramp up the priority of all the
  // KV store's perf counters.  Do this after open, because backend may
  // not have constructed PerfCounters earlier.
  if (db->get_perf_counters()) {
    db->get_perf_counters()->set_prio_adjust(
      PerfCountersBuilder::PRIO_USEFUL - PerfCountersBuilder::PRIO_DEBUGONLY);
  }

  io_work.start();
  is_open = true;
  return 0;
}
/**
 * Create (if necessary) and open the store.
 *
 * Records the configured KV backend in the 'kv_backend' meta file
 * before creating the DB, so later open() calls pick the same
 * backend.
 *
 * @returns 0 on success, negative error code otherwise
 */
int create_and_open(std::ostream &out) {
  // record the type before open
  std::string kv_type;
  int r = read_meta("kv_backend", &kv_type);
  // Treat an unreadable *or empty* meta file as unset — mirrors the
  // check in open(); previously an empty recorded backend name would
  // have been handed straight to _open().
  if (r < 0 || kv_type.empty()) {
    kv_type = g_conf()->mon_keyvaluedb;
    r = write_meta("kv_backend", kv_type);
    if (r < 0)
      return r;
  }
  _open(kv_type);
  r = db->create_and_open(out);
  if (r < 0)
    return r;
  io_work.start();
  is_open = true;
  return 0;
}
/**
 * Shut down the store: stop the io Finisher (all queued transactions
 * must already have drained) and release the DB handle.
 */
void close() {
  // there should be no work queued!
  io_work.stop();
  is_open = false;
  db.reset();  // idiomatic no-arg reset instead of reset(NULL)
}
// Synchronously compact the entire backing store.
void compact() {
  db->compact();
}
// Kick off compaction of the entire backing store in the background.
void compact_async() {
  db->compact_async();
}
// Synchronously compact all keys under a single prefix.
void compact_prefix(const std::string& prefix) {
  db->compact_prefix(prefix);
}
// Estimated on-disk size in bytes; per-category detail is returned
// through extras (semantics are backend-defined).
uint64_t get_estimated_size(std::map<std::string, uint64_t> &extras) {
  return db->get_estimated_size(extras);
}
/**
 * write_meta - write a simple configuration key out-of-band
 *
 * Write a simple key/value pair for basic store configuration
 * (e.g., a uuid or magic number) to an unopened/unmounted store.
 * The default implementation writes this to a plaintext file in the
 * path.
 *
 * A newline is appended.
 *
 * @param key key name (e.g., "fsid")
 * @param value value (e.g., a uuid rendered as a string)
 * @returns 0 for success, or an error code
 */
int write_meta(const std::string& key,
               const std::string& value) const {
  const std::string payload = value + "\n";
  int r = safe_write_file(path.c_str(), key.c_str(),
                          payload.c_str(), payload.length(),
                          0600);
  return r < 0 ? r : 0;
}
/**
 * read_meta - read a simple configuration key out-of-band
 *
 * Read a simple key value to an unopened/mounted store.
 *
 * Trailing whitespace is stripped off.
 *
 * @param key key name
 * @param value pointer to value string
 * @returns 0 for success, or an error code
 */
int read_meta(const std::string& key,
              std::string *value) const {
  char buf[4096];
  int r = safe_read_file(path.c_str(), key.c_str(),
                         buf, sizeof(buf));
  if (r <= 0)
    return r;
  // Strip trailing whitespace (newlines included).  Cast to unsigned
  // char: passing a plain (possibly negative) char to isspace() is
  // undefined behavior.
  while (r && isspace(static_cast<unsigned char>(buf[r-1]))) {
    --r;
  }
  *value = std::string(buf, r);
  return 0;
}
// Construct an unopened store rooted at path; call open() or
// create_and_open() before use.  The io_work Finisher serializes all
// queued transactions.
explicit MonitorDBStore(const std::string& path)
  : path(path),
    db(0),
    do_dump(false),
    dump_fd_binary(-1),
    dump_fmt(true),
    io_work(g_ceph_context, "monstore", "fn_monstore"),
    is_open(false) {
}
// Destructor: the store must already be close()d.  Tears down the
// transaction-dump channel if one was opened.
~MonitorDBStore() {
  ceph_assert(!is_open);
  if (do_dump) {
    if (!g_conf()->mon_debug_dump_json) {
      // _open() stores -errno here when the dump file failed to open;
      // only close an actually-valid descriptor.
      if (dump_fd_binary >= 0)
        ::close(dump_fd_binary);
    } else {
      dump_fmt.close_section();
      dump_fmt.flush(dump_fd_json);
      dump_fd_json.flush();
      dump_fd_json.close();
    }
  }
}
};
WRITE_CLASS_ENCODER(MonitorDBStore::Op)
WRITE_CLASS_ENCODER(MonitorDBStore::Transaction)
#endif /* CEPH_MONITOR_DB_STORE_H */
| 22,224 | 26.269939 | 111 |
h
|
null |
ceph-main/src/mon/MonmapMonitor.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "MonmapMonitor.h"
#include "Monitor.h"
#include "OSDMonitor.h"
#include "messages/MMonCommand.h"
#include "messages/MMonJoin.h"
#include "common/ceph_argparse.h"
#include "common/errno.h"
#include <sstream>
#include "common/config.h"
#include "common/cmdparse.h"
#include "include/ceph_assert.h"
#include "include/stringify.h"
#define dout_subsys ceph_subsys_mon
#undef dout_prefix
#define dout_prefix _prefix(_dout, mon)
using namespace TOPNSPC::common;
using std::cout;
using std::dec;
using std::hex;
using std::list;
using std::map;
using std::make_pair;
using std::ostream;
using std::ostringstream;
using std::pair;
using std::set;
using std::setfill;
using std::string;
using std::stringstream;
using std::to_string;
using std::vector;
using std::unique_ptr;
using ceph::bufferlist;
using ceph::decode;
using ceph::encode;
using ceph::Formatter;
using ceph::JSONFormatter;
using ceph::make_message;
using ceph::mono_clock;
using ceph::mono_time;
using ceph::timespan_str;
// Debug-log prefix for this service:
// "mon.<name>@<rank>(<state>).monmap v<epoch> ".
static ostream& _prefix(std::ostream *_dout, Monitor &mon) {
  return *_dout << "mon." << mon.name << "@" << mon.rank
                << "(" << mon.get_state_name()
                << ").monmap v" << mon.monmap->epoch << " ";
}
// Seed the pending map from the monitor's current monmap as epoch 1,
// stamping the default persistent features and min release unless
// debugging explicitly suppresses them.
void MonmapMonitor::create_initial()
{
  dout(10) << __func__ << " using current monmap" << dendl;
  pending_map = *mon.monmap;
  pending_map.epoch = 1;

  if (g_conf()->mon_debug_no_initial_persistent_features) {
    derr << __func__ << " mon_debug_no_initial_persistent_features=true"
         << dendl;
  } else {
    // initialize with default persistent features for new clusters
    pending_map.persistent_features = ceph::features::mon::get_persistent();
    pending_map.min_mon_release = ceph_release();
  }
}
// Refresh the in-memory monmap from the latest paxos-committed
// version; no-op unless that version is newer than what we hold.
void MonmapMonitor::update_from_paxos(bool *need_bootstrap)
{
  version_t version = get_last_committed();
  if (version <= mon.monmap->get_epoch())
    return;

  dout(10) << __func__ << " version " << version
           << ", my v " << mon.monmap->epoch << dendl;

  if (need_bootstrap && version != mon.monmap->get_epoch()) {
    // a monmap change means ranks/quorum must be re-established
    dout(10) << " signaling that we need a bootstrap" << dendl;
    *need_bootstrap = true;
  }

  // read and decode
  monmap_bl.clear();
  int ret = get_version(version, monmap_bl);
  ceph_assert(ret == 0);
  ceph_assert(monmap_bl.length());

  dout(10) << __func__ << " got " << version << dendl;
  mon.monmap->decode(monmap_bl);

  // the first committed map supersedes the mkfs seed map, if present
  if (mon.store->exists("mkfs", "monmap")) {
    auto t(std::make_shared<MonitorDBStore::Transaction>());
    t->erase("mkfs", "monmap");
    mon.store->apply_transaction(t);
  }

  check_subs();

  // make sure we've recorded min_mon_release
  string val;
  if (mon.store->read_meta("min_mon_release", &val) < 0 ||
      val.size() == 0 ||
      atoi(val.c_str()) != (int)ceph_release()) {
    dout(10) << __func__ << " updating min_mon_release meta" << dendl;
    mon.store->write_meta("min_mon_release",
                          stringify(ceph_release()));
  }

  mon.notify_new_monmap(true);
}
// Start a new pending epoch from the committed map; removed_ranks is
// transient per-epoch state and must not carry over.
void MonmapMonitor::create_pending()
{
  pending_map = *mon.monmap;
  pending_map.epoch++;
  pending_map.last_changed = ceph_clock_now();
  pending_map.removed_ranks.clear();
}
// Serialize the pending map (and derived health checks) into the
// paxos transaction.  Epochs must advance by exactly one, except for
// the initial mkfs map.
void MonmapMonitor::encode_pending(MonitorDBStore::TransactionRef t)
{
  dout(10) << __func__ << " epoch " << pending_map.epoch << dendl;

  ceph_assert(mon.monmap->epoch + 1 == pending_map.epoch ||
              pending_map.epoch == 1);  // special case mkfs!
  bufferlist bl;
  pending_map.encode(bl, mon.get_quorum_con_features());

  put_version(t, pending_map.epoch, bl);
  put_last_committed(t, pending_map.epoch);

  // generate a cluster fingerprint, too?
  if (pending_map.epoch == 1) {
    mon.prepare_new_fingerprint(t);
  }

  //health
  health_check_map_t next;
  pending_map.check_health(&next);
  encode_health(next, t);
}
/**
 * Deferred-retry context: re-invokes apply_mon_features() once the
 * service becomes writeable.  EAGAIN/ECANCELED mean the quorum that
 * established the features is gone, so the captured features are
 * discarded rather than applied.
 */
class C_ApplyFeatures : public Context {
  MonmapMonitor *svc;
  mon_feature_t features;
  ceph_release_t min_mon_release;
public:
  C_ApplyFeatures(MonmapMonitor *s, const mon_feature_t& f, ceph_release_t mmr) :
    svc(s), features(f), min_mon_release(mmr) { }
  void finish(int r) override {
    if (r >= 0) {
      svc->apply_mon_features(features, min_mon_release);
    } else if (r == -EAGAIN || r == -ECANCELED) {
      // discard features if we're no longer on the quorum that
      // established them in the first place.
      return;
    } else {
      ceph_abort_msg("bad C_ApplyFeatures return value");
    }
  }
};
// Persist quorum-supported features (and a raised min_mon_release)
// into the monmap.  Defers until writeable, requires a full quorum,
// and only proposes when something actually changes.
void MonmapMonitor::apply_mon_features(const mon_feature_t& features,
                                       ceph_release_t min_mon_release)
{
  if (!is_writeable()) {
    dout(5) << __func__ << " wait for service to be writeable" << dendl;
    wait_for_writeable_ctx(new C_ApplyFeatures(this, features, min_mon_release));
    return;
  }

  // do nothing here unless we have a full quorum
  if (mon.get_quorum().size() < mon.monmap->size()) {
    return;
  }

  ceph_assert(is_writeable());
  ceph_assert(features.contains_all(pending_map.persistent_features));
  // we should never hit this because `features` should be the result
  // of the quorum's supported features. But if it happens, die.
  ceph_assert(ceph::features::mon::get_supported().contains_all(features));

  // XOR against the already-persisted set: the bits left set are the
  // persistent features the quorum supports but the map lacks yet.
  mon_feature_t new_features =
    (pending_map.persistent_features ^
     (features & ceph::features::mon::get_persistent()));

  if (new_features.empty() &&
      pending_map.min_mon_release == min_mon_release) {
    dout(10) << __func__ << " min_mon_release (" << (int)min_mon_release
             << ") and features (" << features << ") match" << dendl;
    return;
  }

  if (!new_features.empty()) {
    dout(1) << __func__ << " applying new features "
            << new_features << ", had " << pending_map.persistent_features
            << ", will have "
            << (new_features | pending_map.persistent_features)
            << dendl;
    pending_map.persistent_features |= new_features;
  }
  if (min_mon_release > pending_map.min_mon_release) {
    dout(1) << __func__ << " increasing min_mon_release to "
            << to_integer<int>(min_mon_release) << " (" << min_mon_release
            << ")" << dendl;
    pending_map.min_mon_release = min_mon_release;
  }

  propose_pending();
}
// Post-(re)election hook: record first-ever quorum membership, log the
// map (leader only), and fold quorum features into the monmap.
void MonmapMonitor::on_active()
{
  if (get_last_committed() >= 1 && !mon.has_ever_joined) {
    // make note of the fact that i was, once, part of the quorum.
    dout(10) << "noting that i was, once, part of an active quorum." << dendl;

    /* This is some form of nasty in-breeding we have between the MonmapMonitor
       and the Monitor itself. We should find a way to get rid of it given our
       new architecture. Until then, stick with it since we are a
       single-threaded process and, truth be told, no one else relies on this
       thing besides us.
     */
    auto t(std::make_shared<MonitorDBStore::Transaction>());
    t->put(Monitor::MONITOR_NAME, "joined", 1);
    mon.store->apply_transaction(t);
    mon.has_ever_joined = true;
  }

  if (mon.is_leader()) {
    mon.clog->debug() << "monmap " << *mon.monmap;
  }

  apply_mon_features(mon.get_quorum_mon_features(),
                     mon.quorum_min_mon_release);

  mon.update_pending_metadata();
}
// Route an incoming message to the read-side handler.  Commands that
// fail parameter extraction are answered with EINVAL immediately.
bool MonmapMonitor::preprocess_query(MonOpRequestRef op)
{
  auto m = op->get_req<PaxosServiceMessage>();
  const int msg_type = m->get_type();

  if (msg_type == MSG_MON_COMMAND) {
    try {
      return preprocess_command(op);
    } catch (const bad_cmd_get& e) {
      bufferlist bl;
      mon.reply_command(op, -EINVAL, e.what(), bl, get_last_committed());
      return true;
    }
  }
  if (msg_type == MSG_MON_JOIN)
    return preprocess_join(op);

  // any other message type is a dispatch bug
  ceph_abort();
  return true;
}
// Dump service state: committed version range, the full monmap, and
// the ranks currently in quorum.
void MonmapMonitor::dump_info(Formatter *f)
{
  f->dump_unsigned("monmap_first_committed", get_first_committed());
  f->dump_unsigned("monmap_last_committed", get_last_committed());
  f->open_object_section("monmap");
  mon.monmap->dump(f);
  f->close_section();
  f->open_array_section("quorum");
  for (int rank : mon.get_quorum())
    f->dump_int("mon", rank);
  f->close_section();
}
// Handle read-only monmap commands ("mon stat", "mon getmap",
// "mon dump", "mon feature ls").  Returns true when a reply was sent;
// returning false (r stays -1) lets the op fall through to the write
// path in prepare_update().
bool MonmapMonitor::preprocess_command(MonOpRequestRef op)
{
  auto m = op->get_req<MMonCommand>();
  int r = -1;   // -1 == "not handled here"
  bufferlist rdata;
  stringstream ss;

  cmdmap_t cmdmap;
  if (!cmdmap_from_json(m->cmd, &cmdmap, ss)) {
    string rs = ss.str();
    mon.reply_command(op, -EINVAL, rs, rdata, get_last_committed());
    return true;
  }

  string prefix;
  cmd_getval(cmdmap, "prefix", prefix);

  MonSession *session = op->get_session();
  if (!session) {
    mon.reply_command(op, -EACCES, "access denied", get_last_committed());
    return true;
  }

  string format = cmd_getval_or<string>(cmdmap, "format", "plain");
  boost::scoped_ptr<Formatter> f(Formatter::create(format));

  if (prefix == "mon stat") {
    // summary: map, leader and quorum membership
    if (f) {
      f->open_object_section("monmap");
      mon.monmap->dump_summary(f.get());
      f->dump_string("leader", mon.get_leader_name());
      f->open_array_section("quorum");
      for (auto rank: mon.get_quorum()) {
        std::string name = mon.monmap->get_name(rank);
        f->open_object_section("mon");
        f->dump_int("rank", rank);
        f->dump_string("name", name);
        f->close_section();  // mon
      }
      f->close_section();  // quorum
      f->close_section();  // monmap
      f->flush(ss);
    } else {
      mon.monmap->print_summary(ss);
      ss << ", election epoch " << mon.get_epoch() << ", leader "
         << mon.get_leader() << " " << mon.get_leader_name()
         << ", quorum " << mon.get_quorum()
         << " " << mon.get_quorum_names();
    }

    rdata.append(ss);
    ss.str("");
    r = 0;

  } else if (prefix == "mon getmap" ||
             prefix == "mon dump") {

    epoch_t epoch;
    int64_t epochnum = cmd_getval_or<int64_t>(cmdmap, "epoch", 0);
    epoch = epochnum;

    // epoch 0 means the current map; otherwise fetch and decode the
    // requested historical epoch into a temporary map
    MonMap *p = mon.monmap;
    if (epoch) {
      bufferlist bl;
      r = get_version(epoch, bl);
      if (r == -ENOENT) {
        ss << "there is no map for epoch " << epoch;
        goto reply;
      }
      ceph_assert(r == 0);
      ceph_assert(bl.length() > 0);
      p = new MonMap;
      p->decode(bl);
    }

    ceph_assert(p);

    if (prefix == "mon getmap") {
      p->encode(rdata, m->get_connection()->get_features());
      r = 0;
      ss << "got monmap epoch " << p->get_epoch();
    } else if (prefix == "mon dump") {
      stringstream ds;
      if (f) {
        f->open_object_section("monmap");
        p->dump(f.get());
        f->open_array_section("quorum");
        for (set<int>::iterator q = mon.get_quorum().begin();
             q != mon.get_quorum().end(); ++q) {
          f->dump_int("mon", *q);
        }
        f->close_section();
        f->close_section();
        f->flush(ds);
        r = 0;
      } else {
        p->print(ds);
        r = 0;
      }
      rdata.append(ds);
      ss << "dumped monmap epoch " << p->get_epoch();
    }
    // free the temporary historical map, if we allocated one
    if (p != mon.monmap) {
      delete p;
      p = nullptr;
    }

  } else if (prefix == "mon feature ls") {

    bool list_with_value = false;
    cmd_getval_compat_cephbool(cmdmap, "with_value", list_with_value);

    MonMap *p = mon.monmap;

    // list features
    mon_feature_t supported = ceph::features::mon::get_supported();
    mon_feature_t persistent = ceph::features::mon::get_persistent();
    mon_feature_t required = p->get_required_features();

    stringstream ds;
    // shared formatter/plain printer for each feature set
    auto print_feature = [&](mon_feature_t& m_features, const char* m_str) {
      if (f) {
        if (list_with_value)
          m_features.dump_with_value(f.get(), m_str);
        else
          m_features.dump(f.get(), m_str);
      } else {
        if (list_with_value)
          m_features.print_with_value(ds);
        else
          m_features.print(ds);
      }
    };

    if (f) {
      f->open_object_section("features");

      f->open_object_section("all");
      print_feature(supported, "supported");
      print_feature(persistent, "persistent");
      f->close_section();  // all

      f->open_object_section("monmap");
      print_feature(p->persistent_features, "persistent");
      print_feature(p->optional_features, "optional");
      print_feature(required, "required");
      f->close_section();  // monmap

      f->close_section();  // features
      f->flush(ds);
    } else {
      ds << "all features" << std::endl
         << "\tsupported: ";
      print_feature(supported, nullptr);
      ds << std::endl
         << "\tpersistent: ";
      print_feature(persistent, nullptr);
      ds << std::endl
         << std::endl;

      ds << "on current monmap (epoch "
         << p->get_epoch() << ")" << std::endl
         << "\tpersistent: ";
      print_feature(p->persistent_features, nullptr);
      ds << std::endl
         // omit optional features in plain-text
         // makes it easier to read, and they're, currently, empty.
         << "\trequired: ";
      print_feature(required, nullptr);
      ds << std::endl;
    }
    rdata.append(ds);
    r = 0;
  }

reply:
  if (r != -1) {
    string rs;
    getline(ss, rs);

    mon.reply_command(op, r, rs, rdata, get_last_committed());
    return true;
  } else
    return false;
}
// Route a write-side message to its handler; mirrors
// preprocess_query() but for state-mutating operations.
bool MonmapMonitor::prepare_update(MonOpRequestRef op)
{
  auto m = op->get_req<PaxosServiceMessage>();
  dout(7) << __func__ << " " << *m << " from " << m->get_orig_source_inst() << dendl;

  switch (m->get_type()) {
  case MSG_MON_COMMAND:
    try {
      return prepare_command(op);
    } catch (const bad_cmd_get& e) {
      bufferlist bl;
      mon.reply_command(op, -EINVAL, e.what(), bl, get_last_committed());
      return true;
    }
  case MSG_MON_JOIN:
    return prepare_join(op);
  default:
    // unexpected message type: dispatch bug
    ceph_abort();
  }

  return false;
}
bool MonmapMonitor::prepare_command(MonOpRequestRef op)
{
auto m = op->get_req<MMonCommand>();
stringstream ss;
string rs;
int err = -EINVAL;
cmdmap_t cmdmap;
if (!cmdmap_from_json(m->cmd, &cmdmap, ss)) {
string rs = ss.str();
mon.reply_command(op, -EINVAL, rs, get_last_committed());
return true;
}
string prefix;
cmd_getval(cmdmap, "prefix", prefix);
MonSession *session = op->get_session();
if (!session) {
mon.reply_command(op, -EACCES, "access denied", get_last_committed());
return true;
}
/* We should follow the following rules:
*
* - 'monmap' is the current, consistent version of the monmap
* - 'pending_map' is the uncommitted version of the monmap
*
* All checks for the current state must be made against 'monmap'.
* All changes are made against 'pending_map'.
*
* If there are concurrent operations modifying 'pending_map', please
* follow the following rules.
*
* - if pending_map has already been changed, the second operation must
* wait for the proposal to finish and be run again; This is the easiest
* path to guarantee correctness but may impact performance (i.e., it
* will take longer for the user to get a reply).
*
* - if the result of the second operation can be guaranteed to be
* idempotent, the operation may reply to the user once the proposal
* finishes; still needs to wait for the proposal to finish.
*
* - An operation _NEVER_ returns to the user based on pending state.
*
* If an operation does not modify current stable monmap, it may be
* serialized before current pending map, regardless of any change that
* has been made to the pending map -- remember, pending is uncommitted
* state, thus we are not bound by it.
*/
ceph_assert(mon.monmap);
MonMap &monmap = *mon.monmap;
/* Please note:
*
* Adding or removing monitors may lead to loss of quorum.
*
* Because quorum may be lost, it's important to reply something
* to the user, lest she end up waiting forever for a reply. And
* no reply will ever be sent until quorum is formed again.
*
* On the other hand, this means we're leaking uncommitted state
* to the user. As such, please be mindful of the reply message.
*
* e.g., 'adding monitor mon.foo' is okay ('adding' is an on-going
* operation and conveys its not-yet-permanent nature); whereas
* 'added monitor mon.foo' presumes the action has successfully
* completed and state has been committed, which may not be true.
*/
bool propose = false;
if (prefix == "mon add") {
string name;
cmd_getval(cmdmap, "name", name);
string addrstr;
cmd_getval(cmdmap, "addr", addrstr);
entity_addr_t addr;
bufferlist rdata;
if (!addr.parse(addrstr)) {
err = -EINVAL;
ss << "addr " << addrstr << "does not parse";
goto reply;
}
vector<string> locationvec;
map<string, string> loc;
cmd_getval(cmdmap, "location", locationvec);
CrushWrapper::parse_loc_map(locationvec, &loc);
if (locationvec.size() &&
!mon.get_quorum_mon_features().contains_all(
ceph::features::mon::FEATURE_PINGING)) {
err = -ENOTSUP;
ss << "Not all monitors support adding monitors with a location; please upgrade first!";
goto reply;
}
if (locationvec.size() && !loc.size()) {
ss << "We could not parse your input location to anything real; " << locationvec
<< " turned into an empty map!";
err = -EINVAL;
goto reply;
}
dout(10) << "mon add setting location for " << name << " to " << loc << dendl;
// TODO: validate location in crush map
if (monmap.stretch_mode_enabled && !loc.size()) {
ss << "We are in stretch mode and new monitors must have a location, but "
<< "could not parse your input location to anything real; " << locationvec
<< " turned into an empty map!";
err = -EINVAL;
goto reply;
}
// TODO: validate location against any existing stretch config
entity_addrvec_t addrs;
if (monmap.persistent_features.contains_all(
ceph::features::mon::FEATURE_NAUTILUS)) {
if (addr.get_port() == CEPH_MON_PORT_IANA) {
addr.set_type(entity_addr_t::TYPE_MSGR2);
}
if (addr.get_port() == CEPH_MON_PORT_LEGACY) {
// if they specified the *old* default they probably don't care
addr.set_port(0);
}
if (addr.get_port()) {
addrs.v.push_back(addr);
} else {
addr.set_type(entity_addr_t::TYPE_MSGR2);
addr.set_port(CEPH_MON_PORT_IANA);
addrs.v.push_back(addr);
addr.set_type(entity_addr_t::TYPE_LEGACY);
addr.set_port(CEPH_MON_PORT_LEGACY);
addrs.v.push_back(addr);
}
} else {
if (addr.get_port() == 0) {
addr.set_port(CEPH_MON_PORT_LEGACY);
}
addr.set_type(entity_addr_t::TYPE_LEGACY);
addrs.v.push_back(addr);
}
dout(20) << __func__ << " addr " << addr << " -> addrs " << addrs << dendl;
/**
* If we have a monitor with the same name and different addr, then EEXIST
* If we have a monitor with the same addr and different name, then EEXIST
* If we have a monitor with the same addr and same name, then wait for
* the proposal to finish and return success.
* If we don't have the monitor, add it.
*/
err = 0;
if (!ss.str().empty())
ss << "; ";
do {
if (monmap.contains(name)) {
if (monmap.get_addrs(name) == addrs) {
// stable map contains monitor with the same name at the same address.
// serialize before current pending map.
err = 0; // for clarity; this has already been set above.
ss << "mon." << name << " at " << addrs << " already exists";
goto reply;
} else {
ss << "mon." << name
<< " already exists at address " << monmap.get_addrs(name);
}
} else if (monmap.contains(addrs)) {
// we established on the previous branch that name is different
ss << "mon." << monmap.get_name(addrs)
<< " already exists at address " << addr;
} else {
// go ahead and add
break;
}
err = -EEXIST;
goto reply;
} while (false);
if (pending_map.stretch_mode_enabled) {
}
/* Given there's no delay between proposals on the MonmapMonitor (see
* MonmapMonitor::should_propose()), there is no point in checking for
* a mismatch between name and addr on pending_map.
*
* Once we established the monitor does not exist in the committed state,
* we can simply go ahead and add the monitor.
*/
pending_map.add(name, addrs);
pending_map.mon_info[name].crush_loc = loc;
pending_map.last_changed = ceph_clock_now();
ss << "adding mon." << name << " at " << addrs;
propose = true;
dout(0) << __func__ << " proposing new mon." << name << dendl;
} else if (prefix == "mon remove" ||
prefix == "mon rm") {
string name;
cmd_getval(cmdmap, "name", name);
if (!monmap.contains(name)) {
err = 0;
ss << "mon." << name << " does not exist or has already been removed";
goto reply;
}
if (monmap.size() == 1) {
err = -EINVAL;
ss << "error: refusing removal of last monitor " << name;
goto reply;
}
if (pending_map.stretch_mode_enabled &&
name == pending_map.tiebreaker_mon) {
err = -EINVAL;
ss << "you cannot remove stretch mode's tiebreaker monitor";
goto reply;
}
/* At the time of writing, there is no risk of races when multiple clients
* attempt to use the same name. The reason is simple but may not be
* obvious.
*
* In a nutshell, we do not collate proposals on the MonmapMonitor. As
* soon as we return 'true' below, PaxosService::dispatch() will check if
* the service should propose, and - if so - the service will be marked as
* 'proposing' and a proposal will be triggered. The PaxosService class
* guarantees that once a service is marked 'proposing' no further writes
* will be handled.
*
* The decision on whether the service should propose or not is, in this
* case, made by MonmapMonitor::should_propose(), which always considers
* the proposal delay being 0.0 seconds. This is key for PaxosService to
* trigger the proposal immediately.
* 0.0 seconds of delay.
*
* From the above, there's no point in performing further checks on the
* pending_map, as we don't ever have multiple proposals in-flight in
* this service. As we've established the committed state contains the
* monitor, we can simply go ahead and remove it.
*
* Please note that the code hinges on all of the above to be true. It
* has been true since time immemorial and we don't see a good reason
* to make it sturdier at this time - mainly because we don't think it's
* going to change any time soon, lest for any bug that may be unwillingly
* introduced.
*/
entity_addrvec_t addrs = pending_map.get_addrs(name);
pending_map.remove(name);
pending_map.disallowed_leaders.erase(name);
pending_map.last_changed = ceph_clock_now();
propose = true;
err = 0;
} else if (prefix == "mon feature set") {
/* PLEASE NOTE:
*
* We currently only support setting/unsetting persistent features.
* This is by design, given at the moment we still don't have optional
* features, and, as such, there is no point introducing an interface
* to manipulate them. This allows us to provide a cleaner, more
* intuitive interface to the user, modifying solely persistent
* features.
*
* In the future we should consider adding another interface to handle
* optional features/flags; e.g., 'mon feature flag set/unset', or
* 'mon flag set/unset'.
*/
string feature_name;
if (!cmd_getval(cmdmap, "feature_name", feature_name)) {
ss << "missing required feature name";
err = -EINVAL;
goto reply;
}
mon_feature_t feature;
feature = ceph::features::mon::get_feature_by_name(feature_name);
if (feature == ceph::features::mon::FEATURE_NONE) {
ss << "unknown feature '" << feature_name << "'";
err = -ENOENT;
goto reply;
}
bool sure = false;
cmd_getval(cmdmap, "yes_i_really_mean_it", sure);
if (!sure) {
ss << "please specify '--yes-i-really-mean-it' if you "
<< "really, **really** want to set feature '"
<< feature << "' in the monmap.";
err = -EPERM;
goto reply;
}
if (!mon.get_quorum_mon_features().contains_all(feature)) {
ss << "current quorum does not support feature '" << feature
<< "'; supported features: "
<< mon.get_quorum_mon_features();
err = -EINVAL;
goto reply;
}
ss << "setting feature '" << feature << "'";
err = 0;
if (monmap.persistent_features.contains_all(feature)) {
dout(10) << __func__ << " feature '" << feature
<< "' already set on monmap; no-op." << dendl;
goto reply;
}
pending_map.persistent_features.set_feature(feature);
pending_map.last_changed = ceph_clock_now();
propose = true;
dout(1) << __func__ << " " << ss.str() << "; new features will be: "
<< "persistent = " << pending_map.persistent_features
// output optional nevertheless, for auditing purposes.
<< ", optional = " << pending_map.optional_features << dendl;
} else if (prefix == "mon set-rank") {
string name;
int64_t rank;
if (!cmd_getval(cmdmap, "name", name) ||
!cmd_getval(cmdmap, "rank", rank)) {
err = -EINVAL;
goto reply;
}
int oldrank = pending_map.get_rank(name);
if (oldrank < 0) {
ss << "mon." << name << " does not exist in monmap";
err = -ENOENT;
goto reply;
}
err = 0;
pending_map.set_rank(name, rank);
pending_map.last_changed = ceph_clock_now();
propose = true;
} else if (prefix == "mon set-addrs") {
string name;
string addrs;
if (!cmd_getval(cmdmap, "name", name) ||
!cmd_getval(cmdmap, "addrs", addrs)) {
err = -EINVAL;
goto reply;
}
if (!pending_map.contains(name)) {
ss << "mon." << name << " does not exist";
err = -ENOENT;
goto reply;
}
entity_addrvec_t av;
if (!av.parse(addrs.c_str(), nullptr)) {
ss << "failed to parse addrs '" << addrs << "'";
err = -EINVAL;
goto reply;
}
for (auto& a : av.v) {
a.set_nonce(0);
if (!a.get_port()) {
ss << "monitor must bind to a non-zero port, not " << a;
err = -EINVAL;
goto reply;
}
}
err = 0;
pending_map.set_addrvec(name, av);
pending_map.last_changed = ceph_clock_now();
propose = true;
} else if (prefix == "mon set-weight") {
string name;
int64_t weight;
if (!cmd_getval(cmdmap, "name", name) ||
!cmd_getval(cmdmap, "weight", weight)) {
err = -EINVAL;
goto reply;
}
if (!pending_map.contains(name)) {
ss << "mon." << name << " does not exist";
err = -ENOENT;
goto reply;
}
err = 0;
pending_map.set_weight(name, weight);
pending_map.last_changed = ceph_clock_now();
propose = true;
} else if (prefix == "mon enable-msgr2") {
if (!monmap.get_required_features().contains_all(
ceph::features::mon::FEATURE_NAUTILUS)) {
err = -EACCES;
ss << "all monitors must be running nautilus to enable v2";
goto reply;
}
for (auto& i : pending_map.mon_info) {
if (i.second.public_addrs.v.size() == 1 &&
i.second.public_addrs.front().is_legacy() &&
i.second.public_addrs.front().get_port() == CEPH_MON_PORT_LEGACY) {
entity_addrvec_t av;
entity_addr_t a = i.second.public_addrs.front();
a.set_type(entity_addr_t::TYPE_MSGR2);
a.set_port(CEPH_MON_PORT_IANA);
av.v.push_back(a);
av.v.push_back(i.second.public_addrs.front());
dout(10) << " setting mon." << i.first
<< " addrs " << i.second.public_addrs
<< " -> " << av << dendl;
pending_map.set_addrvec(i.first, av);
propose = true;
pending_map.last_changed = ceph_clock_now();
}
}
err = 0;
} else if (prefix == "mon set election_strategy") {
if (!mon.get_quorum_mon_features().contains_all(
ceph::features::mon::FEATURE_PINGING)) {
err = -ENOTSUP;
ss << "Not all monitors support changing election strategies; please upgrade first!";
goto reply;
}
string strat;
MonMap::election_strategy strategy;
if (!cmd_getval(cmdmap, "strategy", strat)) {
err = -EINVAL;
goto reply;
}
if (strat == "classic") {
strategy = MonMap::CLASSIC;
} else if (strat == "disallow") {
strategy = MonMap::DISALLOW;
} else if (strat == "connectivity") {
strategy = MonMap::CONNECTIVITY;
} else {
err = -EINVAL;
goto reply;
}
err = 0;
pending_map.strategy = strategy;
pending_map.last_changed = ceph_clock_now();
propose = true;
} else if (prefix == "mon add disallowed_leader") {
if (!mon.get_quorum_mon_features().contains_all(
ceph::features::mon::FEATURE_PINGING)) {
err = -ENOTSUP;
ss << "Not all monitors support changing election strategies; please upgrade first!";
goto reply;
}
string name;
if (!cmd_getval(cmdmap, "name", name)) {
err = -EINVAL;
goto reply;
}
if (pending_map.strategy != MonMap::DISALLOW &&
pending_map.strategy != MonMap::CONNECTIVITY) {
ss << "You cannot disallow monitors in your current election mode";
err = -EINVAL;
goto reply;
}
if (!pending_map.contains(name)) {
ss << "mon." << name << " does not exist";
err = -ENOENT;
goto reply;
}
if (pending_map.disallowed_leaders.count(name)) {
ss << "mon." << name << " is already disallowed";
err = 0;
goto reply;
}
if (pending_map.disallowed_leaders.size() == pending_map.size() - 1) {
ss << "mon." << name << " is the only remaining allowed leader!";
err = -EINVAL;
goto reply;
}
pending_map.disallowed_leaders.insert(name);
pending_map.last_changed = ceph_clock_now();
err = 0;
propose = true;
} else if (prefix == "mon rm disallowed_leader") {
if (!mon.get_quorum_mon_features().contains_all(
ceph::features::mon::FEATURE_PINGING)) {
err = -ENOTSUP;
ss << "Not all monitors support changing election strategies; please upgrade first!";
goto reply;
}
string name;
if (!cmd_getval(cmdmap, "name", name)) {
err = -EINVAL;
goto reply;
}
if (pending_map.strategy != MonMap::DISALLOW &&
pending_map.strategy != MonMap::CONNECTIVITY) {
ss << "You cannot disallow monitors in your current election mode";
err = -EINVAL;
goto reply;
}
if (!pending_map.contains(name)) {
ss << "mon." << name << " does not exist";
err = -ENOENT;
goto reply;
}
if (!pending_map.disallowed_leaders.count(name)) {
ss << "mon." << name << " is already allowed";
err = 0;
goto reply;
}
pending_map.disallowed_leaders.erase(name);
pending_map.last_changed = ceph_clock_now();
err = 0;
propose = true;
} else if (prefix == "mon set_location") {
if (!mon.get_quorum_mon_features().contains_all(
ceph::features::mon::FEATURE_PINGING)) {
err = -ENOTSUP;
ss << "Not all monitors support monitor locations; please upgrade first!";
goto reply;
}
string name;
if (!cmd_getval(cmdmap, "name", name)) {
err = -EINVAL;
goto reply;
}
if (!pending_map.contains(name)) {
ss << "mon." << name << " does not exist";
err = -ENOENT;
goto reply;
}
vector<string> argvec;
map<string, string> loc;
cmd_getval(cmdmap, "args", argvec);
CrushWrapper::parse_loc_map(argvec, &loc);
dout(10) << "mon set_location for " << name << " to " << loc << dendl;
// TODO: validate location in crush map
if (!loc.size()) {
ss << "We could not parse your input location to anything real; " << argvec
<< " turned into an empty map!";
err = -EINVAL;
goto reply;
}
// TODO: validate location against any existing stretch config
pending_map.mon_info[name].crush_loc = loc;
pending_map.last_changed = ceph_clock_now();
err = 0;
propose = true;
} else if (prefix == "mon set_new_tiebreaker") {
if (!pending_map.stretch_mode_enabled) {
err = -EINVAL;
ss << "Stretch mode is not enabled, so there is no tiebreaker";
goto reply;
}
string name;
if (!cmd_getval(cmdmap, "name", name)) {
err = -EINVAL;
goto reply;
}
bool sure = false;
cmd_getval(cmdmap, "yes_i_really_mean_it", sure);
const auto &existing_tiebreaker_info_i = pending_map.mon_info.find(pending_map.tiebreaker_mon);
const auto &new_tiebreaker_info_i = pending_map.mon_info.find(name);
if (new_tiebreaker_info_i == pending_map.mon_info.end()) {
ss << "mon." << name << " does not exist";
err = -ENOENT;
goto reply;
}
const auto& new_info = new_tiebreaker_info_i->second;
if (new_info.crush_loc.empty()) {
ss << "mon." << name << " does not have a location specified";
err = -EINVAL;
goto reply;
}
if (!mon.osdmon()->is_readable()) {
dout(10) << __func__
<< ": waiting for osdmon readable to inspect crush barrier"
<< dendl;
mon.osdmon()->wait_for_readable(op, new Monitor::C_RetryMessage(&mon, op));
return false;
}
int32_t stretch_divider_id = mon.osdmon()->osdmap.stretch_mode_bucket;
string stretch_bucket_divider = mon.osdmon()->osdmap.crush->
get_type_name(stretch_divider_id);
const auto& new_loc_i = new_info.crush_loc.find(stretch_bucket_divider);
if (new_loc_i == new_info.crush_loc.end()) {
ss << "mon." << name << " has a specificed location, but not a "
<< stretch_bucket_divider << ", which is the stretch divider";
err = -EINVAL;
goto reply;
}
const string& new_loc = new_loc_i->second;
set<string> matching_mons;
for (const auto& mii : pending_map.mon_info) {
const auto& other_loc_i = mii.second.crush_loc.find(stretch_bucket_divider);
if (mii.first == name) {
continue;
}
if (other_loc_i == mii.second.crush_loc.end()) { // huh
continue;
}
const string& other_loc = other_loc_i->second;
if (other_loc == new_loc &&
mii.first != existing_tiebreaker_info_i->first) {
matching_mons.insert(mii.first);
}
}
if (!matching_mons.empty()) {
ss << "mon." << name << " has location " << new_loc_i->second
<< ", which matches mons " << matching_mons << " on the "
<< stretch_bucket_divider << " dividing bucket for stretch mode. "
"Pass --yes-i-really-mean-it if you're sure you want to do this."
"(You really don't.)";
err = -EINVAL;
goto reply;
}
pending_map.tiebreaker_mon = name;
pending_map.disallowed_leaders.insert(name);
pending_map.last_changed = ceph_clock_now();
err = 0;
propose = true;
} else if (prefix == "mon enable_stretch_mode") {
if (!mon.osdmon()->is_writeable()) {
dout(10) << __func__
<< ": waiting for osdmon writeable for stretch mode" << dendl;
mon.osdmon()->wait_for_writeable(op, new Monitor::C_RetryMessage(&mon, op));
return false;
}
{
if (monmap.stretch_mode_enabled) {
ss << "stretch mode is already engaged";
err = -EINVAL;
goto reply;
}
if (pending_map.stretch_mode_enabled) {
ss << "stretch mode currently committing";
err = 0;
goto reply;
}
string tiebreaker_mon;
if (!cmd_getval(cmdmap, "tiebreaker_mon", tiebreaker_mon)) {
ss << "must specify a tiebreaker monitor";
err = -EINVAL;
goto reply;
}
string new_crush_rule;
if (!cmd_getval(cmdmap, "new_crush_rule", new_crush_rule)) {
ss << "must specify a new crush rule that spreads out copies over multiple sites";
err = -EINVAL;
goto reply;
}
string dividing_bucket;
if (!cmd_getval(cmdmap, "dividing_bucket", dividing_bucket)) {
ss << "must specify a dividing bucket";
err = -EINVAL;
goto reply;
}
//okay, initial arguments make sense, check pools and cluster state
err = mon.osdmon()->check_cluster_features(CEPH_FEATUREMASK_STRETCH_MODE, ss);
if (err)
goto reply;
struct Plugger {
Paxos &p;
Plugger(Paxos &p) : p(p) { p.plug(); }
~Plugger() { p.unplug(); }
} plugger(paxos);
set<pg_pool_t*> pools;
bool okay = false;
int errcode = 0;
mon.osdmon()->try_enable_stretch_mode_pools(ss, &okay, &errcode,
&pools, new_crush_rule);
if (!okay) {
err = errcode;
goto reply;
}
try_enable_stretch_mode(ss, &okay, &errcode, false,
tiebreaker_mon, dividing_bucket);
if (!okay) {
err = errcode;
goto reply;
}
mon.osdmon()->try_enable_stretch_mode(ss, &okay, &errcode, false,
dividing_bucket, 2, pools, new_crush_rule);
if (!okay) {
err = errcode;
goto reply;
}
// everything looks good, actually commit the changes!
try_enable_stretch_mode(ss, &okay, &errcode, true,
tiebreaker_mon, dividing_bucket);
mon.osdmon()->try_enable_stretch_mode(ss, &okay, &errcode, true,
dividing_bucket,
2, // right now we only support 2 sites
pools, new_crush_rule);
ceph_assert(okay == true);
}
request_proposal(mon.osdmon());
err = 0;
propose = true;
} else {
ss << "unknown command " << prefix;
err = -EINVAL;
}
reply:
getline(ss, rs);
mon.reply_command(op, err, rs, get_last_committed());
// we are returning to the user; do not propose.
return propose;
}
/**
 * Validate monitor-side preconditions for stretch mode and, when
 * commit=true, record the tiebreaker and flip stretch_mode_enabled in
 * the pending map.
 *
 * Designed to be called twice (first commit=false to validate, then
 * commit=true to apply); any validation failure during a commit pass is
 * a bug, hence the ceph_assert(!commit) on every error path.
 *
 * @param ss error/status text for the caller
 * @param okay set to true iff all checks pass
 * @param errcode set to -errno on failure
 * @param commit whether to actually modify pending_map
 * @param tiebreaker_mon name of the monitor acting as tiebreaker
 * @param dividing_bucket CRUSH bucket type (e.g. "datacenter") that
 *        splits the cluster into the two stretch sites
 */
void MonmapMonitor::try_enable_stretch_mode(stringstream& ss, bool *okay,
					    int *errcode, bool commit,
					    const string& tiebreaker_mon,
					    const string& dividing_bucket)
{
  dout(20) << __func__ << dendl;
  *okay = false;
  if (pending_map.strategy != MonMap::CONNECTIVITY) {
    ss << "Monitors must use the connectivity strategy to enable stretch mode";
    *errcode = -EINVAL;
    ceph_assert(!commit);
    return;
  }
  if (!pending_map.contains(tiebreaker_mon)) {
    // fix: the original message was missing the space before "does"
    ss << "mon " << tiebreaker_mon << " does not seem to exist";
    *errcode = -ENOENT;
    ceph_assert(!commit);
    return;
  }
  // Every monitor must have a crush location entry for the dividing bucket.
  map<string,string> buckets;
  for (const auto&mii : mon.monmap->mon_info) {
    const auto& mi = mii.second;
    const auto& bi = mi.crush_loc.find(dividing_bucket);
    if (bi == mi.crush_loc.end()) {
      ss << "Could not find location entry for " << dividing_bucket
	 << " on monitor " << mi.name;
      *errcode = -EINVAL;
      ceph_assert(!commit);
      return;
    }
    buckets[mii.first] = bi->second;
  }
  // The non-tiebreaker monitors must fall into exactly two buckets, and
  // the tiebreaker must live in a third one.
  string bucket1, bucket2, tiebreaker_bucket;
  for (auto& i : buckets) {
    if (i.first == tiebreaker_mon) {
      tiebreaker_bucket = i.second;
      continue;
    }
    if (bucket1.empty()) {
      bucket1 = i.second;
    }
    if (bucket1 != i.second &&
	bucket2.empty()) {
      bucket2 = i.second;
    }
    if (bucket1 != i.second &&
	bucket2 != i.second) {
      ss << "There are too many monitor buckets for stretch mode, found "
	 << bucket1 << "," << bucket2 << "," << i.second;
      *errcode = -EINVAL;
      ceph_assert(!commit);
      return;
    }
  }
  if (bucket1.empty() || bucket2.empty()) {
    // fix: report the bucket actually found; the previous expression
    // (bucket1.empty() ? bucket1 : bucket2) always printed an empty
    // string on this path (bucket2 is empty whenever we get here with a
    // non-empty bucket1).
    ss << "There are not enough monitor buckets for stretch mode;"
       << " must have at least 2 plus the tiebreaker but only found "
       << bucket1;
    *errcode = -EINVAL;
    ceph_assert(!commit);
    return;
  }
  if (tiebreaker_bucket == bucket1 ||
      tiebreaker_bucket == bucket2) {
    ss << "The named tiebreaker monitor " << tiebreaker_mon
       << " is in the same CRUSH bucket " << tiebreaker_bucket
       << " as other monitors";
    *errcode = -EINVAL;
    ceph_assert(!commit);
    return;
  }
  if (commit) {
    // the tiebreaker is never allowed to become leader
    pending_map.disallowed_leaders.insert(tiebreaker_mon);
    pending_map.tiebreaker_mon = tiebreaker_mon;
    pending_map.stretch_mode_enabled = true;
  }
  *okay = true;
}
// Enter degraded stretch mode: remember which monitors are dead so the
// cluster knows they are out of quorum, and publish the updated map.
void MonmapMonitor::trigger_degraded_stretch_mode(const set<string>& dead_mons)
{
  dout(20) << __func__ << dendl;
  for (const auto& dead_name : dead_mons) {
    pending_map.stretch_marked_down_mons.insert(dead_name);
  }
  propose_pending();
}
// Leave degraded stretch mode: forget the marked-down set so any
// non-tiebreaker monitor may lead again, and publish the updated map.
void MonmapMonitor::trigger_healthy_stretch_mode()
{
  dout(20) << __func__ << dendl;
  auto& downed = pending_map.stretch_marked_down_mons;
  downed.clear();
  propose_pending();
}
// Fast path for MMonJoin: return true when the request is fully handled
// here (no-op or rejected) so no paxos update is needed; return false to
// fall through to prepare_join(), which mutates the pending map.
bool MonmapMonitor::preprocess_join(MonOpRequestRef op)
{
  auto join = op->get_req<MMonJoin>();
  dout(10) << __func__ << " " << join->name << " at " << join->addrs << dendl;
  MonSession *session = op->get_session();
  if (!session ||
      !session->is_capable("mon", MON_CAP_W | MON_CAP_X)) {
    dout(10) << " insufficient caps" << dendl;
    return true;
  }
  // Already present under this name with a concrete (non-blank) address,
  // and either no forced location or the location already matches: no-op.
  const auto name_info_i = pending_map.mon_info.find(join->name);
  if (name_info_i != pending_map.mon_info.end() &&
      !name_info_i->second.public_addrs.front().is_blank_ip() &&
      (!join->force_loc || join->crush_loc == name_info_i->second.crush_loc)) {
    dout(10) << " already have " << join->name << dendl;
    return true;
  }
  // Name of whichever monitor (if any) currently owns these addrs.
  string addr_name;
  if (pending_map.contains(join->addrs)) {
    addr_name = pending_map.get_name(join->addrs);
  }
  // Same name at the same address with no effective location change:
  // also a no-op.  (operator[] is safe here: addr_name is known to be
  // present in the map when this branch is evaluated.)
  if (!addr_name.empty() &&
      addr_name == join->name &&
      (!join->force_loc || join->crush_loc.empty() ||
       pending_map.mon_info[addr_name].crush_loc == join->crush_loc)) {
    dout(10) << " already have " << join->addrs << dendl;
    return true;
  }
  // In stretch mode every monitor needs a crush location; reject a join
  // that provides none and has no existing entry to inherit one from.
  if (pending_map.stretch_mode_enabled &&
      join->crush_loc.empty() &&
      (addr_name.empty() ||
       pending_map.mon_info[addr_name].crush_loc.empty())) {
    dout(10) << "stretch mode engaged but no source of crush_loc" << dendl;
    mon.clog->info() << join->name << " attempted to join from " << join->name
                     << ' ' << join->addrs
                     << "; but lacks a crush_location for stretch mode";
    return true;
  }
  return false;
}
// Apply an MMonJoin to the pending map: evict any monitor that currently
// holds the same name or addrs, then (re-)add the joiner, preserving an
// existing crush location unless the join forces a new one.
bool MonmapMonitor::prepare_join(MonOpRequestRef op)
{
  auto join = op->get_req<MMonJoin>();
  dout(0) << "adding/updating " << join->name
	  << " at " << join->addrs << " to monitor cluster" << dendl;
  // If another entry owns these addrs, remember its location and drop it
  // so the joining daemon can take over the slot.
  map<string,string> existing_loc;
  if (pending_map.contains(join->addrs)) {
    const string prev_name = pending_map.get_name(join->addrs);
    existing_loc = pending_map.mon_info[prev_name].crush_loc;
    pending_map.remove(prev_name);
  }
  if (pending_map.contains(join->name)) {
    pending_map.remove(join->name);
  }
  pending_map.add(join->name, join->addrs);
  const bool use_new_loc = join->force_loc || existing_loc.empty();
  pending_map.mon_info[join->name].crush_loc =
    use_new_loc ? join->crush_loc : existing_loc;
  pending_map.last_changed = ceph_clock_now();
  return true;
}
// Monmap changes are always proposed immediately: report zero delay and
// unconditionally ask PaxosService to propose.
bool MonmapMonitor::should_propose(double& delay)
{
  delay = 0.0;
  return true;
}
// Fetch the latest committed monmap into 'bl'.  Returns 0 on success,
// -ENOENT if no committed version exists, or the error from get_version().
int MonmapMonitor::get_monmap(bufferlist &bl)
{
  const version_t ver = get_last_committed();
  dout(10) << __func__ << " ver " << ver << dendl;
  if (!mon.store->exists(get_service_name(), stringify(ver))) {
    return -ENOENT;
  }
  if (int err = get_version(ver, bl); err < 0) {
    dout(1) << __func__ << " error obtaining monmap: "
	    << cpp_strerror(err) << dendl;
    return err;
  }
  return 0;
}
// Walk every "monmap" subscription in the session map and service each
// one that is behind the current epoch.
void MonmapMonitor::check_subs()
{
  const string type = "monmap";
  mon.with_session_map([this, &type](const MonSessionMap& session_map) {
      auto it = session_map.subs.find(type);
      if (it == session_map.subs.end()) {
	return;
      }
      for (auto *sub : *it->second) {
	check_sub(sub);
      }
    });
}
// Send the latest monmap to a subscriber that is behind; one-shot
// subscriptions are dropped after being serviced, persistent ones have
// their cursor advanced past the current epoch.
void MonmapMonitor::check_sub(Subscription *sub)
{
  const auto cur_epoch = mon.monmap->get_epoch();
  dout(10) << __func__
	   << " monmap next " << sub->next
	   << " have " << cur_epoch << dendl;
  if (sub->next > cur_epoch) {
    return;  // subscriber is already up to date
  }
  mon.send_latest_monmap(sub->session->con.get());
  if (sub->onetime) {
    mon.with_session_map([sub](MonSessionMap& session_map) {
	session_map.remove_sub(sub);
      });
  } else {
    sub->next = cur_epoch + 1;
  }
}
void MonmapMonitor::tick()
{
if (!is_active() ||
!mon.is_leader()) {
return;
}
if (mon.monmap->created.is_zero()) {
dout(10) << __func__ << " detected empty created stamp" << dendl;
utime_t ctime;
for (version_t v = 1; v <= get_last_committed(); v++) {
bufferlist bl;
int r = get_version(v, bl);
if (r < 0) {
continue;
}
MonMap m;
auto p = bl.cbegin();
decode(m, p);
if (!m.last_changed.is_zero()) {
dout(10) << __func__ << " first monmap with last_changed is "
<< v << " with " << m.last_changed << dendl;
ctime = m.last_changed;
break;
}
}
if (ctime.is_zero()) {
ctime = ceph_clock_now();
}
dout(10) << __func__ << " updating created stamp to " << ctime << dendl;
pending_map.created = ctime;
propose_pending();
}
}
| 45,188 | 29.845734 | 99 |
cc
|
null |
ceph-main/src/mon/MonmapMonitor.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
/*
* The Monmap Monitor is used to track the monitors in the cluster.
*/
#ifndef CEPH_MONMAPMONITOR_H
#define CEPH_MONMAPMONITOR_H
#include <map>
#include <set>
#include "include/types.h"
#include "msg/Messenger.h"
#include "PaxosService.h"
#include "MonMap.h"
#include "MonitorDBStore.h"
// Paxos service that maintains the cluster's monitor map (MonMap):
// membership, addresses, ranks, election strategy, and stretch-mode state.
class MonmapMonitor : public PaxosService {
public:
  MonmapMonitor(Monitor &mn, Paxos &p, const std::string& service_name)
    : PaxosService(mn, p, service_name)
  {
  }
  MonMap pending_map; //the pending map awaiting passage
  void create_initial() override;
  void update_from_paxos(bool *need_bootstrap) override;
  void create_pending() override;
  void encode_pending(MonitorDBStore::TransactionRef t) override;
  // we always encode the full map; we have no use for full versions
  void encode_full(MonitorDBStore::TransactionRef t) override { }
  void on_active() override;
  void apply_mon_features(const mon_feature_t& features,
			  ceph_release_t min_mon_release);
  void dump_info(ceph::Formatter *f);
  bool preprocess_query(MonOpRequestRef op) override;
  bool prepare_update(MonOpRequestRef op) override;
  bool preprocess_join(MonOpRequestRef op);
  bool prepare_join(MonOpRequestRef op);
  bool preprocess_command(MonOpRequestRef op);
  bool prepare_command(MonOpRequestRef op);
  int get_monmap(ceph::buffer::list &bl);
  /*
   * Monmap changes are important enough that we never delay a proposal:
   * this implementation always writes a delay of 0.0 and returns true.
   */
  bool should_propose(double& delay) override;
  void check_sub(Subscription *sub);
  void tick() override;
private:
  void check_subs();
  ceph::buffer::list monmap_bl;
  /**
   * Check validity of inputs and monitor state to
   * engage stretch mode. Designed to be used with
   * OSDMonitor::try_enable_stretch_mode() where we call both twice,
   * first with commit=false to validate.
   * @param ss: a stringstream to write errors into
   * @param okay: Filled to true if okay, false if validation fails
   * @param errcode: filled with -errno if there's a problem
   * @param commit: true if we should commit the change, false if just testing
   * @param tiebreaker_mon: the name of the monitor to declare tiebreaker
   * @param dividing_bucket: the bucket type (eg 'dc') that divides the cluster
   */
  void try_enable_stretch_mode(std::stringstream& ss, bool *okay,
			       int *errcode, bool commit,
			       const std::string& tiebreaker_mon,
			       const std::string& dividing_bucket);
public:
  /**
   * Set us to degraded stretch mode. Put the dead_mons in
   * the MonMap.
   */
  void trigger_degraded_stretch_mode(const std::set<std::string>& dead_mons);
  /**
   * Set us to healthy stretch mode: clear out the
   * down list to allow any non-tiebreaker mon to be the leader again.
   */
  void trigger_healthy_stretch_mode();
};
#endif
| 3,239 | 27.928571 | 79 |
h
|
null |
ceph-main/src/mon/OSDMonitor.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
* Copyright (C) 2013,2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <algorithm>
#include <boost/algorithm/string.hpp>
#include <experimental/iterator>
#include <locale>
#include <sstream>
#include "mon/OSDMonitor.h"
#include "mon/Monitor.h"
#include "mon/MDSMonitor.h"
#include "mon/MgrStatMonitor.h"
#include "mon/AuthMonitor.h"
#include "mon/KVMonitor.h"
#include "mon/MonitorDBStore.h"
#include "mon/Session.h"
#include "crush/CrushWrapper.h"
#include "crush/CrushTester.h"
#include "crush/CrushTreeDumper.h"
#include "messages/MOSDBeacon.h"
#include "messages/MOSDFailure.h"
#include "messages/MOSDMarkMeDown.h"
#include "messages/MOSDMarkMeDead.h"
#include "messages/MOSDFull.h"
#include "messages/MOSDMap.h"
#include "messages/MMonGetOSDMap.h"
#include "messages/MOSDBoot.h"
#include "messages/MOSDAlive.h"
#include "messages/MPoolOp.h"
#include "messages/MPoolOpReply.h"
#include "messages/MOSDPGCreate2.h"
#include "messages/MOSDPGCreated.h"
#include "messages/MOSDPGTemp.h"
#include "messages/MOSDPGReadyToMerge.h"
#include "messages/MMonCommand.h"
#include "messages/MRemoveSnaps.h"
#include "messages/MRoute.h"
#include "messages/MMonGetPurgedSnaps.h"
#include "messages/MMonGetPurgedSnapsReply.h"
#include "common/TextTable.h"
#include "common/Timer.h"
#include "common/ceph_argparse.h"
#include "common/perf_counters.h"
#include "common/PriorityCache.h"
#include "common/strtol.h"
#include "common/numa.h"
#include "common/config.h"
#include "common/errno.h"
#include "erasure-code/ErasureCodePlugin.h"
#include "compressor/Compressor.h"
#include "common/Checksummer.h"
#include "include/compat.h"
#include "include/ceph_assert.h"
#include "include/stringify.h"
#include "include/util.h"
#include "common/cmdparse.h"
#include "include/str_list.h"
#include "include/str_map.h"
#include "include/scope_guard.h"
#include "perfglue/heap_profiler.h"
#include "auth/cephx/CephxKeyServer.h"
#include "osd/OSDCap.h"
#include "json_spirit/json_spirit_reader.h"
#include <boost/algorithm/string/predicate.hpp>
using std::dec;
using std::hex;
using std::list;
using std::map;
using std::make_pair;
using std::ostringstream;
using std::pair;
using std::set;
using std::string;
using std::stringstream;
using std::to_string;
using std::vector;
using ceph::bufferlist;
using ceph::decode;
using ceph::encode;
using ceph::ErasureCodeInterfaceRef;
using ceph::ErasureCodePluginRegistry;
using ceph::ErasureCodeProfile;
using ceph::Formatter;
using ceph::JSONFormatter;
using ceph::make_message;
#define dout_subsys ceph_subsys_mon
static const string OSD_PG_CREATING_PREFIX("osd_pg_creating");
static const string OSD_METADATA_PREFIX("osd_metadata");
static const string OSD_SNAP_PREFIX("osd_snap");
/*
OSD snapshot metadata
---------------------
-- starting with mimic, removed in octopus --
"removed_epoch_%llu_%08lx" % (pool, epoch)
-> interval_set<snapid_t>
"removed_snap_%llu_%016llx" % (pool, last_snap)
-> { first_snap, end_snap, epoch } (last_snap = end_snap - 1)
-- starting with mimic --
"purged_snap_%llu_%016llx" % (pool, last_snap)
-> { first_snap, end_snap, epoch } (last_snap = end_snap - 1)
  - note that the {removed,purged}_snap put the last snap in the key so
that we can use forward iteration only to search for an epoch in an
interval. e.g., to test if epoch N is removed/purged, we'll find a key
>= N that either does or doesn't contain the given snap.
-- starting with octopus --
"purged_epoch_%08lx" % epoch
-> map<int64_t,interval_set<snapid_t>>
*/
using namespace TOPNSPC::common;
namespace {
// Adapter exposing an OSDMap cache (incremental or full) to the
// PriorityCache framework so the monitor can autotune its memory use.
// All of its bytes are requested at PRI1 priority; the bin-based APIs
// are unused here and implemented as no-ops.
struct OSDMemCache : public PriorityCache::PriCache {
  OSDMonitor *osdmon;
  int64_t cache_bytes[PriorityCache::Priority::LAST+1] = {0};
  int64_t committed_bytes = 0;
  double cache_ratio = 0;
  OSDMemCache(OSDMonitor *m) : osdmon(m) {};
  // Bytes currently consumed by the underlying cache (subclass-provided).
  virtual uint64_t _get_used_bytes() const = 0;
  // Request enough additional bytes at 'pri' to cover current usage.
  // Only PRI1 is meaningful; any other priority yields -EOPNOTSUPP.
  virtual int64_t request_cache_bytes(
      PriorityCache::Priority pri, uint64_t total_cache) const {
    int64_t assigned = get_cache_bytes(pri);
    switch (pri) {
    // All cache items are currently set to have PRI1 priority
    case PriorityCache::Priority::PRI1:
      {
        int64_t request = _get_used_bytes();
        return (request > assigned) ? request - assigned : 0;
      }
    default:
      break;
    }
    return -EOPNOTSUPP;
  }
  virtual int64_t get_cache_bytes(PriorityCache::Priority pri) const {
    return cache_bytes[pri];
  }
  // Total bytes assigned across all priorities.
  virtual int64_t get_cache_bytes() const {
    int64_t total = 0;
    for (int i = 0; i < PriorityCache::Priority::LAST + 1; i++) {
      PriorityCache::Priority pri = static_cast<PriorityCache::Priority>(i);
      total += get_cache_bytes(pri);
    }
    return total;
  }
  virtual void set_cache_bytes(PriorityCache::Priority pri, int64_t bytes) {
    cache_bytes[pri] = bytes;
  }
  virtual void add_cache_bytes(PriorityCache::Priority pri, int64_t bytes) {
    cache_bytes[pri] += bytes;
  }
  // Round the assigned total to a chunk and remember it as committed.
  virtual int64_t commit_cache_size(uint64_t total_cache) {
    committed_bytes = PriorityCache::get_chunk(
      get_cache_bytes(), total_cache);
    return committed_bytes;
  }
  virtual int64_t get_committed_size() const {
    return committed_bytes;
  }
  virtual double get_cache_ratio() const {
    return cache_ratio;
  }
  virtual void set_cache_ratio(double ratio) {
    cache_ratio = ratio;
  }
  // Bin tracking is not used by the monitor caches; no-ops / zero.
  virtual void shift_bins() {
  }
  virtual void import_bins(const std::vector<uint64_t> &bins) {
  }
  virtual void set_bins(PriorityCache::Priority pri, uint64_t end_bin) {
  }
  virtual uint64_t get_bins(PriorityCache::Priority pri) const {
    return 0;
  }
  virtual string get_cache_name() const = 0;
};
// PriorityCache adapter for the incremental-osdmap LRU cache.
struct IncCache : public OSDMemCache {
  IncCache(OSDMonitor *m) : OSDMemCache(m) {};
  virtual uint64_t _get_used_bytes() const {
    return osdmon->inc_osd_cache.get_bytes();
  }
  virtual string get_cache_name() const {
    return "OSDMap Inc Cache";
  }
  // Number of incremental maps currently cached.
  uint64_t _get_num_osdmaps() const {
    return osdmon->inc_osd_cache.get_size();
  }
};
// PriorityCache adapter for the full-osdmap LRU cache.
struct FullCache : public OSDMemCache {
  FullCache(OSDMonitor *m) : OSDMemCache(m) {};
  virtual uint64_t _get_used_bytes() const {
    return osdmon->full_osd_cache.get_bytes();
  }
  virtual string get_cache_name() const {
    return "OSDMap Full Cache";
  }
  // Number of full maps currently cached.
  uint64_t _get_num_osdmaps() const {
    return osdmon->full_osd_cache.get_size();
  }
};
std::shared_ptr<IncCache> inc_cache;
std::shared_ptr<FullCache> full_cache;
const uint32_t MAX_POOL_APPLICATIONS = 4;
const uint32_t MAX_POOL_APPLICATION_KEYS = 64;
const uint32_t MAX_POOL_APPLICATION_LENGTH = 128;
// True if this OSD cap grant confers write access either to all pools or
// specifically to *pool_name (when given).
bool is_osd_writable(const OSDCapGrant& grant, const std::string* pool_name) {
  // Note: this doesn't include support for the application tag match
  if ((grant.spec.allow & OSD_CAP_W) == 0) {
    return false;
  }
  const auto& match = grant.match;
  if (match.is_match_all()) {
    return true;
  }
  const auto& grant_pool = match.pool_namespace.pool_name;
  return pool_name != nullptr &&
         !grant_pool.empty() &&
         grant_pool == *pool_name;
}
// Decide whether 'entity_name' may perform unmanaged-snapshot pool ops.
// Permission comes either from an explicit mon cap covering the
// "osd pool op unmanaged-snap" command (restricted to the pool when one
// is named), or, failing that, from OSD write caps on the target pool
// (or on all pools) as stored in the auth database.
bool is_unmanaged_snap_op_permitted(CephContext* cct,
                                    const KeyServer& key_server,
                                    const EntityName& entity_name,
                                    const MonCap& mon_caps,
                                    const entity_addr_t& peer_socket_addr,
                                    const std::string* pool_name)
{
  typedef std::map<std::string, std::string> CommandArgs;
  if (mon_caps.is_capable(
        cct, entity_name, "osd",
        "osd pool op unmanaged-snap",
        (pool_name == nullptr ?
         CommandArgs{} /* pool DNE, require unrestricted cap */ :
         CommandArgs{{"poolname", *pool_name}}),
        false, true, false,
        peer_socket_addr)) {
    return true;
  }
  // No mon cap match; fall back to the entity's OSD caps in the auth db.
  AuthCapsInfo caps_info;
  if (!key_server.get_service_caps(entity_name, CEPH_ENTITY_TYPE_OSD,
                                   caps_info)) {
    dout(10) << "unable to locate OSD cap data for " << entity_name
             << " in auth db" << dendl;
    return false;
  }
  string caps_str;
  if (caps_info.caps.length() > 0) {
    auto p = caps_info.caps.cbegin();
    try {
      decode(caps_str, p);
    } catch (const ceph::buffer::error &err) {
      derr << "corrupt OSD cap data for " << entity_name << " in auth db"
           << dendl;
      return false;
    }
  }
  OSDCap osd_cap;
  if (!osd_cap.parse(caps_str, nullptr)) {
    dout(10) << "unable to parse OSD cap data for " << entity_name
             << " in auth db" << dendl;
    return false;
  }
  // if the entity has write permissions in one or all pools, permit
  // usage of unmanaged-snapshots
  if (osd_cap.allow_all()) {
    return true;
  }
  for (auto& grant : osd_cap.grants) {
    if (grant.profile.is_valid()) {
      // profile grants expand into a list of concrete grants; any
      // writable one suffices
      for (auto& profile_grant : grant.profile_grants) {
        if (is_osd_writable(profile_grant, pool_name)) {
          return true;
        }
      }
    } else if (is_osd_writable(grant, pool_name)) {
      return true;
    }
  }
  return false;
}
} // anonymous namespace
// Record that PG 'ps' of a pool with 'pg_num' PGs was clean as of
// 'last_epoch_clean'.  Maintains two invariants: 'floor' is the minimum
// last-epoch-clean over all reported PGs, and 'next_missing' is the
// first PG index for which no report has been received yet.
void LastEpochClean::Lec::report(unsigned pg_num, ps_t ps,
				 epoch_t last_epoch_clean)
{
  if (ps >= pg_num) {
    // removed PG
    return;
  }
  epoch_by_pg.resize(pg_num, 0);
  const auto old_lec = epoch_by_pg[ps];
  if (old_lec >= last_epoch_clean) {
    // stale lec
    return;
  }
  epoch_by_pg[ps] = last_epoch_clean;
  if (last_epoch_clean < floor) {
    floor = last_epoch_clean;
  } else if (last_epoch_clean > floor) {
    if (old_lec == floor) {
      // probably should increase floor?
      // (this PG may have been the one holding the floor down; rescan)
      auto new_floor = std::min_element(std::begin(epoch_by_pg),
					std::end(epoch_by_pg));
      floor = *new_floor;
    }
  }
  if (ps != next_missing) {
    return;
  }
  // advance next_missing past the contiguous run of reported PGs
  for (; next_missing < epoch_by_pg.size(); next_missing++) {
    if (epoch_by_pg[next_missing] == 0) {
      break;
    }
  }
}
// Drop all last-epoch-clean tracking for a deleted pool.
void LastEpochClean::remove_pool(uint64_t pool)
{
  report_by_pool.erase(pool);
}
// Forward a per-PG clean report to the pool's record, creating the
// record on first use.
void LastEpochClean::report(unsigned pg_num, const pg_t& pg,
			    epoch_t last_epoch_clean)
{
  report_by_pool[pg.pool()].report(pg_num, pg.ps(), last_epoch_clean);
}
// Lowest last-epoch-clean across every PG of every pool in 'latest',
// or 0 if any pool is missing reports (unknown pool or PGs not yet
// fully reported).
epoch_t LastEpochClean::get_lower_bound(const OSDMap& latest) const
{
  epoch_t lower = latest.get_epoch();
  for (const auto& [poolid, pool] : latest.get_pools()) {
    const auto it = report_by_pool.find(poolid);
    if (it == report_by_pool.end()) {
      return 0;
    }
    const auto& lec = it->second;
    if (lec.next_missing < pool.get_pg_num()) {
      return 0;
    }
    if (lec.floor < lower) {
      lower = lec.floor;
    }
  }
  return lower;
}
// Emit the per-pool last-epoch-clean floors through the formatter.
void LastEpochClean::dump(Formatter *f) const
{
  f->open_array_section("per_pool");
  for (const auto& entry : report_by_pool) {
    f->open_object_section("pool");
    f->dump_unsigned("poolid", entry.first);
    f->dump_unsigned("floor", entry.second.floor);
    f->close_section();
  }
  f->close_section();
}
// Completion for the async osdmap-mapping job: once the mapping against
// 'epoch' finishes successfully, refresh the creating-PGs state and
// notify pg-create subscribers.
class C_UpdateCreatingPGs : public Context {
public:
  OSDMonitor *osdmon;
  utime_t start;   // when the mapping job was kicked off, for timing
  epoch_t epoch;   // osdmap epoch the mapping was computed against
  C_UpdateCreatingPGs(OSDMonitor *osdmon, epoch_t e) :
    osdmon(osdmon), start(ceph_clock_now()), epoch(e) {}
  void finish(int r) override {
    if (r >= 0) {
      utime_t end = ceph_clock_now();
      dout(10) << "osdmap epoch " << epoch << " mapping took "
	       << (end - start) << " seconds" << dendl;
      osdmon->update_creating_pgs();
      osdmon->check_pg_creates_subs();
    }
  }
};
#undef dout_prefix
#define dout_prefix _prefix(_dout, mon, osdmap)
// Log-line prefix used by the dout_prefix macro for this service.
static ostream& _prefix(std::ostream *_dout, Monitor &mon, const OSDMap& osdmap) {
  std::ostream& out = *_dout;
  out << "mon." << mon.name << "@" << mon.rank
      << "(" << mon.get_state_name()
      << ").osd e" << osdmap.get_epoch() << " ";
  return out;
}
// Construct the OSD paxos service: size the inc/full osdmap LRU caches from
// config, wire the shared cache wrappers, and register as a config observer
// so cache sizing can be retuned at runtime.
OSDMonitor::OSDMonitor(
  CephContext *cct,
  Monitor &mn,
  Paxos &p,
  const string& service_name)
 : PaxosService(mn, p, service_name),
   cct(cct),
   inc_osd_cache(g_conf()->mon_osd_cache_size),
   full_osd_cache(g_conf()->mon_osd_cache_size),
   has_osdmap_manifest(false),
   mapper(mn.cct, &mn.cpu_tp)
{
  inc_cache = std::make_shared<IncCache>(this);
  full_cache = std::make_shared<FullCache>(this);
  cct->_conf.add_observer(this);
  // best effort: a failure here just means we fall back to the fixed-size
  // caches without priority cache management.
  int r = _set_cache_sizes();
  if (r < 0) {
    derr << __func__ << " using default osd cache size - mon_osd_cache_size ("
         << g_conf()->mon_osd_cache_size
         << ") without priority cache management"
         << dendl;
  }
}
// Config options this observer wants change notifications for
// (NULL-terminated, as required by the md_config_obs_t interface).
const char **OSDMonitor::get_tracked_conf_keys() const
{
  static const char* tracked[] = {
    "mon_memory_target",
    "mon_memory_autotune",
    "rocksdb_cache_size",
    nullptr
  };
  return tracked;
}
// React to runtime config changes: toggle cache autotuning and/or re-derive
// the cache memory budget from the new target sizes.
void OSDMonitor::handle_conf_change(const ConfigProxy& conf,
				    const std::set<std::string> &changed)
{
  dout(10) << __func__ << " " << changed << dendl;

  if (changed.count("mon_memory_autotune")) {
    _set_cache_autotuning();
  }

  const bool sizes_changed =
    changed.count("mon_memory_target") ||
    changed.count("rocksdb_cache_size");
  if (sizes_changed && _update_mon_cache_settings() < 0) {
    derr << __func__ << " mon_memory_target:"
         << g_conf()->mon_memory_target
         << " rocksdb_cache_size:"
         << g_conf()->rocksdb_cache_size
         << ". Unable to update cache size."
         << dendl;
  }
}
// Enable or disable priority-cache autotuning to match the current config.
void OSDMonitor::_set_cache_autotuning()
{
  if (!g_conf()->mon_memory_autotune && pcm != nullptr) {
    // Tuning was switched off: drop the manager; the caches keep their
    // last assigned sizes.
    std::lock_guard l(balancer_lock);
    pcm = nullptr;
  }
  if (g_conf()->mon_memory_autotune && pcm == nullptr) {
    // Tuning was switched on: (re)register our caches with a new manager.
    const int r = register_cache_with_pcm();
    mon_memory_autotune = (r >= 0);
    if (r < 0) {
      dout(10) << __func__
               << " Error while registering osdmon caches with pcm."
               << " Cache auto tuning not enabled."
               << dendl;
    }
  }
}
// Re-derive the priority-cache memory budget after mon_memory_target or
// rocksdb_cache_size changed at runtime.  On failure the previous sizes are
// restored.  Returns 0 on success, -EINVAL on invalid sizes or if neither
// pcm nor the rocksdb cache is in use.
int OSDMonitor::_update_mon_cache_settings()
{
  if (g_conf()->mon_memory_target <= 0 ||
      g_conf()->mon_memory_target < mon_memory_min ||
      g_conf()->rocksdb_cache_size <= 0) {
    return -EINVAL;
  }
  if (pcm == nullptr && rocksdb_binned_kv_cache == nullptr) {
    derr << __func__ << " not using pcm and rocksdb" << dendl;
    return -EINVAL;
  }

  // remember old values so we can roll back if the new ratios are bad
  uint64_t old_mon_memory_target = mon_memory_target;
  uint64_t old_rocksdb_cache_size = rocksdb_cache_size;

  // Set the new pcm memory cache sizes
  mon_memory_target = g_conf()->mon_memory_target;
  rocksdb_cache_size = g_conf()->rocksdb_cache_size;

  // same max computation as register_cache_with_pcm(): reserve 'base' plus
  // an expected-fragmentation overhead out of the target.
  uint64_t base = mon_memory_base;
  double fragmentation = mon_memory_fragmentation;
  uint64_t target = mon_memory_target;
  uint64_t min = mon_memory_min;
  uint64_t max = min;

  uint64_t ltarget = (1.0 - fragmentation) * target;
  if (ltarget > base + min) {
    max = ltarget - base;
  }

  int r = _set_cache_ratios();
  if (r < 0) {
    derr << __func__ << " Cache ratios for pcm could not be set."
         << " Review the kv (rocksdb) and mon_memory_target sizes."
         << dendl;
    // roll back to the previous (known-good) sizes
    mon_memory_target = old_mon_memory_target;
    rocksdb_cache_size = old_rocksdb_cache_size;
    return -EINVAL;
  }

  if (mon_memory_autotune && pcm != nullptr) {
    std::lock_guard l(balancer_lock);
    // set pcm cache levels
    pcm->set_target_memory(target);
    pcm->set_min_memory(min);
    pcm->set_max_memory(max);
    // tune memory based on new values
    pcm->tune_memory();
    pcm->balance();
    _set_new_cache_sizes();
    dout(1) << __func__ << " Updated mon cache setting."
             << " target: " << target
             << " min: " << min
             << " max: " << max
             << dendl;
  }
  return 0;
}
// Pull the osdmon cache sizing knobs from config.  A no-op unless
// autotuning is requested; returns -EINVAL on nonsensical sizes.
int OSDMonitor::_set_cache_sizes()
{
  if (!g_conf()->mon_memory_autotune) {
    return 0;
  }
  // set the new osdmon cache targets to be managed by pcm
  mon_osd_cache_size = g_conf()->mon_osd_cache_size;
  rocksdb_cache_size = g_conf()->rocksdb_cache_size;
  mon_memory_base = cct->_conf.get_val<Option::size_t>("osd_memory_base");
  mon_memory_fragmentation = cct->_conf.get_val<double>("osd_memory_expected_fragmentation");
  mon_memory_target = g_conf()->mon_memory_target;
  mon_memory_min = g_conf()->mon_osd_cache_size_min;
  if (mon_memory_target <= 0 || mon_memory_min <= 0) {
    derr << __func__ << " mon_memory_target:" << mon_memory_target
         << " mon_memory_min:" << mon_memory_min
         << ". Invalid size option(s) provided."
         << dendl;
    return -EINVAL;
  }
  // Set the initial inc and full LRU cache sizes
  inc_osd_cache.set_bytes(mon_memory_min);
  full_osd_cache.set_bytes(mon_memory_min);
  mon_memory_autotune = g_conf()->mon_memory_autotune;
  return 0;
}
// True when a crush change has been queued in the pending incremental.
bool OSDMonitor::_have_pending_crush()
{
  return pending_inc.crush.length() != 0;
}
// Return the crush map of the committed osdmap (ignores pending changes).
CrushWrapper &OSDMonitor::_get_stable_crush()
{
  return *osdmap.crush;
}
// Return a copy of the crush map as it will look in the next epoch:
// the pending (uncommitted) crush if one is queued, else the stable one.
CrushWrapper OSDMonitor::_get_pending_crush()
{
  bufferlist encoded;
  if (pending_inc.crush.length()) {
    encoded = pending_inc.crush;
  } else {
    osdmap.crush->encode(encoded, CEPH_FEATURES_SUPPORTED_DEFAULT);
  }
  CrushWrapper result;
  auto it = encoded.cbegin();
  result.decode(it);
  return result;
}
// Build epoch 1 of the osdmap for a brand-new cluster (or adopt an osdmap
// seeded at mkfs time), apply default flags/ratios/feature requirements, and
// encode it into the pending incremental as a full map.
void OSDMonitor::create_initial()
{
  dout(10) << "create_initial for " << mon.monmap->fsid << dendl;

  OSDMap newmap;

  // prefer an osdmap provided at mkfs time, if any
  bufferlist bl;
  mon.store->get("mkfs", "osdmap", bl);

  if (bl.length()) {
    newmap.decode(bl);
    newmap.set_fsid(mon.monmap->fsid);
  } else {
    newmap.build_simple(cct, 0, mon.monmap->fsid, 0);
  }
  newmap.set_epoch(1);
  newmap.created = newmap.modified = ceph_clock_now();

  // new clusters should sort bitwise by default.
  newmap.set_flag(CEPH_OSDMAP_SORTBITWISE);

  newmap.flags |=
    CEPH_OSDMAP_RECOVERY_DELETES |
    CEPH_OSDMAP_PURGED_SNAPDIRS |
    CEPH_OSDMAP_PGLOG_HARDLIMIT;
  // ratios may be configured as percentages (> 1.0); normalize to [0,1]
  newmap.full_ratio = g_conf()->mon_osd_full_ratio;
  if (newmap.full_ratio > 1.0) newmap.full_ratio /= 100;
  newmap.backfillfull_ratio = g_conf()->mon_osd_backfillfull_ratio;
  if (newmap.backfillfull_ratio > 1.0) newmap.backfillfull_ratio /= 100;
  newmap.nearfull_ratio = g_conf()->mon_osd_nearfull_ratio;
  if (newmap.nearfull_ratio > 1.0) newmap.nearfull_ratio /= 100;

  // new cluster should require latest by default
  // (debug options allow holding the requirement back for upgrade testing)
  if (g_conf().get_val<bool>("mon_debug_no_require_reef")) {
    if (g_conf().get_val<bool>("mon_debug_no_require_quincy")) {
      derr << __func__ << " mon_debug_no_require_reef and quincy=true" << dendl;
      newmap.require_osd_release = ceph_release_t::pacific;
    } else {
      derr << __func__ << " mon_debug_no_require_reef=true" << dendl;
      newmap.require_osd_release = ceph_release_t::quincy;
    }
  } else {
    newmap.require_osd_release = ceph_release_t::reef;
  }

  ceph_release_t r = ceph_release_from_name(g_conf()->mon_osd_initial_require_min_compat_client);
  if (!r) {
    ceph_abort_msg("mon_osd_initial_require_min_compat_client is not valid");
  }
  newmap.require_min_compat_client = r;

  // encode into pending incremental
  uint64_t features = newmap.get_encoding_features();
  newmap.encode(pending_inc.fullmap,
                features | CEPH_FEATURE_RESERVED);
  pending_inc.full_crc = newmap.get_crc();
  dout(20) << " full crc " << pending_inc.full_crc << dendl;
}
void OSDMonitor::get_store_prefixes(std::set<string>& s) const
{
s.insert(service_name);
s.insert(OSD_PG_CREATING_PREFIX);
s.insert(OSD_METADATA_PREFIX);
s.insert(OSD_SNAP_PREFIX);
}
// Bring the in-memory osdmap up to date with the latest committed paxos
// version: load the newest stashed full map, replay every remaining
// incremental (writing back a full map per epoch), then refresh derived
// state (down->out tracking, subscriptions, msgr features, stretch mode).
void OSDMonitor::update_from_paxos(bool *need_bootstrap)
{
  // we really don't care if the version has been updated, because we may
  // have trimmed without having increased the last committed; yet, we may
  // need to update the in-memory manifest.
  load_osdmap_manifest();

  version_t version = get_last_committed();
  if (version == osdmap.epoch)
    return;
  ceph_assert(version > osdmap.epoch);

  dout(15) << "update_from_paxos paxos e " << version
	   << ", my e " << osdmap.epoch << dendl;

  // remembered so the stretch-mode recovery check below can detect growth
  int prev_num_up_osd = osdmap.num_up_osd;

  if (mapping_job) {
    if (!mapping_job->is_done()) {
      dout(1) << __func__ << " mapping job "
	      << mapping_job.get() << " did not complete, "
	      << mapping_job->shards << " left, canceling" << dendl;
      mapping_job->abort();
    }
    mapping_job.reset();
  }

  load_health();

  /*
   * We will possibly have a stashed latest that *we* wrote, and we will
   * always be sure to have the oldest full map in the first..last range
   * due to encode_trim_extra(), which includes the oldest full map in the trim
   * transaction.
   *
   * encode_trim_extra() does not however write the full map's
   * version to 'full_latest'. This is only done when we are building the
   * full maps from the incremental versions. But don't panic! We make sure
   * that the following conditions find whichever full map version is newer.
   */
  version_t latest_full = get_version_latest_full();
  if (latest_full == 0 && get_first_committed() > 1)
    latest_full = get_first_committed();

  if (get_first_committed() > 1 &&
      latest_full < get_first_committed()) {
    // the monitor could be just sync'ed with its peer, and the latest_full key
    // is not encoded in the paxos commits in encode_pending(), so we need to
    // make sure we get it pointing to a proper version.
    version_t lc = get_last_committed();
    version_t fc = get_first_committed();

    dout(10) << __func__ << " looking for valid full map in interval"
	     << " [" << fc << ", " << lc << "]" << dendl;

    // scan newest-first; the first full_<v> key found is the latest full map
    latest_full = 0;
    for (version_t v = lc; v >= fc; v--) {
      string full_key = "full_" + stringify(v);
      if (mon.store->exists(get_service_name(), full_key)) {
	dout(10) << __func__ << " found latest full map v " << v << dendl;
	latest_full = v;
	break;
      }
    }

    ceph_assert(latest_full > 0);
    auto t(std::make_shared<MonitorDBStore::Transaction>());
    put_version_latest_full(t, latest_full);
    mon.store->apply_transaction(t);
    dout(10) << __func__ << " updated the on-disk full map version to "
	     << latest_full << dendl;
  }

  if ((latest_full > 0) && (latest_full > osdmap.epoch)) {
    bufferlist latest_bl;
    get_version_full(latest_full, latest_bl);
    ceph_assert(latest_bl.length() != 0);
    dout(7) << __func__ << " loading latest full map e" << latest_full << dendl;
    osdmap = OSDMap();
    osdmap.decode(latest_bl);
  }

  // load the creating-PGs bookkeeping persisted by the leader
  bufferlist bl;
  if (!mon.store->get(OSD_PG_CREATING_PREFIX, "creating", bl)) {
    auto p = bl.cbegin();
    std::lock_guard<std::mutex> l(creating_pgs_lock);
    creating_pgs.decode(p);
    dout(7) << __func__ << " loading creating_pgs last_scan_epoch "
	    << creating_pgs.last_scan_epoch
	    << " with " << creating_pgs.pgs.size() << " pgs" << dendl;
  } else {
    dout(1) << __func__ << " missing creating pgs; upgrade from post-kraken?"
	    << dendl;
  }

  // walk through incrementals
  MonitorDBStore::TransactionRef t;
  size_t tx_size = 0;
  while (version > osdmap.epoch) {
    bufferlist inc_bl;
    int err = get_version(osdmap.epoch+1, inc_bl);
    ceph_assert(err == 0);
    ceph_assert(inc_bl.length());
    // set priority cache manager levels if the osdmap is
    // being populated for the first time.
    if (mon_memory_autotune && pcm == nullptr) {
      int r = register_cache_with_pcm();
      if (r < 0) {
	dout(10) << __func__
		 << " Error while registering osdmon caches with pcm."
		 << " Proceeding without cache auto tuning."
		 << dendl;
      }
    }

    dout(7) << "update_from_paxos applying incremental " << osdmap.epoch+1
	    << dendl;
    OSDMap::Incremental inc(inc_bl);
    err = osdmap.apply_incremental(inc);
    ceph_assert(err == 0);

    if (!t)
      t.reset(new MonitorDBStore::Transaction);

    // Write out the full map for all past epochs. Encode the full
    // map with the same features as the incremental. If we don't
    // know, use the quorum features. If we don't know those either,
    // encode with all features.
    uint64_t f = inc.encode_features;
    if (!f)
      f = mon.get_quorum_con_features();
    if (!f)
      f = -1;
    bufferlist full_bl;
    osdmap.encode(full_bl, f | CEPH_FEATURE_RESERVED);
    tx_size += full_bl.length();

    bufferlist orig_full_bl;
    get_version_full(osdmap.epoch, orig_full_bl);
    if (orig_full_bl.length()) {
      // the primary provided the full map
      ceph_assert(inc.have_crc);
      if (inc.full_crc != osdmap.crc) {
	// This will happen if the mons were running mixed versions in
	// the past or some other circumstance made the full encoded
	// maps divergent. Reloading here will bring us back into
	// sync with the primary for this and all future maps. OSDs
	// will also be brought back into sync when they discover the
	// crc mismatch and request a full map from a mon.
	derr << __func__ << " full map CRC mismatch, resetting to canonical"
	     << dendl;

	dout(20) << __func__ << " my (bad) full osdmap:\n";
	JSONFormatter jf(true);
	jf.dump_object("osdmap", osdmap);
	jf.flush(*_dout);
	*_dout << "\nhexdump:\n";
	full_bl.hexdump(*_dout);
	*_dout << dendl;

	osdmap = OSDMap();
	osdmap.decode(orig_full_bl);

	dout(20) << __func__ << " canonical full osdmap:\n";
	JSONFormatter jf(true);
	jf.dump_object("osdmap", osdmap);
	jf.flush(*_dout);
	*_dout << "\nhexdump:\n";
	orig_full_bl.hexdump(*_dout);
	*_dout << dendl;
      }
    } else {
      ceph_assert(!inc.have_crc);
      put_version_full(t, osdmap.epoch, full_bl);
    }
    put_version_latest_full(t, osdmap.epoch);

    // share
    dout(1) << osdmap << dendl;

    if (osdmap.epoch == 1) {
      t->erase("mkfs", "osdmap");
    }

    // flush periodically so a long replay doesn't build one huge transaction
    if (tx_size > g_conf()->mon_sync_max_payload_size*2) {
      mon.store->apply_transaction(t);
      t = MonitorDBStore::TransactionRef();
      tx_size = 0;
    }
    for (auto [osd, state] : inc.new_state) {
      if (state & CEPH_OSD_UP) {
	// could be marked up *or* down, but we're too lazy to check which
	last_osd_report.erase(osd);
      }
    }
    for (auto [osd, weight] : inc.new_weight) {
      if (weight == CEPH_OSD_OUT) {
	// manually marked out, so drop it
	osd_epochs.erase(osd);
      }
    }
  }

  if (t) {
    mon.store->apply_transaction(t);
  }

  // reconcile the down->out grace tracking with the new map
  bool marked_osd_down = false;
  for (int o = 0; o < osdmap.get_max_osd(); o++) {
    if (osdmap.is_out(o))
      continue;
    auto found = down_pending_out.find(o);
    if (osdmap.is_down(o)) {
      // populate down -> out map
      if (found == down_pending_out.end()) {
	dout(10) << " adding osd." << o << " to down_pending_out map" << dendl;
	down_pending_out[o] = ceph_clock_now();
	marked_osd_down = true;
      }
    } else {
      if (found != down_pending_out.end()) {
	dout(10) << " removing osd." << o << " from down_pending_out map" << dendl;
	down_pending_out.erase(found);
      }
    }
  }
  // XXX: need to trim MonSession connected with a osd whose id > max_osd?

  check_osdmap_subs();
  check_pg_creates_subs();

  share_map_with_random_osd();
  update_logger();
  process_failures();

  // make sure our feature bits reflect the latest map
  update_msgr_features();

  if (!mon.is_leader()) {
    // will be called by on_active() on the leader, avoid doing so twice
    start_mapping();
  }
  if (osdmap.stretch_mode_enabled) {
    dout(20) << "Stretch mode enabled in this map" << dendl;
    mon.try_engage_stretch_mode();
    if (osdmap.degraded_stretch_mode) {
      dout(20) << "Degraded stretch mode set in this map" << dendl;
      if (!osdmap.recovering_stretch_mode) {
	mon.set_degraded_stretch_mode();
	dout(20) << "prev_num_up_osd: " << prev_num_up_osd << dendl;
	dout(20) << "osdmap.num_up_osd: " << osdmap.num_up_osd << dendl;
	dout(20) << "osdmap.num_osd: " << osdmap.num_osd << dendl;
	dout(20) << "mon_stretch_cluster_recovery_ratio: " << cct->_conf.get_val<double>("mon_stretch_cluster_recovery_ratio") << dendl;
	if (prev_num_up_osd < osdmap.num_up_osd &&
	    (osdmap.num_up_osd / (double)osdmap.num_osd) >
	    cct->_conf.get_val<double>("mon_stretch_cluster_recovery_ratio") &&
	    mon.dead_mon_buckets.size() == 0) {
	  // TODO: This works for 2-site clusters when the OSD maps are appropriately
	  // trimmed and everything is "normal" but not if you have a lot of out OSDs
	  // you're ignoring or in some really degenerate failure cases
	  dout(10) << "Enabling recovery stretch mode in this map" << dendl;
	  mon.go_recovery_stretch_mode();
	}
      } else {
	mon.set_recovery_stretch_mode();
      }
    } else {
      mon.set_healthy_stretch_mode();
    }
    if (marked_osd_down &&
	(!osdmap.degraded_stretch_mode || osdmap.recovering_stretch_mode)) {
      dout(20) << "Checking degraded stretch mode due to osd changes" << dendl;
      mon.maybe_go_degraded_stretch_mode();
    }
  }
}
// Register the rocksdb kv cache and the inc/full osdmap caches with a new
// PriorityCache manager so their memory is balanced against a shared
// budget.  Returns 0 on success, -EINVAL on bad sizes, missing rocksdb
// cache, or unusable cache ratios.
int OSDMonitor::register_cache_with_pcm()
{
  if (mon_memory_target <= 0 || mon_memory_min <= 0) {
    derr << __func__ << " Invalid memory size specified for mon caches."
         << " Caches will not be auto-tuned."
         << dendl;
    return -EINVAL;
  }
  uint64_t base = mon_memory_base;
  double fragmentation = mon_memory_fragmentation;
  // For calculating total target memory, consider rocksdb cache size.
  uint64_t target = mon_memory_target;
  uint64_t min = mon_memory_min;
  uint64_t max = min;

  // Apply the same logic as in bluestore to set the max amount
  // of memory to use for cache. Assume base memory for OSDMaps
  // and then add in some overhead for fragmentation.
  uint64_t ltarget = (1.0 - fragmentation) * target;
  if (ltarget > base + min) {
    max = ltarget - base;
  }

  rocksdb_binned_kv_cache = mon.store->get_priority_cache();
  if (!rocksdb_binned_kv_cache) {
    derr << __func__ << " not using rocksdb" << dendl;
    return -EINVAL;
  }

  int r = _set_cache_ratios();
  if (r < 0) {
    derr << __func__ << " Cache ratios for pcm could not be set."
         << " Review the kv (rocksdb) and mon_memory_target sizes."
         << dendl;
    return -EINVAL;
  }

  pcm = std::make_shared<PriorityCache::Manager>(
      cct, min, max, target, true);
  pcm->insert("kv", rocksdb_binned_kv_cache, true);
  pcm->insert("inc", inc_cache, true);
  pcm->insert("full", full_cache, true);
  dout(1) << __func__ << " pcm target: " << target
           << " pcm max: " << max
           << " pcm min: " << min
           << " inc_osd_cache size: " << inc_osd_cache.get_size()
           << dendl;
  return 0;
}
int OSDMonitor::_set_cache_ratios()
{
double old_cache_kv_ratio = cache_kv_ratio;
// Set the cache ratios for kv(rocksdb), inc and full caches
cache_kv_ratio = (double)rocksdb_cache_size / (double)mon_memory_target;
if (cache_kv_ratio >= 1.0) {
derr << __func__ << " Cache kv ratio (" << cache_kv_ratio
<< ") must be in range [0,<1.0]."
<< dendl;
cache_kv_ratio = old_cache_kv_ratio;
return -EINVAL;
}
rocksdb_binned_kv_cache->set_cache_ratio(cache_kv_ratio);
cache_inc_ratio = cache_full_ratio = (1.0 - cache_kv_ratio) / 2;
inc_cache->set_cache_ratio(cache_inc_ratio);
full_cache->set_cache_ratio(cache_full_ratio);
dout(1) << __func__ << " kv ratio " << cache_kv_ratio
<< " inc ratio " << cache_inc_ratio
<< " full ratio " << cache_full_ratio
<< dendl;
return 0;
}
// Kick off an asynchronous PG-mapping computation against the current
// osdmap, canceling any job already in flight.  A no-op (beyond clearing
// the job) when there are no pools.
void OSDMonitor::start_mapping()
{
  if (mapping_job) {
    dout(10) << __func__ << " canceling previous mapping_job " << mapping_job.get()
	     << dendl;
    mapping_job->abort();
  }
  if (osdmap.get_pools().empty()) {
    dout(10) << __func__ << " no pools, no mapping job" << dendl;
    mapping_job = nullptr;
    return;
  }
  // completion updates creating_pgs and pg-create subscribers when done
  auto fin = new C_UpdateCreatingPGs(this, osdmap.get_epoch());
  mapping_job = mapping.start_update(osdmap, mapper,
				     g_conf()->mon_osd_mapping_pgs_per_chunk);
  dout(10) << __func__ << " started mapping job " << mapping_job.get()
	   << " at " << fin->start << dendl;
  mapping_job->set_finish_event(fin);
}
void OSDMonitor::update_msgr_features()
{
const int types[] = {
entity_name_t::TYPE_OSD,
entity_name_t::TYPE_CLIENT,
entity_name_t::TYPE_MDS,
entity_name_t::TYPE_MON
};
for (int type : types) {
uint64_t mask;
uint64_t features = osdmap.get_features(type, &mask);
if ((mon.messenger->get_policy(type).features_required & mask) != features) {
dout(0) << "crush map has features " << features << ", adjusting msgr requires" << dendl;
ceph::net::Policy p = mon.messenger->get_policy(type);
p.features_required = (p.features_required & ~mask) | features;
mon.messenger->set_policy(type, p);
}
}
}
void OSDMonitor::on_active()
{
update_logger();
if (mon.is_leader()) {
mon.clog->debug() << "osdmap " << osdmap;
if (!priority_convert) {
// Only do this once at start-up
convert_pool_priorities();
priority_convert = true;
}
} else {
list<MonOpRequestRef> ls;
take_all_failures(ls);
while (!ls.empty()) {
MonOpRequestRef op = ls.front();
op->mark_osdmon_event(__func__);
dispatch(op);
ls.pop_front();
}
}
start_mapping();
}
// Called on bootstrap/restart: previously recorded osd report times are no
// longer meaningful, so drop them.
void OSDMonitor::on_restart()
{
  last_osd_report.clear();
}
void OSDMonitor::on_shutdown()
{
dout(10) << __func__ << dendl;
if (mapping_job) {
dout(10) << __func__ << " canceling previous mapping_job " << mapping_job.get()
<< dendl;
mapping_job->abort();
}
// discard failure info, waiters
list<MonOpRequestRef> ls;
take_all_failures(ls);
ls.clear();
}
// Refresh the cluster perf counters that mirror basic osdmap stats.
void OSDMonitor::update_logger()
{
  dout(10) << "update_logger" << dendl;

  mon.cluster_logger->set(l_cluster_num_osd, osdmap.get_num_osds());
  mon.cluster_logger->set(l_cluster_num_osd_up, osdmap.get_num_up_osds());
  mon.cluster_logger->set(l_cluster_num_osd_in, osdmap.get_num_in_osds());
  mon.cluster_logger->set(l_cluster_osd_epoch, osdmap.get_epoch());
}
// Start a fresh pending incremental for the next epoch and clear the
// per-round scratch state.  Also backfills any unset full/backfillfull/
// nearfull ratios from config as a safety net.
void OSDMonitor::create_pending()
{
  pending_inc = OSDMap::Incremental(osdmap.epoch+1);
  pending_inc.fsid = mon.monmap->fsid;
  pending_metadata.clear();
  pending_metadata_rm.clear();
  pending_pseudo_purged_snaps.clear();

  dout(10) << "create_pending e " << pending_inc.epoch << dendl;

  // safety checks (this shouldn't really happen)
  {
    // ratios may be configured as percentages (> 1.0); normalize to [0,1]
    if (osdmap.backfillfull_ratio <= 0) {
      pending_inc.new_backfillfull_ratio = g_conf()->mon_osd_backfillfull_ratio;
      if (pending_inc.new_backfillfull_ratio > 1.0)
	pending_inc.new_backfillfull_ratio /= 100;
      dout(1) << __func__ << " setting backfillfull_ratio = "
	      << pending_inc.new_backfillfull_ratio << dendl;
    }
    if (osdmap.full_ratio <= 0) {
      pending_inc.new_full_ratio = g_conf()->mon_osd_full_ratio;
      if (pending_inc.new_full_ratio > 1.0)
        pending_inc.new_full_ratio /= 100;
      dout(1) << __func__ << " setting full_ratio = "
	      << pending_inc.new_full_ratio << dendl;
    }
    if (osdmap.nearfull_ratio <= 0) {
      pending_inc.new_nearfull_ratio = g_conf()->mon_osd_nearfull_ratio;
      if (pending_inc.new_nearfull_ratio > 1.0)
        pending_inc.new_nearfull_ratio /= 100;
      dout(1) << __func__ << " setting nearfull_ratio = "
	      << pending_inc.new_nearfull_ratio << dendl;
    }
  }
}
// Compute the next snapshot of creating-PGs state for the pending epoch:
// scan for new/deleted pools, drop PGs that finished creating or no longer
// exist in 'nextmap', admit queued PGs up to mon_osd_max_creating_pgs, and
// (octopus+) roll each creating PG's history/past_intervals forward.
// Returns the updated bookkeeping by value; the caller persists it.
creating_pgs_t
OSDMonitor::update_pending_pgs(const OSDMap::Incremental& inc,
			       const OSDMap& nextmap)
{
  dout(10) << __func__ << dendl;
  // work on a private copy; creating_pgs is shared with other threads
  creating_pgs_t pending_creatings;
  {
    std::lock_guard<std::mutex> l(creating_pgs_lock);
    pending_creatings = creating_pgs;
  }
  // check for new or old pools
  if (pending_creatings.last_scan_epoch < inc.epoch) {
    unsigned queued = 0;
    queued += scan_for_creating_pgs(osdmap.get_pools(),
				    inc.old_pools,
				    inc.modified,
				    &pending_creatings);
    queued += scan_for_creating_pgs(inc.new_pools,
				    inc.old_pools,
				    inc.modified,
				    &pending_creatings);
    dout(10) << __func__ << " " << queued << " pools queued" << dendl;
    for (auto deleted_pool : inc.old_pools) {
      auto removed = pending_creatings.remove_pool(deleted_pool);
      dout(10) << __func__ << " " << removed
               << " pg removed because containing pool deleted: "
               << deleted_pool << dendl;
      last_epoch_clean.remove_pool(deleted_pool);
    }
    // pgmon updates its creating_pgs in check_osd_map() which is called by
    // on_active() and check_osd_map() could be delayed if lease expires, so its
    // creating_pgs could be stale in comparison with the one of osdmon. let's
    // trim them here. otherwise, they will be added back after being erased.
    unsigned removed = 0;
    for (auto& pg : pending_created_pgs) {
      dout(20) << __func__ << " noting created pg " << pg << dendl;
      pending_creatings.created_pools.insert(pg.pool());
      removed += pending_creatings.pgs.erase(pg);
    }
    pending_created_pgs.clear();
    dout(10) << __func__ << " " << removed
	     << " pgs removed because they're created" << dendl;
    pending_creatings.last_scan_epoch = osdmap.get_epoch();
  }

  // filter out any pgs that shouldn't exist.
  {
    auto i = pending_creatings.pgs.begin();
    while (i != pending_creatings.pgs.end()) {
      if (!nextmap.pg_exists(i->first)) {
	dout(10) << __func__ << " removing pg " << i->first
		 << " which should not exist" << dendl;
	i = pending_creatings.pgs.erase(i);
      } else {
	++i;
      }
    }
  }

  // process queue: admit queued PG ranges until the in-flight creation
  // count reaches mon_osd_max_creating_pgs
  unsigned max = std::max<int64_t>(1, g_conf()->mon_osd_max_creating_pgs);
  const auto total = pending_creatings.pgs.size();
  while (pending_creatings.pgs.size() < max &&
	 !pending_creatings.queue.empty()) {
    auto p = pending_creatings.queue.begin();
    int64_t poolid = p->first;
    dout(10) << __func__ << " pool " << poolid
	     << " created " << p->second.created
	     << " modified " << p->second.modified
	     << " [" << p->second.start << "-" << p->second.end << ")"
	     << dendl;
    int64_t n = std::min<int64_t>(max - pending_creatings.pgs.size(),
				  p->second.end - p->second.start);
    ps_t first = p->second.start;
    ps_t end = first + n;
    for (ps_t ps = first; ps < end; ++ps) {
      const pg_t pgid{ps, static_cast<uint64_t>(poolid)};
      // NOTE: use the *current* epoch as the PG creation epoch so that the
      // OSD does not have to generate a long set of PastIntervals.
      pending_creatings.pgs.emplace(
	pgid,
	creating_pgs_t::pg_create_info(inc.epoch,
				       p->second.modified));
      dout(10) << __func__ << " adding " << pgid << dendl;
    }
    p->second.start = end;
    if (p->second.done()) {
      dout(10) << __func__ << " done with queue for " << poolid << dendl;
      pending_creatings.queue.erase(p);
    } else {
      dout(10) << __func__ << " pool " << poolid
	       << " now [" << p->second.start << "-" << p->second.end << ")"
	       << dendl;
    }
  }
  dout(10) << __func__ << " queue remaining: " << pending_creatings.queue.size()
	   << " pools" << dendl;

  if (mon.monmap->min_mon_release >= ceph_release_t::octopus) {
    // walk creating pgs' history and past_intervals forward
    for (auto& i : pending_creatings.pgs) {
      // this mirrors PG::start_peering_interval()
      pg_t pgid = i.first;

      // this is a bit imprecise, but sufficient?
      struct min_size_predicate_t : public IsPGRecoverablePredicate {
	const pg_pool_t *pi;
	bool operator()(const set<pg_shard_t> &have) const {
	  return have.size() >= pi->min_size;
	}
	explicit min_size_predicate_t(const pg_pool_t *i) : pi(i) {}
      } min_size_predicate(nextmap.get_pg_pool(pgid.pool()));

      vector<int> up, acting;
      int up_primary, acting_primary;
      nextmap.pg_to_up_acting_osds(
	pgid, &up, &up_primary, &acting, &acting_primary);
      if (i.second.history.epoch_created == 0) {
	// new pg entry, set it up
	i.second.up = up;
	i.second.acting = acting;
	i.second.up_primary = up_primary;
	i.second.acting_primary = acting_primary;
	i.second.history = pg_history_t(i.second.create_epoch,
					i.second.create_stamp);
	dout(10) << __func__ << " pg " << pgid << " just added, "
		 << " up " << i.second.up
		 << " p " << i.second.up_primary
		 << " acting " << i.second.acting
		 << " p " << i.second.acting_primary
		 << " history " << i.second.history
		 << " past_intervals " << i.second.past_intervals
		 << dendl;
      } else {
	std::stringstream debug;
	// check_new_interval() appends to past_intervals and returns true
	// if the mapping change starts a new interval
	if (PastIntervals::check_new_interval(
	      i.second.acting_primary, acting_primary,
	      i.second.acting, acting,
	      i.second.up_primary, up_primary,
	      i.second.up, up,
	      i.second.history.same_interval_since,
	      i.second.history.last_epoch_clean,
	      &nextmap,
	      &osdmap,
	      pgid,
	      min_size_predicate,
	      &i.second.past_intervals,
	      &debug)) {
	  epoch_t e = inc.epoch;
	  i.second.history.same_interval_since = e;
	  if (i.second.up != up) {
	    i.second.history.same_up_since = e;
	  }
	  if (i.second.acting_primary != acting_primary) {
	    i.second.history.same_primary_since = e;
	  }
	  if (pgid.is_split(
		osdmap.get_pg_num(pgid.pool()),
		nextmap.get_pg_num(pgid.pool()),
		nullptr)) {
	    i.second.history.last_epoch_split = e;
	  }
	  dout(10) << __func__ << " pg " << pgid << " new interval,"
		   << " up " << i.second.up << " -> " << up
		   << " p " << i.second.up_primary << " -> " << up_primary
		   << " acting " << i.second.acting << " -> " << acting
		   << " p " << i.second.acting_primary << " -> "
		   << acting_primary
		   << " history " << i.second.history
		   << " past_intervals " << i.second.past_intervals
		   << dendl;
	  dout(20) << " debug: " << debug.str() << dendl;
	  i.second.up = up;
	  i.second.acting = acting;
	  i.second.up_primary = up_primary;
	  i.second.acting_primary = acting_primary;
	}
      }
    }
  }
  dout(10) << __func__
	   << " " << (pending_creatings.pgs.size() - total)
	   << "/" << pending_creatings.pgs.size()
	   << " pgs added from queued pools" << dendl;
  return pending_creatings;
}
// Pre-populate pg_temp entries in the pending incremental for PGs whose
// acting set will change in the next map, so peering can start against the
// old acting set without waiting.  Either primes all PGs (crush change, new
// up osds, weight increases, or a large estimate) or only the PGs touching
// the "interesting" OSDs, bounded by mon_osd_prime_pg_temp_max_time.
void OSDMonitor::maybe_prime_pg_temp()
{
  bool all = false;
  if (pending_inc.crush.length()) {
    dout(10) << __func__ << " new crush map, all" << dendl;
    all = true;
  }

  if (!pending_inc.new_up_client.empty()) {
    dout(10) << __func__ << " new up osds, all" << dendl;
    all = true;
  }

  // check for interesting OSDs
  set<int> osds;
  for (auto p = pending_inc.new_state.begin();
       !all && p != pending_inc.new_state.end();
       ++p) {
    if ((p->second & CEPH_OSD_UP) &&
	osdmap.is_up(p->first)) {
      // currently-up osd whose up bit is being toggled (i.e. marked down)
      osds.insert(p->first);
    }
  }
  for (auto p = pending_inc.new_weight.begin();
       !all && p != pending_inc.new_weight.end();
       ++p) {
    if (osdmap.exists(p->first) && p->second < osdmap.get_weight(p->first)) {
      // weight reduction
      osds.insert(p->first);
    } else {
      dout(10) << __func__ << " osd." << p->first << " weight increase, all"
	       << dendl;
      all = true;
    }
  }

  if (!all && osds.empty())
    return;

  if (!all) {
    // rough cost estimate: PGs on one osd times the number of osds; fall
    // back to priming everything if that would touch most of the map anyway
    unsigned estimate =
      mapping.get_osd_acting_pgs(*osds.begin()).size() * osds.size();
    if (estimate > mapping.get_num_pgs() *
	g_conf()->mon_osd_prime_pg_temp_max_estimate) {
      dout(10) << __func__ << " estimate " << estimate << " pgs on "
	       << osds.size() << " osds >= "
	       << g_conf()->mon_osd_prime_pg_temp_max_estimate << " of total "
	       << mapping.get_num_pgs() << " pgs, all"
	       << dendl;
      all = true;
    } else {
      dout(10) << __func__ << " estimate " << estimate << " pgs on "
	       << osds.size() << " osds" << dendl;
    }
  }

  // build the map as it will look after the pending incremental applies
  OSDMap next;
  next.deepish_copy_from(osdmap);
  next.apply_incremental(pending_inc);

  if (next.get_pools().empty()) {
    dout(10) << __func__ << " no pools, no pg_temp priming" << dendl;
  } else if (all) {
    PrimeTempJob job(next, this);
    mapper.queue(&job, g_conf()->mon_osd_mapping_pgs_per_chunk, {});
    if (job.wait_for(g_conf()->mon_osd_prime_pg_temp_max_time)) {
      dout(10) << __func__ << " done in " << job.get_duration() << dendl;
    } else {
      dout(10) << __func__ << " did not finish in "
	       << g_conf()->mon_osd_prime_pg_temp_max_time
	       << ", stopping" << dendl;
      job.abort();
    }
  } else {
    dout(10) << __func__ << " " << osds.size() << " interesting osds" << dendl;
    utime_t stop = ceph_clock_now();
    stop += g_conf()->mon_osd_prime_pg_temp_max_time;
    const int chunk = 1000;
    int n = chunk;
    std::unordered_set<pg_t> did_pgs;
    for (auto osd : osds) {
      auto& pgs = mapping.get_osd_acting_pgs(osd);
      dout(20) << __func__ << " osd." << osd << " " << pgs << dendl;
      for (auto pgid : pgs) {
	if (!did_pgs.insert(pgid).second) {
	  continue;
	}
	prime_pg_temp(next, pgid);
	// only check the clock every 'chunk' PGs to keep overhead low
	if (--n <= 0) {
	  n = chunk;
	  if (ceph_clock_now() > stop) {
	    dout(10) << __func__ << " consumed more than "
		     << g_conf()->mon_osd_prime_pg_temp_max_time
		     << " seconds, stopping"
		     << dendl;
	    return;
	  }
	}
      }
    }
  }
}
// For one PG, queue a pg_temp mapping to its *current* acting set in the
// pending incremental if the next map would change that acting set, so the
// PG keeps serving from the old set while it re-peers.  Skips PGs that are
// still being created, no longer exist, or where priming cannot help.
void OSDMonitor::prime_pg_temp(
  const OSDMap& next,
  pg_t pgid)
{
  // TODO: remove this creating_pgs direct access?
  if (creating_pgs.pgs.count(pgid)) {
    return;
  }
  if (!osdmap.pg_exists(pgid)) {
    return;
  }

  vector<int> up, acting;
  mapping.get(pgid, &up, nullptr, &acting, nullptr);

  vector<int> next_up, next_acting;
  int next_up_primary, next_acting_primary;
  next.pg_to_up_acting_osds(pgid, &next_up, &next_up_primary,
			    &next_acting, &next_acting_primary);
  if (acting == next_acting &&
      !(up != acting && next_up == next_acting))
    return;  // no change since last epoch

  if (acting.empty())
    return;  // if previously empty now we can be no worse off
  const pg_pool_t *pool = next.get_pg_pool(pgid.pool());
  if (pool && acting.size() < pool->min_size)
    return;  // can be no worse off than before

  if (next_up == next_acting) {
    // an empty pg_temp clears any existing temp mapping
    acting.clear();
    dout(20) << __func__ << " next_up == next_acting now, clear pg_temp"
	     << dendl;
  }

  dout(20) << __func__ << " " << pgid << " " << up << "/" << acting
	   << " -> " << next_up << "/" << next_acting
	   << ", priming " << acting
	   << dendl;
  {
    std::lock_guard l(prime_pg_temp_lock);
    // do not touch a mapping if a change is pending
    // (emplace leaves any existing new_pg_temp entry for this pg intact)
    pending_inc.new_pg_temp.emplace(
      pgid,
      mempool::osdmap::vector<int>(acting.begin(), acting.end()));
  }
}
/**
 * Finalize and persist the pending osdmap epoch.
 *
 * Finalizes pending_inc (pg_temp priming, pg_upmap cleanup, per-pool
 * near/backfill/full flags, release-transition fixups), encodes both the
 * incremental and the resulting full map into the transaction, and records
 * osd metadata, purged snaps and health alongside it.
 *
 * @note receiving a transaction in this function gives a fair amount of
 * freedom to the service implementation if it does need it. It shouldn't.
 */
void OSDMonitor::encode_pending(MonitorDBStore::TransactionRef t)
{
  dout(10) << "encode_pending e " << pending_inc.epoch
           << dendl;

  if (do_prune(t)) {
    dout(1) << __func__ << " osdmap full prune encoded e"
            << pending_inc.epoch << dendl;
  }

  // finalize up pending_inc
  pending_inc.modified = ceph_clock_now();

  int r = pending_inc.propagate_base_properties_to_tiers(cct, osdmap);
  ceph_assert(r == 0);

  // prime pg_temp only if the background mapping job finished for the
  // current epoch; otherwise the precomputed mapping would be stale.
  if (mapping_job) {
    if (!mapping_job->is_done()) {
      dout(1) << __func__ << " skipping prime_pg_temp; mapping job "
              << mapping_job.get() << " did not complete, "
              << mapping_job->shards << " left" << dendl;
      mapping_job->abort();
    } else if (mapping.get_epoch() < osdmap.get_epoch()) {
      dout(1) << __func__ << " skipping prime_pg_temp; mapping job "
              << mapping_job.get() << " is prior epoch "
              << mapping.get_epoch() << dendl;
    } else {
      if (g_conf()->mon_osd_prime_pg_temp) {
        maybe_prime_pg_temp();
      }
    }
  } else if (g_conf()->mon_osd_prime_pg_temp) {
    dout(1) << __func__ << " skipping prime_pg_temp; mapping job did not start"
            << dendl;
  }
  mapping_job.reset();

  // ensure we don't have blank new_state updates.  these are interpreted as
  // CEPH_OSD_UP (and almost certainly not what we want!).
  auto p = pending_inc.new_state.begin();
  while (p != pending_inc.new_state.end()) {
    if (p->second == 0) {
      dout(10) << "new_state for osd." << p->first << " is 0, removing" << dendl;
      p = pending_inc.new_state.erase(p);
    } else {
      if (p->second & CEPH_OSD_UP) {
        pending_inc.new_last_up_change = pending_inc.modified;
      }
      ++p;
    }
  }
  if (!pending_inc.new_up_client.empty()) {
    pending_inc.new_last_up_change = pending_inc.modified;
  }
  // record the time of the last in/out transition, if any
  for (auto& i : pending_inc.new_weight) {
    if (i.first >= osdmap.max_osd) {
      if (i.second) {
        // new osd is already marked in
        pending_inc.new_last_in_change = pending_inc.modified;
        break;
      }
    } else if (!!i.second != !!osdmap.osd_weight[i.first]) {
      // existing osd marked in or out
      pending_inc.new_last_in_change = pending_inc.modified;
      break;
    }
  }

  {
    // apply pending_inc to a scratch copy of the map so we can inspect
    // the state the cluster will be in *after* this epoch commits
    OSDMap tmp;
    tmp.deepish_copy_from(osdmap);
    tmp.apply_incremental(pending_inc);

    // clean pg_temp mappings
    OSDMap::clean_temps(cct, osdmap, tmp, &pending_inc);

    // clean inappropriate pg_upmap/pg_upmap_items (if any)
    {
      // check every upmapped pg for now
      // until we could reliably identify certain cases to ignore,
      // which is obviously the hard part TBD..
      vector<pg_t> pgs_to_check;
      tmp.get_upmap_pgs(&pgs_to_check);
      if (pgs_to_check.size() <
          static_cast<uint64_t>(g_conf()->mon_clean_pg_upmaps_per_chunk * 2)) {
        // not enough pgs, do it inline
        tmp.clean_pg_upmaps(cct, &pending_inc);
      } else {
        CleanUpmapJob job(cct, tmp, pending_inc);
        mapper.queue(&job, g_conf()->mon_clean_pg_upmaps_per_chunk, pgs_to_check);
        job.wait();
      }
    }

    // update creating pgs first so that we can remove the created pgid and
    // process the pool flag removal below in the same osdmap epoch.
    auto pending_creatings = update_pending_pgs(pending_inc, tmp);
    bufferlist creatings_bl;
    uint64_t features = CEPH_FEATURES_ALL;
    if (mon.monmap->min_mon_release < ceph_release_t::octopus) {
      dout(20) << __func__ << " encoding pending pgs without octopus features"
               << dendl;
      features &= ~CEPH_FEATURE_SERVER_OCTOPUS;
    }
    encode(pending_creatings, creatings_bl, features);
    t->put(OSD_PG_CREATING_PREFIX, "creating", creatings_bl);

    // remove any old (or incompat) POOL_CREATING flags
    for (auto& i : tmp.get_pools()) {
      if (tmp.require_osd_release < ceph_release_t::nautilus) {
        // pre-nautilus OSDMaps shouldn't get this flag.
        if (pending_inc.new_pools.count(i.first)) {
          pending_inc.new_pools[i.first].flags &= ~pg_pool_t::FLAG_CREATING;
        }
      }
      if (i.second.has_flag(pg_pool_t::FLAG_CREATING) &&
          !pending_creatings.still_creating_pool(i.first)) {
        dout(10) << __func__ << " done creating pool " << i.first
                 << ", clearing CREATING flag" << dendl;
        if (pending_inc.new_pools.count(i.first) == 0) {
          pending_inc.new_pools[i.first] = i.second;
        }
        pending_inc.new_pools[i.first].flags &= ~pg_pool_t::FLAG_CREATING;
      }
    }

    // collect which pools are currently affected by
    // the near/backfill/full osd(s),
    // and set per-pool near/backfill/full flag instead
    set<int64_t> full_pool_ids;
    set<int64_t> backfillfull_pool_ids;
    set<int64_t> nearfull_pool_ids;
    tmp.get_full_pools(cct,
                       &full_pool_ids,
                       &backfillfull_pool_ids,
                       &nearfull_pool_ids);
    if (full_pool_ids.empty() ||
        backfillfull_pool_ids.empty() ||
        nearfull_pool_ids.empty()) {
      // normal case - no nearfull, backfillfull or full osds
      // try cancel any improper nearfull/backfillfull/full pool
      // flags first
      for (auto &pool: tmp.get_pools()) {
        auto p = pool.first;
        if (tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_NEARFULL) &&
            nearfull_pool_ids.empty()) {
          dout(10) << __func__ << " clearing pool '" << tmp.pool_name[p]
                   << "'s nearfull flag" << dendl;
          if (pending_inc.new_pools.count(p) == 0) {
            // load original pool info first!
            pending_inc.new_pools[p] = pool.second;
          }
          pending_inc.new_pools[p].flags &= ~pg_pool_t::FLAG_NEARFULL;
        }
        if (tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_BACKFILLFULL) &&
            backfillfull_pool_ids.empty()) {
          dout(10) << __func__ << " clearing pool '" << tmp.pool_name[p]
                   << "'s backfillfull flag" << dendl;
          if (pending_inc.new_pools.count(p) == 0) {
            pending_inc.new_pools[p] = pool.second;
          }
          pending_inc.new_pools[p].flags &= ~pg_pool_t::FLAG_BACKFILLFULL;
        }
        if (tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_FULL) &&
            full_pool_ids.empty()) {
          if (tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_FULL_QUOTA)) {
            // set by EQUOTA, skipping
            continue;
          }
          dout(10) << __func__ << " clearing pool '" << tmp.pool_name[p]
                   << "'s full flag" << dendl;
          if (pending_inc.new_pools.count(p) == 0) {
            pending_inc.new_pools[p] = pool.second;
          }
          pending_inc.new_pools[p].flags &= ~pg_pool_t::FLAG_FULL;
        }
      }
    }
    if (!full_pool_ids.empty()) {
      dout(10) << __func__ << " marking pool(s) " << full_pool_ids
               << " as full" << dendl;
      for (auto &p: full_pool_ids) {
        if (tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_FULL)) {
          continue;
        }
        if (pending_inc.new_pools.count(p) == 0) {
          pending_inc.new_pools[p] = tmp.pools[p];
        }
        // FULL supersedes the weaker flags
        pending_inc.new_pools[p].flags |= pg_pool_t::FLAG_FULL;
        pending_inc.new_pools[p].flags &= ~pg_pool_t::FLAG_BACKFILLFULL;
        pending_inc.new_pools[p].flags &= ~pg_pool_t::FLAG_NEARFULL;
      }
      // cancel FLAG_FULL for pools which are no longer full too
      for (auto &pool: tmp.get_pools()) {
        auto p = pool.first;
        if (full_pool_ids.count(p)) {
          // skip pools we have just marked as full above
          continue;
        }
        if (!tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_FULL) ||
            tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_FULL_QUOTA)) {
          // don't touch if currently is not full
          // or is running out of quota (and hence considered as full)
          continue;
        }
        dout(10) << __func__ << " clearing pool '" << tmp.pool_name[p]
                 << "'s full flag" << dendl;
        if (pending_inc.new_pools.count(p) == 0) {
          pending_inc.new_pools[p] = pool.second;
        }
        pending_inc.new_pools[p].flags &= ~pg_pool_t::FLAG_FULL;
      }
    }
    if (!backfillfull_pool_ids.empty()) {
      for (auto &p: backfillfull_pool_ids) {
        if (full_pool_ids.count(p)) {
          // skip pools we have already considered as full above
          continue;
        }
        if (tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_FULL_QUOTA)) {
          // make sure FLAG_FULL is truly set, so we are safe not
          // to set a extra (redundant) FLAG_BACKFILLFULL flag
          ceph_assert(tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_FULL));
          continue;
        }
        if (tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_BACKFILLFULL)) {
          // don't bother if pool is already marked as backfillfull
          continue;
        }
        dout(10) << __func__ << " marking pool '" << tmp.pool_name[p]
                 << "'s as backfillfull" << dendl;
        if (pending_inc.new_pools.count(p) == 0) {
          pending_inc.new_pools[p] = tmp.pools[p];
        }
        pending_inc.new_pools[p].flags |= pg_pool_t::FLAG_BACKFILLFULL;
        pending_inc.new_pools[p].flags &= ~pg_pool_t::FLAG_NEARFULL;
      }
      // cancel FLAG_BACKFILLFULL for pools
      // which are no longer backfillfull too
      for (auto &pool: tmp.get_pools()) {
        auto p = pool.first;
        if (full_pool_ids.count(p) || backfillfull_pool_ids.count(p)) {
          // skip pools we have just marked as backfillfull/full above
          continue;
        }
        if (!tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_BACKFILLFULL)) {
          // and don't touch if currently is not backfillfull
          continue;
        }
        dout(10) << __func__ << " clearing pool '" << tmp.pool_name[p]
                 << "'s backfillfull flag" << dendl;
        if (pending_inc.new_pools.count(p) == 0) {
          pending_inc.new_pools[p] = pool.second;
        }
        pending_inc.new_pools[p].flags &= ~pg_pool_t::FLAG_BACKFILLFULL;
      }
    }
    if (!nearfull_pool_ids.empty()) {
      for (auto &p: nearfull_pool_ids) {
        if (full_pool_ids.count(p) || backfillfull_pool_ids.count(p)) {
          continue;
        }
        if (tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_FULL_QUOTA)) {
          // make sure FLAG_FULL is truly set, so we are safe not
          // to set a extra (redundant) FLAG_NEARFULL flag
          ceph_assert(tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_FULL));
          continue;
        }
        if (tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_NEARFULL)) {
          // don't bother if pool is already marked as nearfull
          continue;
        }
        dout(10) << __func__ << " marking pool '" << tmp.pool_name[p]
                 << "'s as nearfull" << dendl;
        if (pending_inc.new_pools.count(p) == 0) {
          pending_inc.new_pools[p] = tmp.pools[p];
        }
        pending_inc.new_pools[p].flags |= pg_pool_t::FLAG_NEARFULL;
      }
      // cancel FLAG_NEARFULL for pools
      // which are no longer nearfull too
      for (auto &pool: tmp.get_pools()) {
        auto p = pool.first;
        if (full_pool_ids.count(p) ||
            backfillfull_pool_ids.count(p) ||
            nearfull_pool_ids.count(p)) {
          // skip pools we have just marked as
          // nearfull/backfillfull/full above
          continue;
        }
        if (!tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_NEARFULL)) {
          // and don't touch if currently is not nearfull
          continue;
        }
        dout(10) << __func__ << " clearing pool '" << tmp.pool_name[p]
                 << "'s nearfull flag" << dendl;
        if (pending_inc.new_pools.count(p) == 0) {
          pending_inc.new_pools[p] = pool.second;
        }
        pending_inc.new_pools[p].flags &= ~pg_pool_t::FLAG_NEARFULL;
      }
    }

    // min_compat_client?
    if (!tmp.require_min_compat_client) {
      auto mv = tmp.get_min_compat_client();
      dout(1) << __func__ << " setting require_min_compat_client to currently "
              << "required " << mv << dendl;
      mon.clog->info() << "setting require_min_compat_client to currently "
                       << "required " << mv;
      pending_inc.new_require_min_compat_client = mv;
    }

    // one-time fixups applied on the first epoch after crossing the
    // nautilus require_osd_release boundary
    if (osdmap.require_osd_release < ceph_release_t::nautilus &&
        tmp.require_osd_release >= ceph_release_t::nautilus) {
      dout(10) << __func__ << " first nautilus+ epoch" << dendl;
      // add creating flags?
      for (auto& i : tmp.get_pools()) {
        if (pending_creatings.still_creating_pool(i.first)) {
          dout(10) << __func__ << " adding CREATING flag to pool " << i.first
                   << dendl;
          if (pending_inc.new_pools.count(i.first) == 0) {
            pending_inc.new_pools[i.first] = i.second;
          }
          pending_inc.new_pools[i.first].flags |= pg_pool_t::FLAG_CREATING;
        }
      }
      // adjust blocklist items to all be TYPE_ANY
      for (auto& i : tmp.blocklist) {
        auto a = i.first;
        a.set_type(entity_addr_t::TYPE_ANY);
        pending_inc.new_blocklist[a] = i.second;
        pending_inc.old_blocklist.push_back(i.first);
      }
    }

    // one-time fixups applied on the first epoch after crossing the
    // octopus require_osd_release boundary
    if (osdmap.require_osd_release < ceph_release_t::octopus &&
        tmp.require_osd_release >= ceph_release_t::octopus) {
      dout(10) << __func__ << " first octopus+ epoch" << dendl;

      // adjust obsoleted cache modes
      for (auto& [poolid, pi] : tmp.pools) {
        if (pi.cache_mode == pg_pool_t::CACHEMODE_FORWARD) {
          if (pending_inc.new_pools.count(poolid) == 0) {
            pending_inc.new_pools[poolid] = pi;
          }
          dout(10) << __func__ << " switching pool " << poolid
                   << " cachemode from forward -> proxy" << dendl;
          pending_inc.new_pools[poolid].cache_mode = pg_pool_t::CACHEMODE_PROXY;
        }
        if (pi.cache_mode == pg_pool_t::CACHEMODE_READFORWARD) {
          if (pending_inc.new_pools.count(poolid) == 0) {
            pending_inc.new_pools[poolid] = pi;
          }
          dout(10) << __func__ << " switching pool " << poolid
                   << " cachemode from readforward -> readproxy" << dendl;
          pending_inc.new_pools[poolid].cache_mode =
            pg_pool_t::CACHEMODE_READPROXY;
        }
      }

      // clear removed_snaps for every pool
      for (auto& [poolid, pi] : tmp.pools) {
        if (pi.removed_snaps.empty()) {
          continue;
        }
        if (pending_inc.new_pools.count(poolid) == 0) {
          pending_inc.new_pools[poolid] = pi;
        }
        dout(10) << __func__ << " clearing pool " << poolid << " removed_snaps"
                 << dendl;
        pending_inc.new_pools[poolid].removed_snaps.clear();
      }

      // create a combined purged snap epoch key for all purged snaps
      // prior to this epoch, and store it in the current epoch (i.e.,
      // the last pre-octopus epoch, just prior to the one we're
      // encoding now).
      auto it = mon.store->get_iterator(OSD_SNAP_PREFIX);
      it->lower_bound("purged_snap_");
      map<int64_t,snap_interval_set_t> combined;
      while (it->valid()) {
        if (it->key().find("purged_snap_") != 0) {
          break;
        }
        string k = it->key();
        long long unsigned pool;
        int n = sscanf(k.c_str(), "purged_snap_%llu_", &pool);
        if (n != 1) {
          derr << __func__ << " invalid purged_snaps key '" << k << "'" << dendl;
        } else {
          // each key's value is a single [begin, end) snap interval
          bufferlist v = it->value();
          auto p = v.cbegin();
          snapid_t begin, end;
          ceph::decode(begin, p);
          ceph::decode(end, p);
          combined[pool].insert(begin, end - begin);
        }
        it->next();
      }
      if (!combined.empty()) {
        string k = make_purged_snap_epoch_key(pending_inc.epoch - 1);
        bufferlist v;
        ceph::encode(combined, v);
        t->put(OSD_SNAP_PREFIX, k, v);
        dout(10) << __func__ << " recording pre-octopus purged_snaps in epoch "
                 << (pending_inc.epoch - 1) << ", " << v.length() << " bytes"
                 << dendl;
      } else {
        dout(10) << __func__ << " there were no pre-octopus purged snaps"
                 << dendl;
      }

      // clean out the old removed_snap_ and removed_epoch keys
      // ('`' is ASCII '_' + 1)
      t->erase_range(OSD_SNAP_PREFIX, "removed_snap_", "removed_snap`");
      t->erase_range(OSD_SNAP_PREFIX, "removed_epoch_", "removed_epoch`");
    }
  }

  // tell me about it
  for (auto i = pending_inc.new_state.begin();
       i != pending_inc.new_state.end();
       ++i) {
    int s = i->second ? i->second : CEPH_OSD_UP;
    if (s & CEPH_OSD_UP) {
      dout(2) << " osd." << i->first << " DOWN" << dendl;
      // Reset laggy parameters if failure interval exceeds a threshold.
      const osd_xinfo_t& xi = osdmap.get_xinfo(i->first);
      if ((xi.laggy_probability || xi.laggy_interval) && xi.down_stamp.sec()) {
        int last_failure_interval = pending_inc.modified.sec() - xi.down_stamp.sec();
        if (grace_interval_threshold_exceeded(last_failure_interval)) {
          set_default_laggy_params(i->first);
        }
      }
    }
    if (s & CEPH_OSD_EXISTS)
      dout(2) << " osd." << i->first << " DNE" << dendl;
  }
  for (auto i = pending_inc.new_up_client.begin();
       i != pending_inc.new_up_client.end();
       ++i) {
    //FIXME: insert cluster addresses too
    dout(2) << " osd." << i->first << " UP " << i->second << dendl;
  }
  for (map<int32_t,uint32_t>::iterator i = pending_inc.new_weight.begin();
       i != pending_inc.new_weight.end();
       ++i) {
    if (i->second == CEPH_OSD_OUT) {
      dout(2) << " osd." << i->first << " OUT" << dendl;
    } else if (i->second == CEPH_OSD_IN) {
      dout(2) << " osd." << i->first << " IN" << dendl;
    } else {
      dout(2) << " osd." << i->first << " WEIGHT " << hex << i->second << dec << dendl;
    }
  }

  // features for osdmap and its incremental
  uint64_t features;

  // encode full map and determine its crc
  OSDMap tmp;
  {
    tmp.deepish_copy_from(osdmap);
    tmp.apply_incremental(pending_inc);

    // determine appropriate features
    features = tmp.get_encoding_features();
    dout(10) << __func__ << " encoding full map with "
             << tmp.require_osd_release
             << " features " << features << dendl;

    // the features should be a subset of the mon quorum's features!
    ceph_assert((features & ~mon.get_quorum_con_features()) == 0);

    bufferlist fullbl;
    encode(tmp, fullbl, features | CEPH_FEATURE_RESERVED);
    pending_inc.full_crc = tmp.get_crc();

    // include full map in the txn.  note that old monitors will
    // overwrite this.  new ones will now skip the local full map
    // encode and reload from this.
    put_version_full(t, pending_inc.epoch, fullbl);
  }

  // encode
  ceph_assert(get_last_committed() + 1 == pending_inc.epoch);
  bufferlist bl;
  encode(pending_inc, bl, features | CEPH_FEATURE_RESERVED);

  dout(20) << " full_crc " << tmp.get_crc()
           << " inc_crc " << pending_inc.inc_crc << dendl;

  /* put everything in the transaction */
  put_version(t, pending_inc.epoch, bl);
  put_last_committed(t, pending_inc.epoch);

  // metadata, too!
  for (map<int,bufferlist>::iterator p = pending_metadata.begin();
       p != pending_metadata.end();
       ++p) {
    // decode (and discard) to sanity-check the blob before storing it
    Metadata m;
    auto mp = p->second.cbegin();
    decode(m, mp);
    t->put(OSD_METADATA_PREFIX, stringify(p->first), p->second);
  }
  for (set<int>::iterator p = pending_metadata_rm.begin();
       p != pending_metadata_rm.end();
       ++p) {
    t->erase(OSD_METADATA_PREFIX, stringify(*p));
  }
  pending_metadata.clear();
  pending_metadata_rm.clear();

  // purged_snaps
  if (tmp.require_osd_release >= ceph_release_t::octopus &&
      !pending_inc.new_purged_snaps.empty()) {
    // all snaps purged this epoch (across all pools)
    string k = make_purged_snap_epoch_key(pending_inc.epoch);
    bufferlist v;
    encode(pending_inc.new_purged_snaps, v);
    t->put(OSD_SNAP_PREFIX, k, v);
  }
  for (auto& i : pending_inc.new_purged_snaps) {
    for (auto q = i.second.begin();
         q != i.second.end();
         ++q) {
      insert_purged_snap_update(i.first, q.get_start(), q.get_end(),
                                pending_inc.epoch,
                                t);
    }
  }
  for (auto& [pool, snaps] : pending_pseudo_purged_snaps) {
    for (auto snap : snaps) {
      insert_purged_snap_update(pool, snap, snap + 1,
                                pending_inc.epoch,
                                t);
    }
  }

  // health
  health_check_map_t next;
  tmp.check_health(cct, &next);
  encode_health(next, t);
}
int OSDMonitor::load_metadata(int osd, map<string, string>& m, ostream *err)
{
bufferlist bl;
int r = mon.store->get(OSD_METADATA_PREFIX, stringify(osd), bl);
if (r < 0)
return r;
try {
auto p = bl.cbegin();
decode(m, p);
}
catch (ceph::buffer::error& e) {
if (err)
*err << "osd." << osd << " metadata is corrupt";
return -EIO;
}
return 0;
}
void OSDMonitor::count_metadata(const string& field, map<string,int> *out)
{
  // Tally the value of 'field' across all up osds; osds that do not
  // report the field are counted under "unknown".
  for (int osd = 0; osd < osdmap.get_max_osd(); ++osd) {
    if (!osdmap.is_up(osd)) {
      continue;
    }
    map<string,string> meta;
    load_metadata(osd, meta, nullptr);
    auto it = meta.find(field);
    if (it == meta.end()) {
      ++(*out)["unknown"];
    } else {
      ++(*out)[it->second];
    }
  }
}
void OSDMonitor::count_metadata(const string& field, Formatter *f)
{
map<string,int> by_val;
count_metadata(field, &by_val);
f->open_object_section(field.c_str());
for (auto& p : by_val) {
f->dump_int(p.first.c_str(), p.second);
}
f->close_section();
}
void OSDMonitor::get_versions(std::map<string, list<string>> &versions)
{
  // Group the up osds by their reported short version string.
  for (int osd = 0; osd < osdmap.get_max_osd(); ++osd) {
    if (!osdmap.is_up(osd)) {
      continue;
    }
    map<string,string> meta;
    load_metadata(osd, meta, nullptr);
    auto it = meta.find("ceph_version_short");
    if (it == meta.end()) {
      continue;
    }
    versions[it->second].push_back(string("osd.") + stringify(osd));
  }
}
int OSDMonitor::get_osd_objectstore_type(int osd, string *type)
{
  // Look up the "osd_objectstore" key in the osd's metadata.
  // Returns 0 on success, -ENOENT if the key is absent, or the
  // load_metadata() error.
  map<string, string> metadata;
  const int ret = load_metadata(osd, metadata, nullptr);
  if (ret < 0) {
    return ret;
  }
  auto found = metadata.find("osd_objectstore");
  if (found == metadata.end()) {
    return -ENOENT;
  }
  *type = found->second;
  return 0;
}
bool OSDMonitor::is_pool_currently_all_bluestore(int64_t pool_id,
                                                 const pg_pool_t &pool,
                                                 ostream *err)
{
  // just check a few pgs for efficiency - this can't give a guarantee anyway,
  // since filestore osds could always join the pool later
  set<int> checked_osds;
  for (unsigned ps = 0; ps < std::min(8u, pool.get_pg_num()); ++ps) {
    pg_t pgid(ps, pool_id);
    vector<int> up, acting;
    osdmap.pg_to_up_acting_osds(pgid, up, acting);
    for (int osd : up) {
      if (checked_osds.count(osd)) {
        continue;  // already verified this osd
      }
      string objectstore_type;
      int r = get_osd_objectstore_type(osd, &objectstore_type);
      // allow with missing metadata, e.g. due to an osd never booting yet
      if (r < 0 || objectstore_type == "bluestore") {
        checked_osds.insert(osd);
        continue;
      }
      *err << "osd." << osd << " uses " << objectstore_type;
      return false;
    }
  }
  return true;
}
int OSDMonitor::dump_osd_metadata(int osd, Formatter *f, ostream *err)
{
  // Emit every metadata key/value pair for this osd into the formatter.
  map<string,string> m;
  if (int r = load_metadata(osd, m, err)) {
    return r;
  }
  for (auto& [key, value] : m) {
    f->dump_string(key.c_str(), value);
  }
  return 0;
}
void OSDMonitor::print_nodes(Formatter *f)
{
  // Group OSDs by the host recorded in their metadata and dump the
  // result as a services listing.
  map<string, list<int> > host_to_osds;  // hostname => osds
  for (int osd = 0; osd < osdmap.get_max_osd(); osd++) {
    map<string, string> meta;
    if (load_metadata(osd, meta, nullptr)) {
      continue;
    }
    auto hostname = meta.find("hostname");
    if (hostname == meta.end()) {
      // not likely though
      continue;
    }
    host_to_osds[hostname->second].push_back(osd);
  }
  dump_services(f, host_to_osds, "osd");
}
void OSDMonitor::share_map_with_random_osd()
{
if (osdmap.get_num_up_osds() == 0) {
dout(10) << __func__ << " no up osds, don't share with anyone" << dendl;
return;
}
MonSession *s = mon.session_map.get_random_osd_session(&osdmap);
if (!s) {
dout(10) << __func__ << " no up osd on our session map" << dendl;
return;
}
dout(10) << "committed, telling random " << s->name
<< " all about it" << dendl;
// get feature of the peer
// use quorum_con_features, if it's an anonymous connection.
uint64_t features = s->con_features ? s->con_features :
mon.get_quorum_con_features();
// whatev, they'll request more if they need it
MOSDMap *m = build_incremental(osdmap.get_epoch() - 1, osdmap.get_epoch(), features);
s->con->send_message(m);
// NOTE: do *not* record osd has up to this epoch (as we do
// elsewhere) as they may still need to request older values.
}
version_t OSDMonitor::get_trim_to() const
{
  // Work out the highest epoch old osdmaps can be trimmed up to.
  // Each guard below forces the answer to 0 (no trimming).
  if (mon.get_quorum().empty()) {
    dout(10) << __func__ << " quorum not formed, trim_to = 0" << dendl;
    return 0;
  }

  {
    std::lock_guard<std::mutex> l(creating_pgs_lock);
    if (!creating_pgs.pgs.empty()) {
      dout(10) << __func__ << " pgs creating, trim_to = 0" << dendl;
      return 0;
    }
  }

  if (g_conf().get_val<bool>("mon_debug_block_osdmap_trim")) {
    dout(0) << __func__
            << " blocking osdmap trim"
            << " ('mon_debug_block_osdmap_trim' set to 'true')"
            << " trim_to = 0" << dendl;
    return 0;
  }

  {
    epoch_t trim_floor = get_min_last_epoch_clean();
    dout(10) << " min_last_epoch_clean " << trim_floor << dendl;
    if (g_conf()->mon_osd_force_trim_to > 0 &&
        g_conf()->mon_osd_force_trim_to < (int)get_last_committed()) {
      trim_floor = g_conf()->mon_osd_force_trim_to;
      dout(10) << __func__
               << " explicit mon_osd_force_trim_to = " << trim_floor << dendl;
    }
    // always retain at least mon_min_osdmap_epochs maps
    unsigned min_epochs = g_conf()->mon_min_osdmap_epochs;
    if (trim_floor + min_epochs > get_last_committed()) {
      if (min_epochs < get_last_committed()) {
        trim_floor = get_last_committed() - min_epochs;
      } else {
        trim_floor = 0;
      }
    }
    if (trim_floor > get_first_committed()) {
      dout(10) << __func__ << " trim_to = " << trim_floor << dendl;
      return trim_floor;
    }
  }
  dout(10) << __func__ << " trim_to = 0" << dendl;
  return 0;
}
epoch_t OSDMonitor::get_min_last_epoch_clean() const
{
  // Start from the lower bound tracked via last_epoch_clean, then make
  // sure we never report past the oldest epoch any osd has reported.
  epoch_t floor = last_epoch_clean.get_lower_bound(osdmap);
  for (const auto& [osd, epoch] : osd_epochs) {
    if (epoch < floor) {
      floor = epoch;
    }
  }
  return floor;
}
void OSDMonitor::encode_trim_extra(MonitorDBStore::TransactionRef tx,
                                   version_t first)
{
  // When trimming, re-store a full copy of the new oldest epoch so the
  // remaining history stays bootstrappable.
  dout(10) << __func__ << " including full map for e " << first << dendl;
  bufferlist full_bl;
  get_version_full(first, full_bl);
  put_version_full(tx, first, full_bl);

  // if pruning state exists below the trim point, update it
  if (has_osdmap_manifest &&
      first > osdmap_manifest.get_first_pinned()) {
    _prune_update_trimmed(tx, first);
  }
}
/* full osdmap prune
*
* for more information, please refer to doc/dev/mon-osdmap-prune.rst
*/
void OSDMonitor::load_osdmap_manifest()
{
  // Synchronize the in-memory manifest with whatever is (or is not)
  // currently persisted in the store.
  bool store_has_manifest =
    mon.store->exists(get_service_name(), "osdmap_manifest");
  if (!store_has_manifest) {
    if (has_osdmap_manifest) {
      dout(20) << __func__
               << " dropping osdmap manifest from memory." << dendl;
      osdmap_manifest = osdmap_manifest_t();
      has_osdmap_manifest = false;
    }
    return;
  }

  dout(20) << __func__
           << " osdmap manifest detected in store; reload." << dendl;

  bufferlist manifest_bl;
  int r = get_value("osdmap_manifest", manifest_bl);
  if (r < 0) {
    derr << __func__ << " unable to read osdmap version manifest" << dendl;
    ceph_abort_msg("error reading manifest");
  }
  osdmap_manifest.decode(manifest_bl);
  has_osdmap_manifest = true;

  dout(10) << __func__ << " store osdmap manifest pinned ("
           << osdmap_manifest.get_first_pinned()
           << " .. "
           << osdmap_manifest.get_last_pinned()
           << ")"
           << dendl;
}
bool OSDMonitor::should_prune() const
{
  // Decide whether a full-map prune pass is warranted, based on how many
  // committed epochs we hold versus the configured prune thresholds.
  version_t first = get_first_committed();
  version_t last = get_last_committed();
  version_t min_osdmap_epochs =
    g_conf().get_val<int64_t>("mon_min_osdmap_epochs");
  version_t prune_min =
    g_conf().get_val<uint64_t>("mon_osdmap_full_prune_min");
  version_t prune_interval =
    g_conf().get_val<uint64_t>("mon_osdmap_full_prune_interval");
  version_t last_pinned = osdmap_manifest.get_last_pinned();
  // never prune into the most recent min_osdmap_epochs maps
  version_t last_to_pin = last - min_osdmap_epochs;

  // Make it or break it constraints.
  //
  // If any of these conditions fails, we will not prune, regardless of
  // whether we have an on-disk manifest with an on-going pruning state.
  //
  if ((last - first) <= min_osdmap_epochs) {
    // between the first and last committed epochs, we don't have
    // enough epochs to trim, much less to prune.
    dout(10) << __func__
             << " currently holding only " << (last - first)
             << " epochs (min osdmap epochs: " << min_osdmap_epochs
             << "); do not prune."
             << dendl;
    return false;

  } else if ((last_to_pin - first) < prune_min) {
    // between the first committed epoch and the last epoch we would prune,
    // we simply don't have enough versions over the minimum to prune maps.
    dout(10) << __func__
             << " could only prune " << (last_to_pin - first)
             << " epochs (" << first << ".." << last_to_pin << "), which"
                " is less than the required minimum (" << prune_min << ")"
             << dendl;
    return false;

  } else if (has_osdmap_manifest && last_pinned >= last_to_pin) {
    // previous prune passes already covered everything prunable
    dout(10) << __func__
             << " we have pruned as far as we can; do not prune."
             << dendl;
    return false;

  } else if (last_pinned + prune_interval > last_to_pin) {
    dout(10) << __func__
             << " not enough epochs to form an interval (last pinned: "
             << last_pinned << ", last to pin: "
             << last_to_pin << ", interval: " << prune_interval << ")"
             << dendl;
    return false;
  }

  dout(15) << __func__
           << " should prune (" << last_pinned << ".." << last_to_pin << ")"
           << " lc (" << first << ".." << last << ")"
           << dendl;
  return true;
}
void OSDMonitor::_prune_update_trimmed(
    MonitorDBStore::TransactionRef tx,
    version_t first)
{
  // A trim removed all maps below 'first'; drop the now-irrelevant pins
  // below it from the manifest so it stays consistent with the store.
  dout(10) << __func__
           << " first " << first
           << " last_pinned " << osdmap_manifest.get_last_pinned()
           << dendl;

  osdmap_manifest_t manifest = osdmap_manifest;

  if (!manifest.is_pinned(first)) {
    manifest.pin(first);
  }

  // erase every pin strictly below 'first'
  set<version_t>::iterator p_end = manifest.pinned.find(first);
  set<version_t>::iterator p = manifest.pinned.begin();
  manifest.pinned.erase(p, p_end);
  ceph_assert(manifest.get_first_pinned() == first);

  if (manifest.get_last_pinned() == first+1 ||
      manifest.pinned.size() == 1) {
    // we reached the end of the line, as pinned maps go; clean up our
    // manifest, and let `should_prune()` decide whether we should prune
    // again.
    tx->erase(get_service_name(), "osdmap_manifest");
    return;
  }

  bufferlist bl;
  manifest.encode(bl);
  tx->put(get_service_name(), "osdmap_manifest", bl);
}
void OSDMonitor::prune_init(osdmap_manifest_t& manifest)
{
  dout(1) << __func__ << dendl;

  version_t pin_first;

  // verify constraints on stable in-memory state
  if (has_osdmap_manifest) {
    // we must have pruned in the past AND its state is still relevant
    // (i.e., even if we trimmed, we still hold pinned maps in the manifest,
    // and thus we still hold a manifest in the store).
    ceph_assert(!osdmap_manifest.pinned.empty());
    ceph_assert(osdmap_manifest.get_first_pinned() == get_first_committed());
    ceph_assert(osdmap_manifest.get_last_pinned() < get_last_committed());

    dout(10) << __func__
             << " first_pinned " << osdmap_manifest.get_first_pinned()
             << " last_pinned " << osdmap_manifest.get_last_pinned()
             << dendl;

    pin_first = osdmap_manifest.get_last_pinned();
  } else {
    // we must have never pruned, OR if we pruned the state must no longer
    // be relevant (i.e., the state must have been removed alongside with
    // the trim that *must* have removed past the last pinned map in a
    // previous prune).
    ceph_assert(osdmap_manifest.pinned.empty());
    ceph_assert(!mon.store->exists(get_service_name(), "osdmap_manifest"));

    pin_first = get_first_committed();
  }

  manifest.pin(pin_first);
}
bool OSDMonitor::_prune_sanitize_options() const
{
  // Validate the prune-related config options; logs every violation and
  // returns false if pruning must not proceed with the current settings.
  uint64_t prune_interval =
    g_conf().get_val<uint64_t>("mon_osdmap_full_prune_interval");
  uint64_t prune_min =
    g_conf().get_val<uint64_t>("mon_osdmap_full_prune_min");
  uint64_t txsize =
    g_conf().get_val<uint64_t>("mon_osdmap_full_prune_txsize");

  bool r = true;

  if (prune_interval == 0) {
    derr << __func__
         << " prune is enabled BUT prune interval is zero; abort."
         << dendl;
    r = false;
  } else if (prune_interval == 1) {
    derr << __func__
         << " prune interval is equal to one, which essentially means"
            " no pruning; abort."
         << dendl;
    r = false;
  }
  if (prune_min == 0) {
    derr << __func__
         << " prune is enabled BUT prune min is zero; abort."
         << dendl;
    r = false;
  }
  if (prune_interval > prune_min) {
    derr << __func__
         << " impossible to ascertain proper prune interval because"
         << " it is greater than the minimum prune epochs"
         << " (min: " << prune_min << ", interval: " << prune_interval << ")"
         << dendl;
    r = false;
  }
  // only check txsize when prune_interval >= 2; 'prune_interval - 1' would
  // underflow (uint64_t) for interval 0 and log a nonsensical huge value.
  // interval 0/1 has already been flagged above, so 'r' is unaffected.
  if (prune_interval > 1 && txsize < prune_interval - 1) {
    derr << __func__
         << " 'mon_osdmap_full_prune_txsize' (" << txsize
         << ") < 'mon_osdmap_full_prune_interval-1' (" << prune_interval - 1
         << "); abort." << dendl;
    r = false;
  }
  return r;
}
bool OSDMonitor::is_prune_enabled() const {
  // Pruning is gated on a single boolean config option.
  bool enabled = g_conf().get_val<bool>("mon_osdmap_full_prune_enabled");
  return enabled;
}
bool OSDMonitor::is_prune_supported() const {
  // The quorum must advertise the OSDMAP_PRUNE monitor feature.
  const auto& required = mon.get_required_mon_features();
  return required.contains_any(ceph::features::mon::FEATURE_OSDMAP_PRUNE);
}
/** do_prune
 *
 * Prune full osdmaps from the store: pin one full map per prune interval
 * and erase the versions in between, bounded by the configured transaction
 * size. See doc/dev/mon-osdmap-prune.rst for the overall design.
 *
 * @returns true if has side-effects; false otherwise.
 */
bool OSDMonitor::do_prune(MonitorDBStore::TransactionRef tx)
{
  bool enabled = is_prune_enabled();

  dout(1) << __func__ << " osdmap full prune "
          << ( enabled ? "enabled" : "disabled")
          << dendl;

  if (!enabled || !_prune_sanitize_options() || !should_prune()) {
    return false;
  }

  // we are beyond the minimum prune versions, we need to remove maps because
  // otherwise the store will grow unbounded and we may end up having issues
  // with available disk space or store hangs.

  // we will not pin all versions. We will leave a buffer number of versions.
  // this allows us the monitor to trim maps without caring too much about
  // pinned maps, and then allow us to use another ceph-mon without these
  // capabilities, without having to repair the store.

  osdmap_manifest_t manifest = osdmap_manifest;

  version_t first = get_first_committed();
  version_t last = get_last_committed();

  version_t last_to_pin = last - g_conf()->mon_min_osdmap_epochs;
  version_t last_pinned = manifest.get_last_pinned();
  uint64_t prune_interval =
    g_conf().get_val<uint64_t>("mon_osdmap_full_prune_interval");
  uint64_t txsize =
    g_conf().get_val<uint64_t>("mon_osdmap_full_prune_txsize");

  prune_init(manifest);

  // we need to get rid of some osdmaps

  dout(5) << __func__
          << " lc (" << first << " .. " << last << ")"
          << " last_pinned " << last_pinned
          << " interval " << prune_interval
          << " last_to_pin " << last_to_pin
          << dendl;

  // We will be erasing maps as we go.
  //
  // We will erase all maps between `last_pinned` and the `next_to_pin`.
  //
  // If `next_to_pin` happens to be greater than `last_to_pin`, then
  // we stop pruning. We could prune the maps between `next_to_pin` and
  // `last_to_pin`, but by not doing it we end up with neater pruned
  // intervals, aligned with `prune_interval`. Besides, this should not be a
  // problem as long as `prune_interval` is set to a sane value, instead of
  // hundreds or thousands of maps.

  auto map_exists = [this](version_t v) {
    string k = mon.store->combine_strings("full", v);
    return mon.store->exists(get_service_name(), k);
  };

  // 'interval' represents the number of maps from the last pinned
  // i.e., if we pinned version 1 and have an interval of 10, we're pinning
  // version 11 next; all intermediate versions will be removed.
  //
  // 'txsize' represents the maximum number of versions we'll be removing in
  // this iteration. If 'txsize' is large enough to perform multiple passes
  // pinning and removing maps, we will do so; if not, we'll do at least one
  // pass. We are quite relaxed about honouring 'txsize', but we'll always
  // ensure that we never go *over* the maximum.

  // e.g., if we pin 1 and 11, we're removing versions [2..10]; i.e., 9 maps.
  uint64_t removal_interval = prune_interval - 1;

  if (txsize < removal_interval) {
    dout(5) << __func__
            << " setting txsize to removal interval size ("
            << removal_interval << " versions"
            << dendl;
    txsize = removal_interval;
  }
  ceph_assert(removal_interval > 0);

  uint64_t num_pruned = 0;
  while (num_pruned + removal_interval <= txsize) {
    last_pinned = manifest.get_last_pinned();

    if (last_pinned + prune_interval > last_to_pin) {
      break;
    }
    ceph_assert(last_pinned < last_to_pin);

    version_t next_pinned = last_pinned + prune_interval;
    ceph_assert(next_pinned <= last_to_pin);
    manifest.pin(next_pinned);

    dout(20) << __func__
             << " last_pinned " << last_pinned
             << " next_pinned " << next_pinned
             << " num_pruned " << num_pruned
             << " removal interval (" << (last_pinned+1)
             << ".." << (next_pinned-1) << ")"
             << " txsize " << txsize << dendl;

    ceph_assert(map_exists(last_pinned));
    ceph_assert(map_exists(next_pinned));

    // erase every full map strictly between the two pinned versions
    for (version_t v = last_pinned+1; v < next_pinned; ++v) {
      ceph_assert(!manifest.is_pinned(v));

      dout(20) << __func__ << " pruning full osdmap e" << v << dendl;
      string full_key = mon.store->combine_strings("full", v);
      tx->erase(get_service_name(), full_key);
      ++num_pruned;
    }
  }

  ceph_assert(num_pruned > 0);

  bufferlist bl;
  manifest.encode(bl);
  tx->put(get_service_name(), "osdmap_manifest", bl);

  return true;
}
// -------------
// Handle read-only queries and run fast preliminary checks on updates.
// Returns true if the message was fully handled here (no map change is
// needed); false means it must be routed on to prepare_update() so a
// paxos proposal can be made.
bool OSDMonitor::preprocess_query(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  Message *m = op->get_req();
  dout(10) << "preprocess_query " << *m << " from " << m->get_orig_source_inst() << dendl;
  switch (m->get_type()) {
    // READs
  case MSG_MON_COMMAND:
    try {
      return preprocess_command(op);
    } catch (const bad_cmd_get& e) {
      // malformed command arguments; reply -EINVAL rather than crashing
      bufferlist bl;
      mon.reply_command(op, -EINVAL, e.what(), bl, get_last_committed());
      return true;
    }
  case CEPH_MSG_MON_GET_OSDMAP:
    return preprocess_get_osdmap(op);
    // damp updates
  case MSG_OSD_MARK_ME_DOWN:
    return preprocess_mark_me_down(op);
  case MSG_OSD_MARK_ME_DEAD:
    return preprocess_mark_me_dead(op);
  case MSG_OSD_FULL:
    return preprocess_full(op);
  case MSG_OSD_FAILURE:
    return preprocess_failure(op);
  case MSG_OSD_BOOT:
    return preprocess_boot(op);
  case MSG_OSD_ALIVE:
    return preprocess_alive(op);
  case MSG_OSD_PG_CREATED:
    return preprocess_pg_created(op);
  case MSG_OSD_PG_READY_TO_MERGE:
    return preprocess_pg_ready_to_merge(op);
  case MSG_OSD_PGTEMP:
    return preprocess_pgtemp(op);
  case MSG_OSD_BEACON:
    return preprocess_beacon(op);
  case CEPH_MSG_POOLOP:
    return preprocess_pool_op(op);
  case MSG_REMOVE_SNAPS:
    return preprocess_remove_snaps(op);
  case MSG_MON_GET_PURGED_SNAPS:
    return preprocess_get_purged_snaps(op);
  default:
    // unknown message types should never reach this service
    ceph_abort();
    return true;
  }
}
// Handle messages that require changing the osdmap: stage the change into
// pending_inc.  Returning true requests a paxos proposal.
bool OSDMonitor::prepare_update(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  Message *m = op->get_req();
  dout(7) << "prepare_update " << *m << " from " << m->get_orig_source_inst() << dendl;
  switch (m->get_type()) {
    // damp updates
  case MSG_OSD_MARK_ME_DOWN:
    return prepare_mark_me_down(op);
  case MSG_OSD_MARK_ME_DEAD:
    return prepare_mark_me_dead(op);
  case MSG_OSD_FULL:
    return prepare_full(op);
  case MSG_OSD_FAILURE:
    return prepare_failure(op);
  case MSG_OSD_BOOT:
    return prepare_boot(op);
  case MSG_OSD_ALIVE:
    return prepare_alive(op);
  case MSG_OSD_PG_CREATED:
    return prepare_pg_created(op);
  case MSG_OSD_PGTEMP:
    return prepare_pgtemp(op);
  case MSG_OSD_PG_READY_TO_MERGE:
    return prepare_pg_ready_to_merge(op);
  case MSG_OSD_BEACON:
    return prepare_beacon(op);
  case MSG_MON_COMMAND:
    try {
      return prepare_command(op);
    } catch (const bad_cmd_get& e) {
      // malformed command arguments; reply -EINVAL rather than aborting
      bufferlist bl;
      mon.reply_command(op, -EINVAL, e.what(), bl, get_last_committed());
      return true;
    }
  case CEPH_MSG_POOLOP:
    return prepare_pool_op(op);
  case MSG_REMOVE_SNAPS:
    return prepare_remove_snaps(op);
  default:
    // anything else should have been filtered in preprocess_query()
    ceph_abort();
  }
  return false;
}
// Decide whether accumulated pending changes warrant proposing a new map
// now; `delay` may be adjusted to control how soon the proposal happens.
bool OSDMonitor::should_propose(double& delay)
{
  dout(10) << "should_propose" << dendl;
  // if full map, propose immediately! any subsequent changes will be clobbered.
  if (pending_inc.fullmap.length())
    return true;
  // adjust osd weights?
  // fold the boot-time weights collected in prepare_boot() into pending_inc
  // once we have a weight for every osd slot, and propose without delay.
  if (!osd_weight.empty() &&
      osd_weight.size() == (unsigned)osdmap.get_max_osd()) {
    dout(0) << " adjusting osd weights based on " << osd_weight << dendl;
    osdmap.adjust_osd_weights(osd_weight, pending_inc);
    delay = 0.0;
    osd_weight.clear();
    return true;
  }
  // otherwise defer to the generic paxos-service pacing logic
  return PaxosService::should_propose(delay);
}
// ---------------------------
// READs
// Reply to an explicit request for full and/or incremental osdmaps.  The
// reply is capped both by osd_map_message_max (map count) and
// osd_map_message_max_bytes (encoded size).
bool OSDMonitor::preprocess_get_osdmap(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MMonGetOSDMap>();
  // encode for the requester's connection features when known, otherwise
  // for the quorum's common feature set
  uint64_t features = mon.get_quorum_con_features();
  if (op->get_session() && op->get_session()->con_features)
    features = op->get_session()->con_features;
  dout(10) << __func__ << " " << *m << dendl;
  MOSDMap *reply = new MOSDMap(mon.monmap->fsid, features);
  epoch_t first = get_first_committed();
  epoch_t last = osdmap.get_epoch();
  int max = g_conf()->osd_map_message_max;
  ssize_t max_bytes = g_conf()->osd_map_message_max_bytes;
  // full maps in the requested range, oldest first, until a cap is hit
  for (epoch_t e = std::max(first, m->get_full_first());
       e <= std::min(last, m->get_full_last()) && max > 0 && max_bytes > 0;
       ++e, --max) {
    bufferlist& bl = reply->maps[e];
    int r = get_version_full(e, features, bl);
    ceph_assert(r >= 0);
    max_bytes -= bl.length();
  }
  // then incremental maps in the requested range, under the same caps
  for (epoch_t e = std::max(first, m->get_inc_first());
       e <= std::min(last, m->get_inc_last()) && max > 0 && max_bytes > 0;
       ++e, --max) {
    bufferlist& bl = reply->incremental_maps[e];
    int r = get_version(e, features, bl);
    ceph_assert(r >= 0);
    max_bytes -= bl.length();
  }
  reply->cluster_osdmap_trim_lower_bound = first;
  reply->newest_map = last;
  mon.send_reply(op, reply);
  return true;
}
// ---------------------------
// UPDATEs
// failure --
// Returns true when the message should be dropped: no session, missing
// the 'osd' execute capability, or an fsid that does not match ours.
// Returns false when the source checks out.
bool OSDMonitor::check_source(MonOpRequestRef op, uuid_d fsid) {
  MonSession *s = op->get_session();
  if (!s) {
    return true;
  }
  if (!s->is_capable("osd", MON_CAP_X)) {
    dout(0) << "got MOSDFailure from entity with insufficient caps "
	    << s->caps << dendl;
    return true;
  }
  if (fsid != mon.monmap->fsid) {
    dout(0) << "check_source: on fsid " << fsid
	    << " != " << mon.monmap->fsid << dendl;
    return true;
  }
  return false;
}
// Sanity-check an MOSDFailure report before it may change the map.
// Returns true when the message was handled/ignored here; false to
// continue on to prepare_failure().
bool OSDMonitor::preprocess_failure(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MOSDFailure>();
  // who is target_osd
  int badboy = m->get_target_osd();
  // check permissions
  if (check_source(op, m->fsid))
    goto didit;
  // first, verify the reporting host is valid
  if (m->get_orig_source().is_osd()) {
    int from = m->get_orig_source().num();
    if (!osdmap.exists(from) ||
	!osdmap.get_addrs(from).legacy_equals(m->get_orig_source_addrs()) ||
	(osdmap.is_down(from) && m->if_osd_failed())) {
      // a down osd may still cancel a report, but may not file a new one
      dout(5) << "preprocess_failure from dead osd." << from
	      << ", ignoring" << dendl;
      send_incremental(op, m->get_epoch()+1);
      goto didit;
    }
  }
  // weird?
  if (osdmap.is_down(badboy)) {
    dout(5) << "preprocess_failure dne(/dup?): osd." << m->get_target_osd()
	    << " " << m->get_target_addrs()
	    << ", from " << m->get_orig_source() << dendl;
    if (m->get_epoch() < osdmap.get_epoch())
      send_incremental(op, m->get_epoch()+1);
    goto didit;
  }
  // reporter is talking about a different instance of this osd id?
  if (osdmap.get_addrs(badboy) != m->get_target_addrs()) {
    dout(5) << "preprocess_failure wrong osd: report osd." << m->get_target_osd()
	    << " " << m->get_target_addrs()
	    << " != map's " << osdmap.get_addrs(badboy)
	    << ", from " << m->get_orig_source() << dendl;
    if (m->get_epoch() < osdmap.get_epoch())
      send_incremental(op, m->get_epoch()+1);
    goto didit;
  }
  // already reported?
  // NOTE(review): the is_down() half of this condition is redundant --
  // the "weird?" check above already bailed out for down osds.
  if (osdmap.is_down(badboy) ||
      osdmap.get_up_from(badboy) > m->get_epoch()) {
    dout(5) << "preprocess_failure dup/old: osd." << m->get_target_osd()
	    << " " << m->get_target_addrs()
	    << ", from " << m->get_orig_source() << dendl;
    if (m->get_epoch() < osdmap.get_epoch())
      send_incremental(op, m->get_epoch()+1);
    goto didit;
  }
  // nodown flag or up-ratio floor prevents marking this osd down
  if (!can_mark_down(badboy)) {
    dout(5) << "preprocess_failure ignoring report of osd."
	    << m->get_target_osd() << " " << m->get_target_addrs()
	    << " from " << m->get_orig_source() << dendl;
    goto didit;
  }
  dout(10) << "preprocess_failure new: osd." << m->get_target_osd()
	   << " " << m->get_target_addrs()
	   << ", from " << m->get_orig_source() << dendl;
  return false;
 didit:
  mon.no_reply(op);
  return true;
}
// Completion used to acknowledge an MOSDMarkMeDown request once the map
// change (or the decision to ignore it) has been finalized.
class C_AckMarkedDown : public C_MonOp {
  OSDMonitor *osdmon;
public:
  C_AckMarkedDown(
    OSDMonitor *osdmon,
    MonOpRequestRef op)
    : C_MonOp(op), osdmon(osdmon) {}
  void _finish(int r) override {
    if (r == 0) {
      // success: echo the request back to the osd as the ack
      auto m = op->get_req<MOSDMarkMeDown>();
      osdmon->mon.send_reply(
	op,
	new MOSDMarkMeDown(
	  m->fsid,
	  m->target_osd,
	  m->target_addrs,
	  m->get_epoch(),
	  false));   // ACK itself does not request an ack
    } else if (r == -EAGAIN) {
      // retry: push the op back through dispatch
      osdmon->dispatch(op);
    } else {
      ceph_abort_msgf("C_AckMarkedDown: unknown result %d", r);
    }
  }
  ~C_AckMarkedDown() override {
  }
};
// Validate an osd's request to mark itself down.  Returns true when
// handled here (with an ack if one was requested); false to proceed to
// prepare_mark_me_down().
bool OSDMonitor::preprocess_mark_me_down(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MOSDMarkMeDown>();
  int from = m->target_osd;
  // check permissions
  if (check_source(op, m->fsid))
    goto reply;
  // first, verify the reporting host is valid
  if (!m->get_orig_source().is_osd())
    goto reply;
  // the target must exist, be up, and match the addresses in the request
  if (!osdmap.exists(from) ||
      osdmap.is_down(from) ||
      osdmap.get_addrs(from) != m->target_addrs) {
    dout(5) << "preprocess_mark_me_down from dead osd."
	    << from << ", ignoring" << dendl;
    send_incremental(op, m->get_epoch()+1);
    goto reply;
  }
  // no down might be set
  if (!can_mark_down(from))
    goto reply;
  dout(10) << "MOSDMarkMeDown for: " << m->get_orig_source()
	   << " " << m->target_addrs << dendl;
  return false;
 reply:
  // even when ignoring the request, ack it if the sender asked for one
  if (m->request_ack) {
    Context *c(new C_AckMarkedDown(this, op));
    c->complete(0);
  }
  return true;
}
// Stage the self-requested down (and optionally dead) state into
// pending_inc; ack after the proposal commits when requested.
bool OSDMonitor::prepare_mark_me_down(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MOSDMarkMeDown>();
  int target_osd = m->target_osd;
  // expected to have been validated in preprocess_mark_me_down()
  ceph_assert(osdmap.is_up(target_osd));
  ceph_assert(osdmap.get_addrs(target_osd) == m->target_addrs);
  mon.clog->info() << "osd." << target_osd << " marked itself " << ((m->down_and_dead) ? "down and dead" : "down");
  // new_state is applied as an XOR, so setting the UP bit marks it down
  pending_inc.new_state[target_osd] = CEPH_OSD_UP;
  if (m->down_and_dead) {
    // also record the epoch at which the osd is considered dead
    if (!pending_inc.new_xinfo.count(target_osd)) {
      pending_inc.new_xinfo[target_osd] = osdmap.osd_xinfo[target_osd];
    }
    pending_inc.new_xinfo[target_osd].dead_epoch = m->get_epoch();
  }
  if (m->request_ack)
    wait_for_finished_proposal(op, new C_AckMarkedDown(this, op));
  return true;
}
// Validate an osd's request to declare itself dead.  Returns true when
// the request was dropped here; false to proceed to
// prepare_mark_me_dead().
bool OSDMonitor::preprocess_mark_me_dead(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MOSDMarkMeDead>();
  const int osd = m->target_osd;

  // drop requests with bad caps or the wrong fsid
  if (check_source(op, m->fsid)) {
    mon.no_reply(op);
    return true;
  }

  // only OSDs may declare themselves dead
  if (!m->get_orig_source().is_osd()) {
    mon.no_reply(op);
    return true;
  }

  // the osd must exist and already be marked down
  if (!osdmap.exists(osd) || !osdmap.is_down(osd)) {
    dout(5) << __func__ << " from nonexistent or up osd." << osd
	    << ", ignoring" << dendl;
    send_incremental(op, m->get_epoch()+1);
    mon.no_reply(op);
    return true;
  }

  return false;
}
// Record the epoch at which an already-down osd declares itself dead.
bool OSDMonitor::prepare_mark_me_dead(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MOSDMarkMeDead>();
  int target_osd = m->target_osd;
  // checked in preprocess_mark_me_dead()
  ceph_assert(osdmap.is_down(target_osd));
  mon.clog->info() << "osd." << target_osd << " marked itself dead as of e"
		   << m->get_epoch();
  if (!pending_inc.new_xinfo.count(target_osd)) {
    pending_inc.new_xinfo[target_osd] = osdmap.osd_xinfo[target_osd];
  }
  pending_inc.new_xinfo[target_osd].dead_epoch = m->get_epoch();
  // no reply is sent on success; on failure the osd must retry
  wait_for_finished_proposal(
    op,
    new LambdaContext(
      [op, this] (int r) {
	if (r >= 0) {
	  mon.no_reply(op); // ignore on success
	}
      }
      ));
  return true;
}
// May osd `i` be marked down?  Honours the nodown flag and refuses to go
// below the mon_osd_min_up_ratio floor.
bool OSDMonitor::can_mark_down(int i)
{
  if (osdmap.is_nodown(i)) {
    dout(5) << __func__ << " osd." << i << " is marked as nodown, "
	    << "will not mark it down" << dendl;
    return false;
  }

  const int total = osdmap.get_num_osds();
  if (total == 0) {
    dout(5) << __func__ << " no osds" << dendl;
    return false;
  }

  // count the osds that would remain up once pending changes apply
  const int still_up =
    osdmap.get_num_up_osds() - pending_inc.get_net_marked_down(&osdmap);
  const float ratio = (float)still_up / (float)total;
  if (ratio < g_conf()->mon_osd_min_up_ratio) {
    dout(2) << __func__ << " current up_ratio " << ratio << " < min "
	    << g_conf()->mon_osd_min_up_ratio
	    << ", will not mark osd." << i << " down" << dendl;
    return false;
  }
  return true;
}
// May osd `i` be marked up?  Only the noup flag can prevent it.
bool OSDMonitor::can_mark_up(int i)
{
  if (!osdmap.is_noup(i)) {
    return true;
  }
  dout(5) << __func__ << " osd." << i << " is marked as noup, "
	  << "will not mark it up" << dendl;
  return false;
}
/**
 * May osd @p i be marked out?  Honours the noout flag and refuses to go
 * below the mon_osd_min_in_ratio floor.
 *
 * @note the parameter @p i apparently only exists here so we can output
 * the osd's id on messages; a negative value means no single osd applies.
 */
bool OSDMonitor::can_mark_out(int i)
{
  if (osdmap.is_noout(i)) {
    dout(5) << __func__ << " osd." << i << " is marked as noout, "
	    << "will not mark it out" << dendl;
    return false;
  }

  const int total = osdmap.get_num_osds();
  if (total == 0) {
    dout(5) << __func__ << " no osds" << dendl;
    return false;
  }

  // count the osds that would remain in once pending changes apply
  const int still_in =
    osdmap.get_num_in_osds() - pending_inc.get_net_marked_out(&osdmap);
  const float ratio = (float)still_in / (float)total;
  if (ratio >= g_conf()->mon_osd_min_in_ratio) {
    return true;
  }

  // below the floor; log with or without a specific osd id
  if (i >= 0)
    dout(5) << __func__ << " current in_ratio " << ratio << " < min "
	    << g_conf()->mon_osd_min_in_ratio
	    << ", will not mark osd." << i << " out" << dendl;
  else
    dout(5) << __func__ << " current in_ratio " << ratio << " < min "
	    << g_conf()->mon_osd_min_in_ratio
	    << ", will not mark osds out" << dendl;
  return false;
}
// May osd `i` be marked in?  Only the noin flag can prevent it.
bool OSDMonitor::can_mark_in(int i)
{
  if (!osdmap.is_noin(i)) {
    return true;
  }
  dout(5) << __func__ << " osd." << i << " is marked as noin, "
	  << "will not mark it in" << dendl;
  return false;
}
// Walk all outstanding failure reports: mark osds down where the reports
// satisfy check_failure(), and drop report sets that have gone stale.
// Returns true if at least one osd was marked down (a proposal is needed).
bool OSDMonitor::check_failures(utime_t now)
{
  bool found_failure = false;
  auto p = failure_info.begin();
  while (p != failure_info.end()) {
    auto& [target_osd, fi] = *p;
    if (can_mark_down(target_osd) &&
	check_failure(now, target_osd, fi)) {
      // keep the entry; process_failures() cleans it up after the commit
      found_failure = true;
      ++p;
    } else if (is_failure_stale(now, fi)) {
      // reports have lingered too long without resolving; forget them
      dout(10) << " dropping stale failure_info for osd." << target_osd
	       << " from " << fi.reporters.size() << " reporters"
	       << dendl;
      p = failure_info.erase(p);
    } else {
      ++p;
    }
  }
  return found_failure;
}
/**
 * Compute the heartbeat grace period to apply before marking
 * @p target_osd down.
 *
 * When mon_osd_adjust_heartbeat_grace is enabled, the static
 * osd_heartbeat_grace is extended by a decayed estimate of the target's
 * historical "lagginess" and by the average lagginess of the reporting
 * peers (a proxy for a laggy subcluster, e.g. a rack with a bad switch).
 *
 * @param now         current time
 * @param target_osd  id of the osd being considered for failure
 * @param fi          accumulated failure reports for target_osd
 * @return the grace interval to compare against how long the osd has
 *         been reported failed
 */
utime_t OSDMonitor::get_grace_time(utime_t now,
                                   int target_osd,
                                   failure_info_t& fi) const
{
  utime_t orig_grace(g_conf()->osd_heartbeat_grace, 0);
  if (!g_conf()->mon_osd_adjust_heartbeat_grace) {
    // adaptive grace disabled; use the static heartbeat grace as-is
    return orig_grace;
  }
  utime_t grace = orig_grace;
  double halflife = (double)g_conf()->mon_osd_laggy_halflife;
  double decay_k = ::log(.5) / halflife;
  // scale grace period based on historical probability of 'lagginess'
  // (false positive failures due to slowness).
  const osd_xinfo_t& xi = osdmap.get_xinfo(target_osd);
  const utime_t failed_for = now - fi.get_failed_since();
  double decay = exp((double)failed_for * decay_k);
  dout(20) << " halflife " << halflife << " decay_k " << decay_k
	   << " failed_for " << failed_for << " decay " << decay << dendl;
  double my_grace = decay * (double)xi.laggy_interval * xi.laggy_probability;
  grace += my_grace;
  // consider the peers reporting a failure a proxy for a potential
  // 'subcluster' over the overall cluster that is similarly
  // laggy. this is clearly not true in all cases, but will sometimes
  // help us localize the grace correction to a subset of the system
  // (say, a rack with a bad switch) that is unhappy.
  double peer_grace = 0;
  for (auto& [reporter, report] : fi.reporters) {
    if (osdmap.exists(reporter)) {
      // NOTE: distinct names here -- the original shadowed xi/decay above
      const osd_xinfo_t& rxi = osdmap.get_xinfo(reporter);
      utime_t elapsed = now - rxi.down_stamp;
      double rdecay = exp((double)elapsed * decay_k);
      peer_grace += rdecay * (double)rxi.laggy_interval * rxi.laggy_probability;
    }
  }
  // guard against an empty reporter set: dividing by zero would turn the
  // grace into NaN and poison the comparison in check_failure().
  if (!fi.reporters.empty()) {
    peer_grace /= (double)fi.reporters.size();
  }
  grace += peer_grace;
  dout(10) << " osd." << target_osd << " has "
	   << fi.reporters.size() << " reporters, "
	   << grace << " grace (" << orig_grace << " + " << my_grace
	   << " + " << peer_grace << "), max_failed_since " << fi.get_failed_since()
	   << dendl;
  return grace;
}
// Decide whether osd `target_osd` has accumulated enough failure reports,
// from enough distinct failure domains, for long enough, to be marked
// down.  Stages the down state in pending_inc and returns true if so.
bool OSDMonitor::check_failure(utime_t now, int target_osd, failure_info_t& fi)
{
  // already pending failure?
  if (pending_inc.new_state.count(target_osd) &&
      pending_inc.new_state[target_osd] & CEPH_OSD_UP) {
    dout(10) << " already pending failure" << dendl;
    return true;
  }
  set<string> reporters_by_subtree;
  auto reporter_subtree_level = g_conf().get_val<string>("mon_osd_reporter_subtree_level");
  ceph_assert(fi.reporters.size());
  for (auto p = fi.reporters.begin(); p != fi.reporters.end();) {
    // get the parent bucket whose type matches with "reporter_subtree_level".
    // fall back to OSD if the level doesn't exist.
    if (osdmap.exists(p->first)) {
      auto reporter_loc = osdmap.crush->get_full_location(p->first);
      if (auto iter = reporter_loc.find(reporter_subtree_level);
	  iter == reporter_loc.end()) {
	reporters_by_subtree.insert("osd." + to_string(p->first));
      } else {
	reporters_by_subtree.insert(iter->second);
      }
      ++p;
    } else {
      // reporter no longer exists in the map; drop its report
      fi.cancel_report(p->first);;
      p = fi.reporters.erase(p);
    }
  }
  // require reports from enough distinct failure domains
  if (reporters_by_subtree.size() < g_conf().get_val<uint64_t>("mon_osd_min_down_reporters")) {
    return false;
  }
  const utime_t failed_for = now - fi.get_failed_since();
  const utime_t grace = get_grace_time(now, target_osd, fi);
  if (failed_for >= grace) {
    dout(1) << " we have enough reporters to mark osd." << target_osd
	    << " down" << dendl;
    // new_state is applied as an XOR, so setting the UP bit marks it down
    pending_inc.new_state[target_osd] = CEPH_OSD_UP;
    mon.clog->info() << "osd." << target_osd << " failed ("
		     << osdmap.crush->get_full_location_ordered_string(
		       target_osd)
		     << ") ("
		     << (int)reporters_by_subtree.size()
		     << " reporters from different "
		     << reporter_subtree_level << " after "
		     << failed_for << " >= grace " << grace << ")";
    return true;
  }
  return false;
}
// if it takes too long to either cancel the report or mark the osd down,
// some reporters must have failed to cancel their reports. let's just
// forget these reports.
bool OSDMonitor::is_failure_stale(utime_t now, failure_info_t& fi) const
{
  const auto grace = cct->_conf.get_val<int64_t>("osd_heartbeat_grace");
  const auto stale = cct->_conf.get_val<int64_t>("osd_heartbeat_stale");
  const utime_t failed_for = now - fi.get_failed_since();
  return failed_for >= (grace + stale);
}
// Immediately stage a failure for `target_osd` (skipping the reporter /
// grace machinery), crediting `by` as the reporting osd in the log.
void OSDMonitor::force_failure(int target_osd, int by)
{
  // nothing to do if a failure for this osd is already queued
  auto it = pending_inc.new_state.find(target_osd);
  if (it != pending_inc.new_state.end() && (it->second & CEPH_OSD_UP)) {
    dout(10) << " already pending failure" << dendl;
    return;
  }

  dout(1) << " we're forcing failure of osd." << target_osd << dendl;
  // new_state is applied as an XOR, so setting the UP bit marks it down
  pending_inc.new_state[target_osd] = CEPH_OSD_UP;
  // also record the epoch at which the osd is considered dead
  if (pending_inc.new_xinfo.count(target_osd) == 0) {
    pending_inc.new_xinfo[target_osd] = osdmap.osd_xinfo[target_osd];
  }
  pending_inc.new_xinfo[target_osd].dead_epoch = pending_inc.epoch;

  mon.clog->info() << "osd." << target_osd << " failed ("
		   << osdmap.crush->get_full_location_ordered_string(target_osd)
		   << ") (connection refused reported by osd." << by << ")";
}
// Apply a failure report (or its cancellation) from an osd.  Returns true
// when a map change has been staged (i.e. the target was marked down).
bool OSDMonitor::prepare_failure(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MOSDFailure>();
  dout(1) << "prepare_failure osd." << m->get_target_osd()
	  << " " << m->get_target_addrs()
	  << " from " << m->get_orig_source()
	  << " is reporting failure:" << m->if_osd_failed() << dendl;
  int target_osd = m->get_target_osd();
  int reporter = m->get_orig_source().num();
  // guaranteed by preprocess_failure()
  ceph_assert(osdmap.is_up(target_osd));
  ceph_assert(osdmap.get_addrs(target_osd) == m->get_target_addrs());
  mon.no_reply(op);
  if (m->if_osd_failed()) {
    // calculate failure time
    utime_t now = ceph_clock_now();
    utime_t failed_since =
      m->get_recv_stamp() - utime_t(m->failed_for, 0);
    // add a report
    if (m->is_immediate()) {
      // e.g. connection refused: bypass the grace machinery entirely
      mon.clog->debug() << "osd." << m->get_target_osd()
			<< " reported immediately failed by "
			<< m->get_orig_source();
      force_failure(target_osd, reporter);
      return true;
    }
    mon.clog->debug() << "osd." << m->get_target_osd() << " reported failed by "
		      << m->get_orig_source();
    failure_info_t& fi = failure_info[target_osd];
    fi.add_report(reporter, failed_since, op);
    return check_failure(now, target_osd, fi);
  } else {
    // remove the report
    mon.clog->debug() << "osd." << m->get_target_osd()
		      << " failure report canceled by "
		      << m->get_orig_source();
    if (failure_info.count(target_osd)) {
      failure_info_t& fi = failure_info[target_osd];
      fi.cancel_report(reporter);
      if (fi.reporters.empty()) {
	dout(10) << " removing last failure_info for osd." << target_osd
		 << dendl;
	failure_info.erase(target_osd);
      } else {
	dout(10) << " failure_info for osd." << target_osd << " now "
		 << fi.reporters.size() << " reporters" << dendl;
      }
    } else {
      dout(10) << " no failure_info for osd." << target_osd << dendl;
    }
  }
  return false;
}
// After a map commit, clean up failure_info entries for osds that are no
// longer up: take their queued report messages, send each reporter the
// latest maps, and drop the entry.
void OSDMonitor::process_failures()
{
  auto p = failure_info.begin();
  while (p != failure_info.end()) {
    if (osdmap.is_up(p->first)) {
      ++p;
      continue;
    }
    dout(10) << "process_failures osd." << p->first << dendl;
    list<MonOpRequestRef> ls;
    p->second.take_report_messages(ls);
    p = failure_info.erase(p);
    for (auto& o : ls) {
      if (o) {
	o->mark_event(__func__);
	MOSDFailure *m = o->get_req<MOSDFailure>();
	send_latest(o, m->get_epoch());
	mon.no_reply(o);
      }
    }
  }
}
// Collect the queued report messages from every failure_info entry into
// `ls`, then forget all outstanding failure state.
void OSDMonitor::take_all_failures(list<MonOpRequestRef>& ls)
{
  dout(10) << __func__ << " on " << failure_info.size() << " osds" << dendl;
  for (auto& [osd, fi] : failure_info) {
    fi.take_report_messages(ls);
  }
  failure_info.clear();
}
// Threshold (in seconds) beyond which a failure interval is considered
// too old: 48x the laggy halflife (default 1_hr -> 48 hours).
int OSDMonitor::get_grace_interval_threshold()
{
  constexpr int grace_threshold_factor = 48;
  return g_conf()->mon_osd_laggy_halflife * grace_threshold_factor;
}
bool OSDMonitor::grace_interval_threshold_exceeded(int last_failed_interval)
{
int grace_interval_threshold_secs = get_grace_interval_threshold();
if (last_failed_interval > grace_interval_threshold_secs) {
dout(1) << " last_failed_interval " << last_failed_interval
<< " > grace_interval_threshold_secs " << grace_interval_threshold_secs
<< dendl;
return true;
}
return false;
}
// Reset the laggy statistics for `target_osd` in the pending incremental,
// seeding from the current map's xinfo if no change was staged yet.
void OSDMonitor::set_default_laggy_params(int target_osd)
{
  auto it = pending_inc.new_xinfo.find(target_osd);
  if (it == pending_inc.new_xinfo.end()) {
    it = pending_inc.new_xinfo.emplace(target_osd,
				       osdmap.osd_xinfo[target_osd]).first;
  }
  osd_xinfo_t& xi = it->second;
  xi.down_stamp = pending_inc.modified;
  xi.laggy_probability = 0.0;
  xi.laggy_interval = 0;
  dout(20) << __func__ << " reset laggy, now xi " << xi << dendl;
}
// boot --
// Validate an osd's boot message.  Returns true when the boot is either a
// duplicate (reply already sent) or must be ignored; false to proceed to
// prepare_boot().
bool OSDMonitor::preprocess_boot(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MOSDBoot>();
  int from = m->get_orig_source_inst().name.num();
  // check permissions, ignore if failed (no response expected)
  MonSession *session = op->get_session();
  if (!session)
    goto ignore;
  if (!session->is_capable("osd", MON_CAP_X)) {
    dout(0) << "got preprocess_boot message from entity with insufficient caps"
	    << session->caps << dendl;
    goto ignore;
  }
  if (m->sb.cluster_fsid != mon.monmap->fsid) {
    dout(0) << "preprocess_boot on fsid " << m->sb.cluster_fsid
	    << " != " << mon.monmap->fsid << dendl;
    goto ignore;
  }
  if (m->get_orig_source_inst().addr.is_blank_ip()) {
    dout(0) << "preprocess_boot got blank addr for " << m->get_orig_source_inst() << dendl;
    goto ignore;
  }
  ceph_assert(m->get_orig_source_inst().name.is_osd());
  // lower bound of N-2
  if (!HAVE_FEATURE(m->osd_features, SERVER_PACIFIC)) {
    mon.clog->info() << "disallowing boot of OSD "
		     << m->get_orig_source_inst()
		     << " because the osd lacks CEPH_FEATURE_SERVER_PACIFIC";
    goto ignore;
  }
  // make sure osd versions do not span more than 3 releases
  if (HAVE_FEATURE(m->osd_features, SERVER_QUINCY) &&
      osdmap.require_osd_release < ceph_release_t::octopus) {
    mon.clog->info() << "disallowing boot of quincy+ OSD "
		     << m->get_orig_source_inst()
		     << " because require_osd_release < octopus";
    goto ignore;
  }
  if (HAVE_FEATURE(m->osd_features, SERVER_REEF) &&
      osdmap.require_osd_release < ceph_release_t::pacific) {
    mon.clog->info() << "disallowing boot of reef+ OSD "
		     << m->get_orig_source_inst()
		     << " because require_osd_release < pacific";
    goto ignore;
  }
  // See crimson/osd/osd.cc: OSD::_send_boot
  if (auto type_iter = m->metadata.find("osd_type");
      type_iter != m->metadata.end()) {
    const auto &otype = type_iter->second;
    // m->metadata["osd_type"] must be "crimson", classic doesn't send osd_type
    if (otype == "crimson") {
      if (!osdmap.get_allow_crimson()) {
	mon.clog->info()
	  << "Disallowing boot of crimson-osd without allow_crimson "
	  << "OSDMap flag. Run ceph osd set_allow_crimson to set "
	  << "allow_crimson flag. Note that crimson-osd is "
	  << "considered unstable and may result in crashes or "
	  << "data loss. Its usage should be restricted to "
	  << "testing and development.";
	goto ignore;
      }
    } else {
      // unexpected value; log loudly but do not block the boot
      derr << __func__ << ": osd " << m->get_orig_source_inst()
	   << " sent non-crimson osd_type field in MOSDBoot: "
	   << otype
	   << " -- booting anyway"
	   << dendl;
    }
  }
  if (osdmap.stretch_mode_enabled &&
      !(m->osd_features & CEPH_FEATUREMASK_STRETCH_MODE)) {
    mon.clog->info() << "disallowing boot of OSD "
		     << m->get_orig_source_inst()
		     << " because stretch mode is on and OSD lacks support";
    goto ignore;
  }
  // already booted?
  if (osdmap.is_up(from) &&
      osdmap.get_addrs(from).legacy_equals(m->get_orig_source_addrs()) &&
      osdmap.get_cluster_addrs(from).legacy_equals(m->cluster_addrs)) {
    // yup.
    dout(7) << "preprocess_boot dup from " << m->get_orig_source()
	    << " " << m->get_orig_source_addrs()
	    << " =~ " << osdmap.get_addrs(from) << dendl;
    _booted(op, false);
    return true;
  }
  // same id but a different osd uuid: refuse to clobber the existing osd
  if (osdmap.exists(from) &&
      !osdmap.get_uuid(from).is_zero() &&
      osdmap.get_uuid(from) != m->sb.osd_fsid) {
    dout(7) << __func__ << " from " << m->get_orig_source_inst()
            << " clashes with existing osd: different fsid"
            << " (ours: " << osdmap.get_uuid(from)
            << " ; theirs: " << m->sb.osd_fsid << ")" << dendl;
    goto ignore;
  }
  // stale boot message predating the osd's most recent up interval
  if (osdmap.exists(from) &&
      osdmap.get_info(from).up_from > m->version &&
      osdmap.get_most_recent_addrs(from).legacy_equals(
	m->get_orig_source_addrs())) {
    dout(7) << "prepare_boot msg from before last up_from, ignoring" << dendl;
    send_latest(op, m->sb.current_epoch+1);
    return true;
  }
  // noup?
  if (!can_mark_up(from)) {
    dout(7) << "preprocess_boot ignoring boot from " << m->get_orig_source_inst() << dendl;
    send_latest(op, m->sb.current_epoch+1);
    return true;
  }
  dout(10) << "preprocess_boot from " << m->get_orig_source_inst() << dendl;
  return false;
 ignore:
  return true;
}
// Stage an osd boot into pending_inc: mark the previous instance down if
// still up, otherwise record the new addresses, uuid, clean interval,
// laggy statistics and features, and possibly mark the osd back in.
bool OSDMonitor::prepare_boot(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MOSDBoot>();
  dout(7) << __func__ << " from " << m->get_source()
	  << " sb " << m->sb
	  << " client_addrs" << m->get_connection()->get_peer_addrs()
	  << " cluster_addrs " << m->cluster_addrs
	  << " hb_back_addrs " << m->hb_back_addrs
	  << " hb_front_addrs " << m->hb_front_addrs
	  << dendl;
  ceph_assert(m->get_orig_source().is_osd());
  int from = m->get_orig_source().num();
  // does this osd exist?
  if (from >= osdmap.get_max_osd()) {
    dout(1) << "boot from osd." << from << " >= max_osd "
	    << osdmap.get_max_osd() << dendl;
    return false;
  }
  // effective state once pending changes apply (new_state is XORed in)
  int oldstate = osdmap.exists(from) ? osdmap.get_state(from) : CEPH_OSD_NEW;
  if (pending_inc.new_state.count(from))
    oldstate ^= pending_inc.new_state[from];
  // already up? mark down first?
  if (osdmap.is_up(from)) {
    dout(7) << __func__ << " was up, first marking down osd." << from << " "
	    << osdmap.get_addrs(from) << dendl;
    // preprocess should have caught these; if not, assert.
    ceph_assert(!osdmap.get_addrs(from).legacy_equals(
		  m->get_orig_source_addrs()) ||
		!osdmap.get_cluster_addrs(from).legacy_equals(m->cluster_addrs));
    ceph_assert(osdmap.get_uuid(from) == m->sb.osd_fsid);
    if (pending_inc.new_state.count(from) == 0 ||
	(pending_inc.new_state[from] & CEPH_OSD_UP) == 0) {
      // mark previous guy down
      pending_inc.new_state[from] = CEPH_OSD_UP;
    }
    // retry the boot once the down change commits
    wait_for_finished_proposal(op, new C_RetryMessage(this, op));
  } else if (pending_inc.new_up_client.count(from)) {
    // already prepared, just wait
    dout(7) << __func__ << " already prepared, waiting on "
	    << m->get_orig_source_addr() << dendl;
    wait_for_finished_proposal(op, new C_RetryMessage(this, op));
  } else {
    // mark new guy up.
    pending_inc.new_up_client[from] = m->get_orig_source_addrs();
    pending_inc.new_up_cluster[from] = m->cluster_addrs;
    pending_inc.new_hb_back_up[from] = m->hb_back_addrs;
    pending_inc.new_hb_front_up[from] = m->hb_front_addrs;
    down_pending_out.erase(from);  // if any
    // remember the boot-time weight; should_propose() folds these in
    if (m->sb.weight)
      osd_weight[from] = m->sb.weight;
    // set uuid?
    dout(10) << " setting osd." << from << " uuid to " << m->sb.osd_fsid
	     << dendl;
    if (!osdmap.exists(from) || osdmap.get_uuid(from) != m->sb.osd_fsid) {
      // preprocess should have caught this;  if not, assert.
      ceph_assert(!osdmap.exists(from) || osdmap.get_uuid(from).is_zero());
      pending_inc.new_uuid[from] = m->sb.osd_fsid;
    }
    // fresh osd?
    if (m->sb.newest_map == 0 && osdmap.exists(from)) {
      const osd_info_t& i = osdmap.get_info(from);
      if (i.up_from > i.lost_at) {
	dout(10) << " fresh osd; marking lost_at too" << dendl;
	pending_inc.new_lost[from] = osdmap.get_epoch();
      }
    }
    // metadata
    bufferlist osd_metadata;
    encode(m->metadata, osd_metadata);
    pending_metadata[from] = osd_metadata;
    pending_metadata_rm.erase(from);
    // adjust last clean unmount epoch?
    const osd_info_t& info = osdmap.get_info(from);
    dout(10) << " old osd_info: " << info << dendl;
    if (m->sb.mounted > info.last_clean_begin ||
	(m->sb.mounted == info.last_clean_begin &&
	 m->sb.clean_thru > info.last_clean_end)) {
      epoch_t begin = m->sb.mounted;
      epoch_t end = m->sb.clean_thru;
      dout(10) << __func__ << " osd." << from << " last_clean_interval "
	       << "[" << info.last_clean_begin << "," << info.last_clean_end
	       << ") -> [" << begin << "-" << end << ")"
	       << dendl;
      pending_inc.new_last_clean_interval[from] =
	pair<epoch_t,epoch_t>(begin, end);
    }
    // update laggy statistics (exponentially weighted by
    // mon_osd_laggy_weight) from how this boot relates to the last down
    if (pending_inc.new_xinfo.count(from) == 0)
      pending_inc.new_xinfo[from] = osdmap.osd_xinfo[from];
    osd_xinfo_t& xi = pending_inc.new_xinfo[from];
    if (m->boot_epoch == 0) {
      // first boot of this daemon instance: decay the laggy stats
      xi.laggy_probability *= (1.0 - g_conf()->mon_osd_laggy_weight);
      xi.laggy_interval *= (1.0 - g_conf()->mon_osd_laggy_weight);
      dout(10) << " not laggy, new xi " << xi << dendl;
    } else {
      // rebooted after being marked down: fold the down interval in
      if (xi.down_stamp.sec()) {
	int interval = ceph_clock_now().sec() -
	  xi.down_stamp.sec();
	if (g_conf()->mon_osd_laggy_max_interval &&
	    (interval > g_conf()->mon_osd_laggy_max_interval)) {
	  interval = g_conf()->mon_osd_laggy_max_interval;
	}
	xi.laggy_interval =
	  interval * g_conf()->mon_osd_laggy_weight +
	  xi.laggy_interval * (1.0 - g_conf()->mon_osd_laggy_weight);
      }
      xi.laggy_probability =
	g_conf()->mon_osd_laggy_weight +
	xi.laggy_probability * (1.0 - g_conf()->mon_osd_laggy_weight);
      dout(10) << " laggy, now xi " << xi << dendl;
    }
    // set features shared by the osd
    if (m->osd_features)
      xi.features = m->osd_features;
    else
      xi.features = m->get_connection()->get_features();
    // mark in?
    if ((g_conf()->mon_osd_auto_mark_auto_out_in &&
	 (oldstate & CEPH_OSD_AUTOOUT)) ||
	(g_conf()->mon_osd_auto_mark_new_in && (oldstate & CEPH_OSD_NEW)) ||
	(g_conf()->mon_osd_auto_mark_in)) {
      if (can_mark_in(from)) {
	// restore the pre-out weight when we have one, otherwise fully in
	if (xi.old_weight > 0) {
	  pending_inc.new_weight[from] = xi.old_weight;
	  xi.old_weight = 0;
	} else {
	  pending_inc.new_weight[from] = CEPH_OSD_IN;
	}
      } else {
	dout(7) << __func__ << " NOIN set, will not mark in "
		<< m->get_orig_source_addr() << dendl;
      }
    }
    // wait
    wait_for_finished_proposal(op, new C_Booted(this, op));
  }
  return true;
}
// Common reply path once an osd's boot has been (or did not need to be)
// reflected in the map: send it the maps since its current epoch.
void OSDMonitor::_booted(MonOpRequestRef op, bool logit)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MOSDBoot>();
  dout(7) << "_booted " << m->get_orig_source_inst()
	  << " w " << m->sb.weight << " from " << m->sb.current_epoch << dendl;
  if (logit) {
    mon.clog->info() << m->get_source() << " " << m->get_orig_source_addrs()
		     << " boot";
  }
  send_latest(op, m->sb.current_epoch+1);
}
// -------------
// full
// Validate an osd's full/nearfull/backfillfull state report.  Returns
// true when handled or ignored here; false to proceed to prepare_full().
bool OSDMonitor::preprocess_full(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MOSDFull>();
  int from = m->get_orig_source().num();
  set<string> state;
  unsigned mask = CEPH_OSD_NEARFULL | CEPH_OSD_BACKFILLFULL | CEPH_OSD_FULL;
  // check permissions, ignore if failed
  MonSession *session = op->get_session();
  if (!session)
    goto ignore;
  if (!session->is_capable("osd", MON_CAP_X)) {
    dout(0) << "MOSDFull from entity with insufficient privileges:"
	    << session->caps << dendl;
    goto ignore;
  }
  // ignore a full message from the osd instance that already went down
  if (!osdmap.exists(from)) {
    dout(7) << __func__ << " ignoring full message from nonexistent "
	    << m->get_orig_source_inst() << dendl;
    goto ignore;
  }
  // message must come from the osd's current (up) instance, not an old one
  if ((!osdmap.is_up(from) &&
       osdmap.get_most_recent_addrs(from).legacy_equals(
	 m->get_orig_source_addrs())) ||
      (osdmap.is_up(from) &&
       !osdmap.get_addrs(from).legacy_equals(m->get_orig_source_addrs()))) {
    dout(7) << __func__ << " ignoring full message from down "
	    << m->get_orig_source_inst() << dendl;
    goto ignore;
  }
  OSDMap::calc_state_set(osdmap.get_state(from), state);
  // nothing to change?  just ack with the requested map version
  if ((osdmap.get_state(from) & mask) == m->state) {
    dout(7) << __func__ << " state already " << state << " for osd." << from
	    << " " << m->get_orig_source_inst() << dendl;
    _reply_map(op, m->version);
    goto ignore;
  }
  dout(10) << __func__ << " want state " << state << " for osd." << from
	   << " " << m->get_orig_source_inst() << dendl;
  return false;
 ignore:
  return true;
}
// Commit an OSD's reported full/backfillfull/nearfull bits into the
// pending incremental.  NOTE: pending_inc.new_state entries are XOR
// masks applied against the committed map state.
bool OSDMonitor::prepare_full(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MOSDFull>();
  const int from = m->get_orig_source().num();
  const unsigned mask = CEPH_OSD_NEARFULL | CEPH_OSD_BACKFILLFULL | CEPH_OSD_FULL;
  const unsigned want_state = m->state & mask; // safety first
  // compute the effective current state: committed state with any
  // pending XOR mask folded in
  unsigned cur_state = osdmap.get_state(from);
  auto p = pending_inc.new_state.find(from);
  if (p != pending_inc.new_state.end()) {
    cur_state ^= p->second;
  }
  cur_state &= mask;
  set<string> want_state_set, cur_state_set;
  OSDMap::calc_state_set(want_state, want_state_set);
  OSDMap::calc_state_set(cur_state, cur_state_set);
  if (cur_state != want_state) {
    if (p != pending_inc.new_state.end()) {
      // drop any pending changes to these bits before re-adding below
      p->second &= ~mask;
    } else {
      pending_inc.new_state[from] = 0;
    }
    // XOR of committed bits and wanted bits flips exactly what's needed
    pending_inc.new_state[from] |= (osdmap.get_state(from) & mask) ^ want_state;
    dout(7) << __func__ << " osd." << from << " " << cur_state_set
	    << " -> " << want_state_set << dendl;
  } else {
    dout(7) << __func__ << " osd." << from << " " << cur_state_set
	    << " = wanted " << want_state_set << ", just waiting" << dendl;
  }
  // reply with the map once the proposal (or the in-flight one) commits
  wait_for_finished_proposal(op, new C_ReplyMap(this, op, m->version));
  return true;
}
// -------------
// alive
// Fast-path check of an MOSDAlive (up_thru request).  Returns true if
// handled/ignored here (caps failure, stale sender, or up_thru already
// satisfied); false if prepare_alive() must propose the update.
bool OSDMonitor::preprocess_alive(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MOSDAlive>();
  int from = m->get_orig_source().num();
  // check permissions, ignore if failed
  MonSession *session = op->get_session();
  if (!session)
    goto ignore;
  if (!session->is_capable("osd", MON_CAP_X)) {
    dout(0) << "attempt to send MOSDAlive from entity with insufficient privileges:"
	    << session->caps << dendl;
    goto ignore;
  }
  // only accept from the currently-up instance of this osd
  if (!osdmap.is_up(from) ||
      !osdmap.get_addrs(from).legacy_equals(m->get_orig_source_addrs())) {
    dout(7) << "preprocess_alive ignoring alive message from down "
	    << m->get_orig_source() << " " << m->get_orig_source_addrs()
	    << dendl;
    goto ignore;
  }
  if (osdmap.get_up_thru(from) >= m->want) {
    // yup.
    dout(7) << "preprocess_alive want up_thru " << m->want << " dup from " << m->get_orig_source_inst() << dendl;
    _reply_map(op, m->version);
    return true;
  }
  dout(10) << "preprocess_alive want up_thru " << m->want
	   << " from " << m->get_orig_source_inst() << dendl;
  return false;
 ignore:
  return true;
}
// Record an OSD's up_thru advance in the pending incremental and reply
// with the map once the proposal commits.
bool OSDMonitor::prepare_alive(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MOSDAlive>();
  int from = m->get_orig_source().num();
  if (0) { // we probably don't care much about these
    mon.clog->debug() << m->get_orig_source_inst() << " alive";
  }
  dout(7) << "prepare_alive want up_thru " << m->want << " have " << m->version
	  << " from " << m->get_orig_source_inst() << dendl;
  update_up_thru(from, m->version); // set to the latest map the OSD has
  wait_for_finished_proposal(op, new C_ReplyMap(this, op, m->version));
  return true;
}
// Reply to a request by sending the requester the osdmap(s) from epoch e.
void OSDMonitor::_reply_map(MonOpRequestRef op, epoch_t e)
{
  op->mark_osdmon_event(__func__);
  dout(7) << "_reply_map " << e
	  << " from " << op->get_req()->get_orig_source_inst()
	  << dendl;
  send_latest(op, e);
}
// pg_created
// Fast-path check of an MOSDPGCreated.  Only validates the session and
// caps; the message itself is always forwarded to the leader (return
// false) so the pg-created bookkeeping happens in prepare_pg_created().
bool OSDMonitor::preprocess_pg_created(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MOSDPGCreated>();
  dout(10) << __func__ << " " << *m << dendl;
  auto session = op->get_session();
  mon.no_reply(op);  // no reply is ever sent for this message
  if (!session) {
    dout(10) << __func__ << ": no monitor session!" << dendl;
    return true;
  }
  if (!session->is_capable("osd", MON_CAP_X)) {
    derr << __func__ << " received from entity "
         << "with insufficient privileges " << session->caps << dendl;
    return true;
  }
  // always forward the "created!" to the leader
  return false;
}
// Record a pg-created notification from an OSD.  Queues the pgid so the
// next proposal removes it from the creating set.  Returns true if a
// proposal is needed, false if the message was discarded.
bool OSDMonitor::prepare_pg_created(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MOSDPGCreated>();
  dout(10) << __func__ << " " << *m << dendl;
  auto src = m->get_orig_source();
  auto from = src.num();
  // only trust the currently-up instance of an osd
  if (!src.is_osd() ||
      !mon.osdmon()->osdmap.is_up(from) ||
      !mon.osdmon()->osdmap.get_addrs(from).legacy_equals(
	m->get_orig_source_addrs())) {
    dout(1) << __func__ << " ignoring stats from non-active osd." << dendl;
    return false;
  }
  pending_created_pgs.push_back(m->pgid);
  return true;
}
// Fast-path validation of an MOSDPGReadyToMerge.  Drops messages that
// fail caps checks or that no longer match the pool's pg_num /
// pg_num_pending state (e.g. a stale or already-applied merge).  Returns
// false to forward valid messages to prepare_pg_ready_to_merge().
bool OSDMonitor::preprocess_pg_ready_to_merge(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MOSDPGReadyToMerge>();
  dout(10) << __func__ << " " << *m << dendl;
  const pg_pool_t *pi;
  auto session = op->get_session();
  if (!session) {
    dout(10) << __func__ << ": no monitor session!" << dendl;
    goto ignore;
  }
  if (!session->is_capable("osd", MON_CAP_X)) {
    derr << __func__ << " received from entity "
         << "with insufficient privileges " << session->caps << dendl;
    goto ignore;
  }
  pi = osdmap.get_pg_pool(m->pgid.pool());
  if (!pi) {
    derr << __func__ << " pool for " << m->pgid << " dne" << dendl;
    goto ignore;
  }
  // merge already applied: the source pg is past the end of the pool
  if (pi->get_pg_num() <= m->pgid.ps()) {
    dout(20) << " pg_num " << pi->get_pg_num() << " already < " << m->pgid << dendl;
    goto ignore;
  }
  // only the current last pg in the pool may be the merge source
  if (pi->get_pg_num() != m->pgid.ps() + 1) {
    derr << " OSD trying to merge wrong pgid " << m->pgid << dendl;
    goto ignore;
  }
  // the merge must actually be pending (pg_num_pending <= source ps)
  if (pi->get_pg_num_pending() > m->pgid.ps()) {
    dout(20) << " pg_num_pending " << pi->get_pg_num_pending() << " > " << m->pgid << dendl;
    goto ignore;
  }
  return false;
 ignore:
  mon.no_reply(op);
  return true;
}
// Apply a pg merge-readiness report: either commit the pg_num decrease
// (m->ready) or back off the pending merge.  May also inject an
// artificial pg_num bounce for testing (mon_inject_pg_merge_bounce_probability).
bool OSDMonitor::prepare_pg_ready_to_merge(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MOSDPGReadyToMerge>();
  dout(10) << __func__ << " " << *m << dendl;
  // work on the pending pool state if one exists, else the committed one
  pg_pool_t p;
  if (pending_inc.new_pools.count(m->pgid.pool()))
    p = pending_inc.new_pools[m->pgid.pool()];
  else
    p = *osdmap.get_pg_pool(m->pgid.pool());
  // the checks in preprocess ran against the committed map; re-validate
  // against pending state in case pg_num changed concurrently
  if (p.get_pg_num() != m->pgid.ps() + 1 ||
      p.get_pg_num_pending() > m->pgid.ps()) {
    dout(10) << __func__
	     << " race with concurrent pg_num[_pending] update, will retry"
	     << dendl;
    wait_for_finished_proposal(op, new C_RetryMessage(this, op));
    return true;
  }
  if (m->ready) {
    p.dec_pg_num(m->pgid,
		 pending_inc.epoch,
		 m->source_version,
		 m->target_version,
		 m->last_epoch_started,
		 m->last_epoch_clean);
    p.last_change = pending_inc.epoch;
  } else {
    // back off the merge attempt!
    p.set_pg_num_pending(p.get_pg_num());
  }
  // force pre-nautilus clients to resend their ops, since they
  // don't understand pg_num_pending changes form a new interval
  p.last_force_op_resend_prenautilus = pending_inc.epoch;
  pending_inc.new_pools[m->pgid.pool()] = p;
  // test hook: occasionally bounce pg_num back up to exercise merge abort
  auto prob = g_conf().get_val<double>("mon_inject_pg_merge_bounce_probability");
  if (m->ready &&
      prob > 0 &&
      prob > (double)(rand() % 1000)/1000.0) {
    derr << __func__ << " injecting pg merge pg_num bounce" << dendl;
    auto n = new MMonCommand(mon.monmap->get_fsid());
    n->set_connection(m->get_connection());
    n->cmd = { "{\"prefix\":\"osd pool set\", \"pool\": \"" +
	       osdmap.get_pool_name(m->pgid.pool()) +
	       "\", \"var\": \"pg_num_actual\", \"val\": \"" +
	       stringify(m->pgid.ps() + 1) + "\"}" };
    MonOpRequestRef nop = mon.op_tracker.create_request<MonOpRequest>(n);
    nop->set_type_service();
    wait_for_finished_proposal(op, new C_RetryMessage(this, nop));
  } else {
    wait_for_finished_proposal(op, new C_ReplyMap(this, op, m->version));
  }
  return true;
}
// -------------
// pg_temp changes
// Fast-path check of an MOSDPGTemp (pg_temp mapping updates from a
// primary).  Filters out entries for removed pools or stale primaries,
// and returns true (replying with the map) if no entry would actually
// change anything; returns false to forward real changes to prepare.
bool OSDMonitor::preprocess_pgtemp(MonOpRequestRef op)
{
  auto m = op->get_req<MOSDPGTemp>();
  dout(10) << "preprocess_pgtemp " << *m << dendl;
  mempool::osdmap::vector<int> empty;
  int from = m->get_orig_source().num();
  size_t ignore_cnt = 0;
  // check caps
  MonSession *session = op->get_session();
  if (!session)
    goto ignore;
  if (!session->is_capable("osd", MON_CAP_X)) {
    dout(0) << "attempt to send MOSDPGTemp from entity with insufficient caps "
	    << session->caps << dendl;
    goto ignore;
  }
  // only accept from the currently-up instance of this osd
  if (!osdmap.is_up(from) ||
      !osdmap.get_addrs(from).legacy_equals(m->get_orig_source_addrs())) {
    dout(7) << "ignoring pgtemp message from down "
	    << m->get_orig_source() << " " << m->get_orig_source_addrs()
	    << dendl;
    goto ignore;
  }
  // a forced update bypasses the no-op filtering below
  if (m->forced) {
    return false;
  }
  for (auto p = m->pg_temp.begin(); p != m->pg_temp.end(); ++p) {
    dout(20) << " " << p->first
	     << (osdmap.pg_temp->count(p->first) ? osdmap.pg_temp->get(p->first) : empty)
	     << " -> " << p->second << dendl;
    // does the pool exist?
    if (!osdmap.have_pg_pool(p->first.pool())) {
      /*
       * 1. If the osdmap does not have the pool, it means the pool has been
       *    removed in-between the osd sending this message and us handling it.
       * 2. If osdmap doesn't have the pool, it is safe to assume the pool does
       *    not exist in the pending either, as the osds would not send a
       *    message about a pool they know nothing about (yet).
       * 3. However, if the pool does exist in the pending, then it must be a
       *    new pool, and not relevant to this message (see 1).
       */
      dout(10) << __func__ << " ignore " << p->first << " -> " << p->second
	       << ": pool has been removed" << dendl;
      ignore_cnt++;
      continue;
    }
    int acting_primary = -1;
    osdmap.pg_to_up_acting_osds(
      p->first, nullptr, nullptr, nullptr, &acting_primary);
    if (acting_primary != from) {
      /* If the source isn't the primary based on the current osdmap, we know
       * that the interval changed and that we can discard this message.
       * Indeed, we must do so to avoid 16127 since we can't otherwise determine
       * which of two pg temp mappings on the same pg is more recent.
       */
      dout(10) << __func__ << " ignore " << p->first << " -> " << p->second
	       << ": primary has changed" << dendl;
      ignore_cnt++;
      continue;
    }
    // removal?
    if (p->second.empty() && (osdmap.pg_temp->count(p->first) ||
			      osdmap.primary_temp->count(p->first)))
      return false;
    // change?
    // NOTE: we assume that this will clear pg_primary, so consider
    //       an existing pg_primary field to imply a change
    if (p->second.size() &&
	(osdmap.pg_temp->count(p->first) == 0 ||
	 osdmap.pg_temp->get(p->first) != p->second ||
	 osdmap.primary_temp->count(p->first)))
      return false;
  }
  // should we ignore all the pgs?
  if (ignore_cnt == m->pg_temp.size())
    goto ignore;
  // every remaining entry already matches the map: just reply with it
  dout(7) << "preprocess_pgtemp e" << m->map_epoch << " no changes from " << m->get_orig_source_inst() << dendl;
  _reply_map(op, m->map_epoch);
  return true;
 ignore:
  mon.no_reply(op);
  return true;
}
void OSDMonitor::update_up_thru(int from, epoch_t up_thru)
{
epoch_t old_up_thru = osdmap.get_up_thru(from);
auto ut = pending_inc.new_up_thru.find(from);
if (ut != pending_inc.new_up_thru.end()) {
old_up_thru = ut->second;
}
if (up_thru > old_up_thru) {
// set up_thru too, so the osd doesn't have to ask again
pending_inc.new_up_thru[from] = up_thru;
}
}
// Commit pg_temp mappings from an MOSDPGTemp into the pending
// incremental, skipping pools that are gone or being removed, and
// clearing primary_temp for each touched pg.
bool OSDMonitor::prepare_pgtemp(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MOSDPGTemp>();
  int from = m->get_orig_source().num();
  dout(7) << "prepare_pgtemp e" << m->map_epoch << " from " << m->get_orig_source_inst() << dendl;
  for (map<pg_t,vector<int32_t> >::iterator p = m->pg_temp.begin(); p != m->pg_temp.end(); ++p) {
    uint64_t pool = p->first.pool();
    // skip pools queued for deletion in this proposal
    if (pending_inc.old_pools.count(pool)) {
      dout(10) << __func__ << " ignore " << p->first << " -> " << p->second
	       << ": pool pending removal" << dendl;
      continue;
    }
    // skip pools that were already removed from the committed map
    if (!osdmap.have_pg_pool(pool)) {
      dout(10) << __func__ << " ignore " << p->first << " -> " << p->second
	       << ": pool has been removed" << dendl;
      continue;
    }
    pending_inc.new_pg_temp[p->first] =
      mempool::osdmap::vector<int>(p->second.begin(), p->second.end());
    // unconditionally clear pg_primary (until this message can encode
    // a change for that, too.. at which point we need to also fix
    // preprocess_pg_temp)
    if (osdmap.primary_temp->count(p->first) ||
	pending_inc.new_primary_temp.count(p->first))
      pending_inc.new_primary_temp[p->first] = -1;
  }
  // set up_thru too, so the osd doesn't have to ask again
  update_up_thru(from, m->map_epoch);
  wait_for_finished_proposal(op, new C_ReplyMap(this, op, m->map_epoch));
  return true;
}
// ---
// Fast-path check of an MRemoveSnaps.  If every listed snap is already
// removed (or its pool is gone) there is nothing to propose: ack
// octopus+ senders immediately and return true.  Return false as soon
// as any snap still needs removal, deferring to prepare_remove_snaps().
bool OSDMonitor::preprocess_remove_snaps(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MRemoveSnaps>();
  dout(7) << "preprocess_remove_snaps " << *m << dendl;
  // check privilege, ignore if failed
  MonSession *session = op->get_session();
  mon.no_reply(op);
  if (!session)
    goto ignore;
  if (!session->caps.is_capable(
	cct,
	session->entity_name,
        "osd", "osd pool rmsnap", {}, true, true, false,
	session->get_peer_socket_addr())) {
    dout(0) << "got preprocess_remove_snaps from entity with insufficient caps "
	    << session->caps << dendl;
    goto ignore;
  }
  for (map<int, vector<snapid_t> >::iterator q = m->snaps.begin();
       q != m->snaps.end();
       ++q) {
    if (!osdmap.have_pg_pool(q->first)) {
      dout(10) << " ignoring removed_snaps " << q->second
	       << " on non-existent pool " << q->first << dendl;
      continue;
    }
    const pg_pool_t *pi = osdmap.get_pg_pool(q->first);
    for (vector<snapid_t>::iterator p = q->second.begin();
	 p != q->second.end();
	 ++p) {
      // a snap beyond snap_seq or not yet marked removed needs a proposal
      if (*p > pi->get_snap_seq() ||
	  !_is_removed_snap(q->first, *p)) {
	return false;
      }
    }
  }
  // octopus+ senders expect an explicit ack echoing the snap list
  if (HAVE_FEATURE(m->get_connection()->get_features(), SERVER_OCTOPUS)) {
    auto reply = make_message<MRemoveSnaps>();
    reply->snaps = m->snaps;
    mon.send_reply(op, reply.detach());
  }
 ignore:
  return true;
}
// Queue snap removals into the pending incremental.  Pre-octopus maps
// also track removals in the pool's legacy removed_snaps set; octopus+
// uses pending_inc.new_removed_snaps only.  Octopus+ senders get an
// ack once the proposal commits.
bool OSDMonitor::prepare_remove_snaps(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MRemoveSnaps>();
  dout(7) << "prepare_remove_snaps " << *m << dendl;
  for (auto& [pool, snaps] : m->snaps) {
    if (!osdmap.have_pg_pool(pool)) {
      dout(10) << " ignoring removed_snaps " << snaps
	       << " on non-existent pool " << pool << dendl;
      continue;
    }
    pg_pool_t& pi = osdmap.pools[pool];
    for (auto s : snaps) {
      // skip snaps already removed in the map or already queued in this
      // proposal (either the legacy per-pool set or new_removed_snaps)
      if (!_is_removed_snap(pool, s) &&
	  (!pending_inc.new_pools.count(pool) ||
	   !pending_inc.new_pools[pool].removed_snaps.contains(s)) &&
	  (!pending_inc.new_removed_snaps.count(pool) ||
	   !pending_inc.new_removed_snaps[pool].contains(s))) {
	pg_pool_t *newpi = pending_inc.get_new_pool(pool, &pi);
	if (osdmap.require_osd_release < ceph_release_t::octopus) {
	  newpi->removed_snaps.insert(s);
	  dout(10) << " pool " << pool << " removed_snaps added " << s
		   << " (now " << newpi->removed_snaps << ")" << dendl;
	}
	newpi->flags |= pg_pool_t::FLAG_SELFMANAGED_SNAPS;
	// keep snap_seq ahead of every removed snap id
	if (s > newpi->get_snap_seq()) {
	  dout(10) << " pool " << pool << " snap_seq "
		   << newpi->get_snap_seq() << " -> " << s << dendl;
	  newpi->set_snap_seq(s);
	}
	newpi->set_snap_epoch(pending_inc.epoch);
	dout(10) << " added pool " << pool << " snap " << s
		 << " to removed_snaps queue" << dendl;
	pending_inc.new_removed_snaps[pool].insert(s);
      }
    }
  }
  if (HAVE_FEATURE(m->get_connection()->get_features(), SERVER_OCTOPUS)) {
    auto reply = make_message<MRemoveSnaps>();
    reply->snaps = m->snaps;
    wait_for_finished_proposal(op, new C_ReplyOp(this, op, reply));
  }
  return true;
}
// Answer an OSD's request for purged_snaps records covering epochs
// [m->start, m->last], read straight from the mon store.  Handled
// entirely in preprocess; no proposal is ever needed.
//
// The reply is size-capped: we stop adding entries once the estimated
// payload exceeds ~1 MiB, and report the last epoch actually included so
// the OSD can ask again from there.
bool OSDMonitor::preprocess_get_purged_snaps(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MMonGetPurgedSnaps>();
  dout(7) << __func__ << " " << *m << dendl;

  map<epoch_t,mempool::osdmap::map<int64_t,snap_interval_set_t>> r;

  string k = make_purged_snap_epoch_key(m->start);
  auto it = mon.store->get_iterator(OSD_SNAP_PREFIX);
  it->upper_bound(k);
  unsigned long epoch = m->last;
  // running estimate of the reply payload size; must persist across
  // iterations so the 1 MiB cap bounds the whole message, not just a
  // single entry (a loop-local counter would never accumulate)
  size_t bytes = 0;
  while (it->valid()) {
    if (it->key().find("purged_epoch_") != 0) {
      break;
    }
    string key = it->key();
    int n = sscanf(key.c_str(), "purged_epoch_%lx", &epoch);
    if (n != 1) {
      derr << __func__ << " unable to parse key '" << it->key() << "'" << dendl;
    } else if (epoch > m->last) {
      // past the requested range
      break;
    } else {
      bufferlist bl = it->value();
      auto p = bl.cbegin();
      auto &v = r[epoch];
      try {
	ceph::decode(v, p);
      } catch (ceph::buffer::error& e) {
	derr << __func__ << " unable to parse value for key '" << it->key()
	     << "': \n";
	bl.hexdump(*_dout);
	*_dout << dendl;
      }
      // rough per-entry encoded size estimate
      bytes += 4 + v.size() * 16;
    }
    if (bytes > 1048576) {
      // impose a semi-arbitrary limit to message size
      break;
    }
    it->next();
  }

  auto reply = make_message<MMonGetPurgedSnapsReply>(m->start, epoch);
  reply->purged_snaps.swap(r);
  mon.send_reply(op, reply.detach());

  return true;
}
// osd beacon
// Fast-path check of an OSD beacon: validate the session and caps only.
// Valid beacons always return false so they are forwarded to the leader.
bool OSDMonitor::preprocess_beacon(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  // check caps
  auto session = op->get_session();
  mon.no_reply(op);  // beacons are never replied to directly
  if (!session) {
    dout(10) << __func__ << " no monitor session!" << dendl;
    return true;
  }
  if (!session->is_capable("osd", MON_CAP_X)) {
    derr << __func__ << " received from entity "
         << "with insufficient privileges " << session->caps << dendl;
    return true;
  }
  // Always forward the beacon to the leader, even if they are the same as
  // the old one. The leader will mark as down osds that haven't sent
  // beacon for a few minutes.
  return false;
}
// Process an OSD beacon on the leader: refresh liveness bookkeeping
// (last_osd_report, osd_epochs, last_epoch_clean) and, if the OSD
// reports a newer last_purged_snaps_scrub, queue an xinfo update.
// Returns true only when a proposal is needed for the xinfo change.
bool OSDMonitor::prepare_beacon(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  const auto beacon = op->get_req<MOSDBeacon>();
  const auto src = beacon->get_orig_source();
  dout(10) << __func__ << " " << *beacon
	   << " from " << src << dendl;
  int from = src.num();
  // only accept from the currently-up instance of this osd
  if (!src.is_osd() ||
      !osdmap.is_up(from) ||
      !osdmap.get_addrs(from).legacy_equals(beacon->get_orig_source_addrs())) {
    if (src.is_osd() && !osdmap.is_up(from)) {
      // share some new maps with this guy in case it may not be
      // aware of its own deadness...
      send_latest(op, beacon->version+1);
    }
    dout(1) << " ignoring beacon from non-active osd." << from << dendl;
    return false;
  }
  // record when (and how often) this osd said it would report in
  last_osd_report[from].first = ceph_clock_now();
  last_osd_report[from].second = beacon->osd_beacon_report_interval;
  osd_epochs[from] = beacon->version;
  for (const auto& pg : beacon->pgs) {
    if (auto* pool = osdmap.get_pg_pool(pg.pool()); pool != nullptr) {
      unsigned pg_num = pool->get_pg_num();
      last_epoch_clean.report(pg_num, pg, beacon->min_last_epoch_clean);
    }
  }
  // propagate a newer purged-snaps scrub stamp into the osd's xinfo
  if (osdmap.osd_xinfo[from].last_purged_snaps_scrub <
      beacon->last_purged_snaps_scrub) {
    if (pending_inc.new_xinfo.count(from) == 0) {
      pending_inc.new_xinfo[from] = osdmap.osd_xinfo[from];
    }
    pending_inc.new_xinfo[from].last_purged_snaps_scrub =
      beacon->last_purged_snaps_scrub;
    return true;
  } else {
    return false;
  }
}
// ---------------
// map helpers
// Send the requester the osdmap(s) it needs: the full current map if
// start == 0, otherwise incrementals from `start` onward.
void OSDMonitor::send_latest(MonOpRequestRef op, epoch_t start)
{
  op->mark_osdmon_event(__func__);
  dout(5) << "send_latest to " << op->get_req()->get_orig_source_inst()
	  << " start " << start << dendl;
  if (start == 0)
    send_full(op);
  else
    send_incremental(op, start);
}
// Build an MOSDMap message carrying the current full osdmap encoded for
// the given feature set, annotated with the trim lower bound and the
// newest committed epoch.
MOSDMap *OSDMonitor::build_latest_full(uint64_t features)
{
  const epoch_t latest = osdmap.get_epoch();
  auto *reply = new MOSDMap(mon.monmap->fsid, features);
  get_version_full(latest, features, reply->maps[latest]);
  reply->cluster_osdmap_trim_lower_bound = get_first_committed();
  reply->newest_map = latest;
  return reply;
}
// Build an MOSDMap covering epochs [from, to]: incremental maps where
// available, falling back to a full map for any epoch whose incremental
// is missing.  Aborts if neither form of a map can be found.
MOSDMap *OSDMonitor::build_incremental(epoch_t from, epoch_t to, uint64_t features)
{
  dout(10) << "build_incremental [" << from << ".." << to << "] with features "
	   << std::hex << features << std::dec << dendl;
  MOSDMap *m = new MOSDMap(mon.monmap->fsid, features);
  m->cluster_osdmap_trim_lower_bound = get_first_committed();
  m->newest_map = osdmap.get_epoch();
  // walk backwards; `e > 0` guards against epoch_t underflow when from==0
  for (epoch_t e = to; e >= from && e > 0; e--) {
    bufferlist bl;
    int err = get_version(e, features, bl);
    if (err == 0) {
      ceph_assert(bl.length());
      // if (get_version(e, bl) > 0) {
      dout(20) << "build_incremental    inc " << e << " "
	       << bl.length() << " bytes" << dendl;
      m->incremental_maps[e] = bl;
    } else {
      ceph_assert(err == -ENOENT);
      ceph_assert(!bl.length());
      // no incremental for this epoch; fall back to the full map
      get_version_full(e, features, bl);
      if (bl.length() > 0) {
      //else if (get_version("full", e, bl) > 0) {
	dout(20) << "build_incremental   full " << e << " "
		 << bl.length() << " bytes" << dendl;
	m->maps[e] = bl;
      } else {
	ceph_abort();  // we should have all maps.
      }
    }
  }
  return m;
}
// Reply to a request with the full current osdmap, encoded for the
// requester's connection features.
void OSDMonitor::send_full(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  dout(5) << "send_full to " << op->get_req()->get_orig_source_inst() << dendl;
  mon.send_reply(op, build_latest_full(op->get_session()->con_features));
}
// Send incremental maps from `first` onward for this request.  If the
// session is proxied through another monitor, route the request back so
// that monitor sends the maps; otherwise send them directly.
void OSDMonitor::send_incremental(MonOpRequestRef op, epoch_t first)
{
  op->mark_osdmon_event(__func__);
  MonSession *s = op->get_session();
  ceph_assert(s);
  if (s->proxy_con) {
    // oh, we can tell the other mon to do it
    dout(10) << __func__ << " asking proxying mon to send_incremental from "
	     << first << dendl;
    MRoute *r = new MRoute(s->proxy_tid, NULL);
    r->send_osdmap_first = first;
    s->proxy_con->send_message(r);
    op->mark_event("reply: send routed send_osdmap_first reply");
  } else {
    // do it ourselves
    send_incremental(first, s, false, op);
  }
}
// Send a session maps [first .. current], skipping epochs it already
// has (session->osd_epoch).  If `first` predates our first committed
// map, seed with a full map at the trim boundary.  With `req` set, only
// one reply/batch is sent (the peer will re-request for more); with
// `onetime`, only the first batch is pushed.
void OSDMonitor::send_incremental(epoch_t first,
				  MonSession *session,
				  bool onetime,
				  MonOpRequestRef req)
{
  dout(5) << "send_incremental [" << first << ".." << osdmap.get_epoch() << "]"
	  << " to " << session->name << dendl;
  // get feature of the peer
  // use quorum_con_features, if it's an anonymous connection.
  uint64_t features = session->con_features ? session->con_features :
    mon.get_quorum_con_features();
  if (first <= session->osd_epoch) {
    dout(10) << __func__ << " " << session->name << " should already have epoch "
	     << session->osd_epoch << dendl;
    first = session->osd_epoch + 1;
  }
  if (first < get_first_committed()) {
    // requested maps are already trimmed: start from a full map at the
    // oldest epoch we still have
    MOSDMap *m = new MOSDMap(osdmap.get_fsid(), features);
    m->cluster_osdmap_trim_lower_bound = get_first_committed();
    m->newest_map = osdmap.get_epoch();
    first = get_first_committed();
    bufferlist bl;
    int err = get_version_full(first, features, bl);
    ceph_assert(err == 0);
    ceph_assert(bl.length());
    dout(20) << "send_incremental starting with base full "
	     << first << " " << bl.length() << " bytes" << dendl;
    m->maps[first] = bl;
    if (req) {
      // one reply per request; the peer asks again for the rest
      mon.send_reply(req, m);
      session->osd_epoch = first;
      return;
    } else {
      session->con->send_message(m);
      session->osd_epoch = first;
    }
    first++;
  }
  while (first <= osdmap.get_epoch()) {
    // batch at most osd_map_message_max epochs per message
    epoch_t last = std::min<epoch_t>(first + g_conf()->osd_map_message_max - 1,
				     osdmap.get_epoch());
    MOSDMap *m = build_incremental(first, last, features);
    if (req) {
      // send some maps.  it may not be all of them, but it will get them
      // started.
      mon.send_reply(req, m);
    } else {
      session->con->send_message(m);
      first = last + 1;
    }
    session->osd_epoch = last;
    if (onetime || req)
      break;
  }
}
// Fetch an incremental map encoded for the current quorum features.
int OSDMonitor::get_version(version_t ver, bufferlist& bl)
{
  return get_version(ver, mon.get_quorum_con_features(), bl);
}
// Re-encode an incremental map blob for a (possibly older) feature set,
// including any embedded full map or crush map it carries.  Replaces
// `bl` in place.
void OSDMonitor::reencode_incremental_map(bufferlist& bl, uint64_t features)
{
  OSDMap::Incremental inc;
  auto q = bl.cbegin();
  inc.decode(q);
  // always encode with subset of osdmap's canonical features
  uint64_t f = features & inc.encode_features;
  dout(20) << __func__ << " " << inc.epoch << " with features " << f
	   << dendl;
  bl.clear();
  if (inc.fullmap.length()) {
    // embedded full map?
    OSDMap m;
    m.decode(inc.fullmap);
    inc.fullmap.clear();
    m.encode(inc.fullmap, f | CEPH_FEATURE_RESERVED);
  }
  if (inc.crush.length()) {
    // embedded crush map
    CrushWrapper c;
    auto p = inc.crush.cbegin();
    c.decode(p);
    inc.crush.clear();
    c.encode(inc.crush, f);
  }
  inc.encode(bl, f | CEPH_FEATURE_RESERVED);
}
// Re-encode a full map blob for a (possibly older) feature set.
// Replaces `bl` in place.
void OSDMonitor::reencode_full_map(bufferlist& bl, uint64_t features)
{
  OSDMap m;
  auto q = bl.cbegin();
  m.decode(q);
  // always encode with subset of osdmap's canonical features
  uint64_t f = features & m.get_encoding_features();
  dout(20) << __func__ << " " << m.get_epoch() << " with features " << f
	   << dendl;
  bl.clear();
  m.encode(bl, f | CEPH_FEATURE_RESERVED);
}
// Fetch an incremental map for `ver`, re-encoded for `features` when
// the peer's significant features differ from the quorum's.  Results
// are cached per (version, significant-features) pair.
int OSDMonitor::get_version(version_t ver, uint64_t features, bufferlist& bl)
{
  uint64_t significant_features = OSDMap::get_significant_features(features);
  if (inc_osd_cache.lookup({ver, significant_features}, &bl)) {
    return 0;
  }
  int ret = PaxosService::get_version(ver, bl);
  if (ret < 0) {
    return ret;
  }
  // NOTE: this check is imprecise; the OSDMap encoding features may
  // be a subset of the latest mon quorum features, but worst case we
  // reencode once and then cache the (identical) result under both
  // feature masks.
  if (significant_features !=
      OSDMap::get_significant_features(mon.get_quorum_con_features())) {
    reencode_incremental_map(bl, features);
  }
  inc_osd_cache.add_bytes({ver, significant_features}, bl);
  return 0;
}
// Fetch and decode the incremental map for `ver` into `inc`.
// Asserts the map exists; always returns 0.
int OSDMonitor::get_inc(version_t ver, OSDMap::Incremental& inc)
{
  bufferlist inc_bl;
  int err = get_version(ver, inc_bl);
  ceph_assert(err == 0);
  ceph_assert(inc_bl.length());
  auto p = inc_bl.cbegin();
  inc.decode(p);
  dout(10) << __func__ << " "
           << " epoch " << inc.epoch
           << " inc_crc " << inc.inc_crc
           << " full_crc " << inc.full_crc
           << " encode_features " << inc.encode_features << dendl;
  return 0;
}
// Reconstruct the full map for `ver` from the osdmap manifest: start
// from the closest pinned (or cached) full map at or below `ver` and
// replay incrementals up to `ver`.  Returns -ENOENT if no pinned map
// covers the requested version; asserts on store/decode failures.
int OSDMonitor::get_full_from_pinned_map(version_t ver, bufferlist& bl)
{
  dout(10) << __func__ << " ver " << ver << dendl;

  version_t closest_pinned = osdmap_manifest.get_lower_closest_pinned(ver);
  if (closest_pinned == 0) {
    return -ENOENT;
  }
  if (closest_pinned > ver) {
    dout(0) << __func__ << " pinned: " << osdmap_manifest.pinned << dendl;
  }
  ceph_assert(closest_pinned <= ver);

  dout(10) << __func__ << " closest pinned ver " << closest_pinned << dendl;

  // get osdmap incremental maps and apply on top of this one.
  bufferlist osdm_bl;
  bool has_cached_osdmap = false;
  // prefer a cached full map newer than the pinned one; fewer
  // incrementals to replay below
  for (version_t v = ver-1; v >= closest_pinned; --v) {
    if (full_osd_cache.lookup({v, mon.get_quorum_con_features()},
                                &osdm_bl)) {
      dout(10) << __func__ << " found map in cache ver " << v << dendl;
      closest_pinned = v;
      has_cached_osdmap = true;
      break;
    }
  }
  if (!has_cached_osdmap) {
    int err = PaxosService::get_version_full(closest_pinned, osdm_bl);
    if (err != 0) {
      derr << __func__ << " closest pinned map ver " << closest_pinned
           << " not available! error: " << cpp_strerror(err) << dendl;
    }
    ceph_assert(err == 0);
  }

  ceph_assert(osdm_bl.length());

  OSDMap osdm;
  osdm.decode(osdm_bl);

  dout(10) << __func__ << " loaded osdmap epoch " << closest_pinned
           << " e" << osdm.epoch
           << " crc " << osdm.get_crc()
           << " -- applying incremental maps." << dendl;

  // replay incrementals (closest_pinned, ver]
  uint64_t encode_features = 0;
  for (version_t v = closest_pinned + 1; v <= ver; ++v) {
    dout(20) << __func__ << " applying inc epoch " << v << dendl;

    OSDMap::Incremental inc;
    int err = get_inc(v, inc);
    ceph_assert(err == 0);

    encode_features = inc.encode_features;

    err = osdm.apply_incremental(inc);
    ceph_assert(err == 0);

    // this block performs paranoid checks on map retrieval
    if (g_conf().get_val<bool>("mon_debug_extra_checks") &&
        inc.full_crc != 0) {

      uint64_t f = encode_features;
      if (!f) {
        f = (mon.quorum_con_features ? mon.quorum_con_features : -1);
      }

      // encode osdmap to force calculating crcs
      bufferlist tbl;
      osdm.encode(tbl, f | CEPH_FEATURE_RESERVED);
      // decode osdmap to compare crcs with what's expected by incremental
      OSDMap tosdm;
      tosdm.decode(tbl);

      if (tosdm.get_crc() != inc.full_crc) {
        derr << __func__
             << " osdmap crc mismatch! (osdmap crc " << tosdm.get_crc()
             << ", expected " << inc.full_crc << ")" << dendl;
        ceph_abort_msg("osdmap crc mismatch");
      }
    }

    // note: we cannot add the recently computed map to the cache, as is,
    // because we have not encoded the map into a bl.
  }

  if (!encode_features) {
    dout(10) << __func__
             << " last incremental map didn't have features;"
             << " defaulting to quorum's or all" << dendl;
    encode_features =
      (mon.quorum_con_features ? mon.quorum_con_features : -1);
  }
  osdm.encode(bl, encode_features | CEPH_FEATURE_RESERVED);

  return 0;
}
// Fetch a full map encoded for the current quorum features.
int OSDMonitor::get_version_full(version_t ver, bufferlist& bl)
{
  return get_version_full(ver, mon.get_quorum_con_features(), bl);
}
// Fetch a full map for `ver`, rebuilding it from the pinned-map
// manifest if it has been trimmed, and re-encoding for `features` when
// the peer's significant features differ from the quorum's.  Results
// are cached per (version, significant-features) pair.
int OSDMonitor::get_version_full(version_t ver, uint64_t features,
				 bufferlist& bl)
{
  uint64_t significant_features = OSDMap::get_significant_features(features);
  if (full_osd_cache.lookup({ver, significant_features}, &bl)) {
    return 0;
  }
  int ret = PaxosService::get_version_full(ver, bl);
  if (ret == -ENOENT) {
    // build map?
    ret = get_full_from_pinned_map(ver, bl);
  }
  if (ret < 0) {
    return ret;
  }
  // NOTE: this check is imprecise; the OSDMap encoding features may
  // be a subset of the latest mon quorum features, but worst case we
  // reencode once and then cache the (identical) result under both
  // feature masks.
  if (significant_features !=
      OSDMap::get_significant_features(mon.get_quorum_con_features())) {
    reencode_full_map(bl, features);
  }
  full_osd_cache.add_bytes({ver, significant_features}, bl);
  return 0;
}
// Queue every address in the vec onto the pending blocklist until the
// given time.  Returns the epoch in which the entries take effect.
epoch_t OSDMonitor::blocklist(const entity_addrvec_t& av, utime_t until)
{
  dout(10) << "blocklist " << av << " until " << until << dendl;
  for (auto addr : av.v) {
    // nautilus+ stores blocklist entries with TYPE_ANY addrs; older
    // releases expect legacy-typed addrs
    addr.set_type(osdmap.require_osd_release >= ceph_release_t::nautilus
		  ? entity_addr_t::TYPE_ANY
		  : entity_addr_t::TYPE_LEGACY);
    pending_inc.new_blocklist[addr] = until;
  }
  return pending_inc.epoch;
}
// Queue a single address onto the pending blocklist until the given
// time.  Returns the epoch in which the entry takes effect.
epoch_t OSDMonitor::blocklist(entity_addr_t a, utime_t until)
{
  // nautilus+ stores blocklist entries with TYPE_ANY addrs; older
  // releases expect legacy-typed addrs
  a.set_type(osdmap.require_osd_release >= ceph_release_t::nautilus
	     ? entity_addr_t::TYPE_ANY
	     : entity_addr_t::TYPE_LEGACY);
  dout(10) << "blocklist " << a << " until " << until << dendl;
  pending_inc.new_blocklist[a] = until;
  return pending_inc.epoch;
}
// Walk all "osdmap" subscriptions and push any maps each subscriber is
// missing.
void OSDMonitor::check_osdmap_subs()
{
  dout(10) << __func__ << dendl;
  if (!osdmap.get_epoch()) {
    return;
  }
  auto found = mon.session_map.subs.find("osdmap");
  if (found == mon.session_map.subs.end()) {
    return;
  }
  // advance the iterator before servicing each sub: check_osdmap_sub()
  // may remove a onetime sub, invalidating an iterator pointing at it
  for (auto p = found->second->begin(); !p.end(); ) {
    auto *sub = *p;
    ++p;
    check_osdmap_sub(sub);
  }
}
// Service one "osdmap" subscription: send incrementals from sub->next
// (or the full latest map if next == 0), then either drop a onetime sub
// or advance its next epoch.
void OSDMonitor::check_osdmap_sub(Subscription *sub)
{
  dout(10) << __func__ << " " << sub << " next " << sub->next
	   << (sub->onetime ? " (onetime)":" (ongoing)") << dendl;
  if (sub->next <= osdmap.get_epoch()) {
    if (sub->next >= 1)
      send_incremental(sub->next, sub->session, sub->incremental_onetime);
    else
      sub->session->con->send_message(build_latest_full(sub->session->con_features));
    if (sub->onetime)
      mon.session_map.remove_sub(sub);
    else
      sub->next = osdmap.get_epoch() + 1;
  }
}
// Walk all "osd_pg_creates" subscriptions and send pending pg-create
// messages to the subscribed (up) OSDs.
void OSDMonitor::check_pg_creates_subs()
{
  if (!osdmap.get_num_up_osds()) {
    return;
  }
  // all up osds must understand stateful subscriptions for this to work
  ceph_assert(osdmap.get_up_osd_features() & CEPH_FEATURE_MON_STATEFUL_SUB);
  mon.with_session_map([this](const MonSessionMap& session_map) {
      auto pg_creates_subs = session_map.subs.find("osd_pg_creates");
      if (pg_creates_subs == session_map.subs.end()) {
	return;
      }
      for (auto sub : *pg_creates_subs->second) {
	check_pg_creates_sub(sub);
      }
    });
}
// Service one "osd_pg_creates" subscription: send any pending pg
// creations to the subscribing OSD, if it is currently up.
void OSDMonitor::check_pg_creates_sub(Subscription *sub)
{
  dout(20) << __func__ << " .. " << sub->session->name << dendl;
  ceph_assert(sub->type == "osd_pg_creates");
  // only send these if the OSD is up.  we will check_subs() when they do
  // come up so they will get the creates then.
  if (sub->session->name.is_osd() &&
      mon.osdmon()->osdmap.is_up(sub->session->name.num())) {
    sub->next = send_pg_creates(sub->session->name.num(),
				sub->session->con.get(),
				sub->next);
  }
}
// Enable an application on a pool (optionally setting one app key/value)
// in the pending incremental.  With force=false, map::insert semantics
// mean existing application entries/keys are left untouched; with
// force=true the key is overwritten.  Caller must hold a plugged,
// writeable paxos.
void OSDMonitor::do_application_enable(int64_t pool_id,
                                       const std::string &app_name,
				       const std::string &app_key,
				       const std::string &app_value,
				       bool force)
{
  ceph_assert(paxos.is_plugged() && is_writeable());

  dout(20) << __func__ << ": pool_id=" << pool_id << ", app_name=" << app_name
           << dendl;

  ceph_assert(osdmap.require_osd_release >= ceph_release_t::luminous);

  auto pp = osdmap.get_pg_pool(pool_id);
  ceph_assert(pp != nullptr);

  // start from the pending pool state if this proposal already touched it
  pg_pool_t p = *pp;
  if (pending_inc.new_pools.count(pool_id)) {
    p = pending_inc.new_pools[pool_id];
  }

  if (app_key.empty()) {
    p.application_metadata.insert({app_name, {}});
  } else {
    if (force) {
      p.application_metadata[app_name][app_key] = app_value;
    } else {
      p.application_metadata.insert({app_name, {{app_key, app_value}}});
    }
  }
  p.last_change = pending_inc.epoch;
  pending_inc.new_pools[pool_id] = p;
}
// Set a single pool option in the pending incremental, seeding the
// pending pool entry from the committed map the first time the pool is
// touched in this proposal.
void OSDMonitor::do_set_pool_opt(int64_t pool_id,
				 pool_opts_t::key_t opt,
				 pool_opts_t::value_t val)
{
  dout(10) << __func__ << " pool: " << pool_id << " option: " << opt
	   << " val: " << val << dendl;
  auto [it, inserted] = pending_inc.new_pools.try_emplace(
    pool_id, *osdmap.get_pg_pool(pool_id));
  (void)inserted;  // an existing pending entry is reused as-is
  it->second.opts.set(opt, val);
}
// Scan the given pools for ones whose pgs still need to be created and
// queue them on `creating_pgs`.  Skips pools already created, pools
// with a missing crush rule, pools unchanged since the last scan, and
// pools being removed.  Returns the number of pools queued.
unsigned OSDMonitor::scan_for_creating_pgs(
  const mempool::osdmap::map<int64_t,pg_pool_t>& pools,
  const mempool::osdmap::set<int64_t>& removed_pools,
  utime_t modified,
  creating_pgs_t* creating_pgs) const
{
  unsigned queued = 0;
  for (auto& p : pools) {
    int64_t poolid = p.first;
    if (creating_pgs->created_pools.count(poolid)) {
      dout(10) << __func__ << " already created " << poolid << dendl;
      continue;
    }
    const pg_pool_t& pool = p.second;
    int ruleno = pool.get_crush_rule();
    // pool with a dangling crush rule cannot be mapped; skip it
    if (ruleno < 0 || !osdmap.crush->rule_exists(ruleno))
      continue;

    const auto last_scan_epoch = creating_pgs->last_scan_epoch;
    const auto created = pool.get_last_change();
    if (last_scan_epoch && created <= last_scan_epoch) {
      dout(10) << __func__ << " no change in pool " << poolid
	       << " " << pool << dendl;
      continue;
    }
    if (removed_pools.count(poolid)) {
      dout(10) << __func__ << " pool is being removed: " << poolid
	       << " " << pool << dendl;
      continue;
    }
    dout(10) << __func__ << " queueing pool create for " << poolid
	     << " " << pool << dendl;
    creating_pgs->create_pool(poolid, pool.get_pg_num(),
			      created, modified);
    queued++;
  }
  return queued;
}
// Rebuild creating_pgs_by_osd_epoch: for every pg still being created,
// determine which osd (the acting primary) should receive its create
// message and the epoch to associate with that mapping.  Takes
// creating_pgs_lock for the duration.
void OSDMonitor::update_creating_pgs()
{
  dout(10) << __func__ << " " << creating_pgs.pgs.size() << " pgs creating, "
           << creating_pgs.queue.size() << " pools in queue" << dendl;
  decltype(creating_pgs_by_osd_epoch) new_pgs_by_osd_epoch;
  std::lock_guard<std::mutex> l(creating_pgs_lock);
  for (const auto& pg : creating_pgs.pgs) {
    int acting_primary = -1;
    auto pgid = pg.first;
    if (!osdmap.pg_exists(pgid)) {
      // pool deleted (or pg no longer in the map) since it was queued
      dout(20) << __func__ << " ignoring " << pgid << " which should not exist"
               << dendl;
      continue;
    }
    auto mapped = pg.second.create_epoch;
    dout(20) << __func__ << " looking up " << pgid << "@" << mapped << dendl;
    spg_t spgid(pgid);
    mapping.get_primary_and_shard(pgid, &acting_primary, &spgid);
    // check the previous creating_pgs, look for the target to whom the pg was
    // previously mapped
    for (const auto& pgs_by_epoch : creating_pgs_by_osd_epoch) {
      const auto last_acting_primary = pgs_by_epoch.first;
      for (auto& pgs: pgs_by_epoch.second) {
        if (pgs.second.count(spgid)) {
          if (last_acting_primary == acting_primary) {
            // same target as before: keep the epoch it was queued at
            mapped = pgs.first;
          } else {
            dout(20) << __func__ << " " << pgid << " "
                     << " acting_primary:" << last_acting_primary
                     << " -> " << acting_primary << dendl;
            // note epoch if the target of the create message changed.
            mapped = mapping.get_epoch();
          }
          break;
        } else {
          // newly creating
          mapped = mapping.get_epoch();
        }
      }
    }
    dout(10) << __func__ << " will instruct osd." << acting_primary
             << " to create " << pgid << "@" << mapped << dendl;
    new_pgs_by_osd_epoch[acting_primary][mapped].insert(spgid);
  }
  creating_pgs_by_osd_epoch = std::move(new_pgs_by_osd_epoch);
  creating_pgs_epoch = mapping.get_epoch();
}
// Send pending pg-create messages to `osd` over `con`, covering epochs
// >= `next` (the subscriber's cursor).  Returns the epoch through which
// the subscription is now current (last sent epoch + 1), or `next`
// unchanged when nothing was sent.
epoch_t OSDMonitor::send_pg_creates(int osd, Connection *con, epoch_t next) const
{
  dout(30) << __func__ << " osd." << osd << " next=" << next
           << " " << creating_pgs_by_osd_epoch << dendl;
  std::lock_guard<std::mutex> l(creating_pgs_lock);
  if (creating_pgs_epoch <= creating_pgs.last_scan_epoch) {
    dout(20) << __func__
             << " not using stale creating_pgs@" << creating_pgs_epoch << dendl;
    // the subscribers will be updated when the mapping is completed anyway
    return next;
  }
  auto creating_pgs_by_epoch = creating_pgs_by_osd_epoch.find(osd);
  if (creating_pgs_by_epoch == creating_pgs_by_osd_epoch.end())
    return next;  // nothing queued for this osd
  ceph_assert(!creating_pgs_by_epoch->second.empty());

  auto m = make_message<MOSDPGCreate2>(creating_pgs_epoch);

  epoch_t last = 0;
  // walk the per-epoch pg sets starting at the subscriber's cursor
  for (auto epoch_pgs = creating_pgs_by_epoch->second.lower_bound(next);
       epoch_pgs != creating_pgs_by_epoch->second.end(); ++epoch_pgs) {
    auto epoch = epoch_pgs->first;
    auto& pgs = epoch_pgs->second;
    dout(20) << __func__ << " osd." << osd << " from " << next
             << " : epoch " << epoch << " " << pgs.size() << " pgs" << dendl;
    last = epoch;
    for (auto& pg : pgs) {
      // Need the create time from the monitor using its clock to set
      // last_scrub_stamp upon pg creation.
      auto create = creating_pgs.pgs.find(pg.pgid);
      ceph_assert(create != creating_pgs.pgs.end());
      m->pgs.emplace(pg, make_pair(create->second.create_epoch,
                                   create->second.create_stamp));
      if (create->second.history.epoch_created) {
        // this pg has pre-seeded history/past_intervals; forward them
        dout(20) << __func__ << " " << pg << " " << create->second.history
                 << " " << create->second.past_intervals << dendl;
        m->pg_extra.emplace(pg, make_pair(create->second.history,
                                          create->second.past_intervals));
      }
      dout(20) << __func__ << " will create " << pg
               << " at " << create->second.create_epoch << dendl;
    }
  }
  if (!m->pgs.empty()) {
    con->send_message2(std::move(m));
  } else {
    dout(20) << __func__ << " osd." << osd << " from " << next
             << " has nothing to send" << dendl;
    return next;
  }
  // sub is current through last + 1
  return last + 1;
}
// TICK
// Periodic maintenance driven by the monitor tick: tune the priority
// caches, mark unresponsive osds down, auto-out osds that have stayed
// down long enough, expire blocklist entries, and propose a new map if
// anything changed.  Cache tuning runs on every monitor; everything else
// is leader-only.
void OSDMonitor::tick()
{
  if (!is_active()) return;

  dout(10) << osdmap << dendl;

  // always update osdmap manifest, regardless of being the leader.
  load_osdmap_manifest();

  // always tune priority cache manager memory on leader and peons
  if (ceph_using_tcmalloc() && mon_memory_autotune) {
    std::lock_guard l(balancer_lock);
    if (pcm != nullptr) {
      pcm->tune_memory();
      pcm->balance();
      _set_new_cache_sizes();
      dout(10) << "tick balancer "
               << " inc cache_bytes: " << inc_cache->get_cache_bytes()
               << " inc comtd_bytes: " << inc_cache->get_committed_size()
               << " inc used_bytes: " << inc_cache->_get_used_bytes()
               << " inc num_osdmaps: " << inc_cache->_get_num_osdmaps()
               << dendl;
      dout(10) << "tick balancer "
               << " full cache_bytes: " << full_cache->get_cache_bytes()
               << " full comtd_bytes: " << full_cache->get_committed_size()
               << " full used_bytes: " << full_cache->_get_used_bytes()
               << " full num_osdmaps: " << full_cache->_get_num_osdmaps()
               << dendl;
    }
  }

  // everything below mutates pending state; only the leader may do that
  if (!mon.is_leader()) return;

  bool do_propose = false;
  utime_t now = ceph_clock_now();

  // mark down osds whose beacons have stopped arriving
  if (handle_osd_timeouts(now, last_osd_report)) {
    do_propose = true;
  }

  // mark osds down?
  if (check_failures(now)) {
    do_propose = true;
  }

  // Force a proposal if we need to prune; pruning is performed on
  // ``encode_pending()``, hence why we need to regularly trigger a proposal
  // even if there's nothing going on.
  if (is_prune_enabled() && should_prune()) {
    do_propose = true;
  }

  // mark down osds out?
  /* can_mark_out() checks if we can mark osds as being out. The -1 has no
   * influence at all. The decision is made based on the ratio of "in" osds,
   * and the function returns false if this ratio is lower that the minimum
   * ratio set by g_conf()->mon_osd_min_in_ratio. So it's not really up to us.
   */
  if (can_mark_out(-1)) {
    string down_out_subtree_limit = g_conf().get_val<string>(
      "mon_osd_down_out_subtree_limit");
    set<int> down_cache;  // quick cache of down subtrees

    map<int,utime_t>::iterator i = down_pending_out.begin();
    while (i != down_pending_out.end()) {
      int o = i->first;
      utime_t down = now;
      down -= i->second;  // how long has this osd been down?
      ++i;  // advance before we possibly erase `o` below

      if (osdmap.is_down(o) &&
          osdmap.is_in(o) &&
          can_mark_out(o)) {
        utime_t orig_grace(g_conf()->mon_osd_down_out_interval, 0);
        utime_t grace = orig_grace;
        double my_grace = 0.0;

        if (g_conf()->mon_osd_adjust_down_out_interval) {
          // scale grace period the same way we do the heartbeat grace.
          const osd_xinfo_t& xi = osdmap.get_xinfo(o);
          double halflife = (double)g_conf()->mon_osd_laggy_halflife;
          double decay_k = ::log(.5) / halflife;
          double decay = exp((double)down * decay_k);
          dout(20) << "osd." << o << " laggy halflife " << halflife << " decay_k " << decay_k
                   << " down for " << down << " decay " << decay << dendl;
          my_grace = decay * (double)xi.laggy_interval * xi.laggy_probability;
          grace += my_grace;
        }

        // is this an entire large subtree down?
        if (down_out_subtree_limit.length()) {
          int type = osdmap.crush->get_type_id(down_out_subtree_limit);
          if (type > 0) {
            if (osdmap.containing_subtree_is_down(cct, o, type, &down_cache)) {
              dout(10) << "tick entire containing " << down_out_subtree_limit
                       << " subtree for osd." << o
                       << " is down; resetting timer" << dendl;
              // reset timer, too.
              down_pending_out[o] = now;
              continue;
            }
          }
        }

        // regular case: osd exists and exceeded its (adjusted) grace period
        bool down_out = !osdmap.is_destroyed(o) &&
          g_conf()->mon_osd_down_out_interval > 0 && down.sec() >= grace;
        // destroyed osds use their own (typically shorter) interval
        bool destroyed_out = osdmap.is_destroyed(o) &&
          g_conf()->mon_osd_destroyed_out_interval > 0 &&
          // this is not precise enough as we did not make a note when this osd
          // was marked as destroyed, but let's not bother with that
          // complexity for now.
          down.sec() >= g_conf()->mon_osd_destroyed_out_interval;
        if (down_out || destroyed_out) {
          dout(10) << "tick marking osd." << o << " OUT after " << down
                   << " sec (target " << grace << " = " << orig_grace << " + " << my_grace << ")" << dendl;
          pending_inc.new_weight[o] = CEPH_OSD_OUT;

          // set the AUTOOUT bit.
          if (pending_inc.new_state.count(o) == 0)
            pending_inc.new_state[o] = 0;
          pending_inc.new_state[o] |= CEPH_OSD_AUTOOUT;

          // remember previous weight
          if (pending_inc.new_xinfo.count(o) == 0)
            pending_inc.new_xinfo[o] = osdmap.osd_xinfo[o];
          pending_inc.new_xinfo[o].old_weight = osdmap.osd_weight[o];

          do_propose = true;

          mon.clog->info() << "Marking osd." << o << " out (has been down for "
                           << int(down.sec()) << " seconds)";
        } else
          continue;
      }
      // osd is no longer an auto-out candidate; drop its timer entry
      down_pending_out.erase(o);
    }
  } else {
    dout(10) << "tick NOOUT flag set, not checking down osds" << dendl;
  }

  // expire blocklisted items?
  for (ceph::unordered_map<entity_addr_t,utime_t>::iterator p = osdmap.blocklist.begin();
       p != osdmap.blocklist.end();
       ++p) {
    if (p->second < now) {
      dout(10) << "expiring blocklist item " << p->first << " expired " << p->second << " < now " << now << dendl;
      pending_inc.old_blocklist.push_back(p->first);
      do_propose = true;
    }
  }
  for (auto p = osdmap.range_blocklist.begin();
       p != osdmap.range_blocklist.end();
       ++p) {
    if (p->second < now) {
      dout(10) << "expiring range_blocklist item " << p->first
               << " expired " << p->second << " < now " << now << dendl;
      pending_inc.old_range_blocklist.push_back(p->first);
      do_propose = true;
    }
  }

  if (try_prune_purged_snaps()) {
    do_propose = true;
  }

  if (update_pools_status())
    do_propose = true;

  if (do_propose ||
      !pending_inc.new_pg_temp.empty())  // also propose if we adjusted pg_temp
    propose_pending();
}
// Propagate the priority cache manager's committed sizes to the inc/full
// osdmap caches.  When the manager or the kv cache is absent, the caches
// are set to zero bytes.
void OSDMonitor::_set_new_cache_sizes()
{
  uint64_t tuned_mem = 0;
  int64_t inc_bytes = 0;
  int64_t full_bytes = 0;
  int64_t kv_bytes = 0;

  const bool have_pcm = (pcm != nullptr && rocksdb_binned_kv_cache != nullptr);
  if (have_pcm) {
    tuned_mem = pcm->get_tuned_mem();
    inc_bytes = inc_cache->get_committed_size();
    full_bytes = full_cache->get_committed_size();
    kv_bytes = rocksdb_binned_kv_cache->get_committed_size();
  }

  inc_osd_cache.set_bytes(inc_bytes);
  full_osd_cache.set_bytes(full_bytes);

  dout(1) << __func__ << " cache_size:" << tuned_mem
          << " inc_alloc: " << inc_bytes
          << " full_alloc: " << full_bytes
          << " kv_alloc: " << kv_bytes
          << dendl;
}
// Scan all existing, up osds and queue a down-mark for any that have not
// reported within max(mon_osd_report_timeout, 2 * its beacon interval).
// `last_osd_report` maps osd id -> (time of last report, beacon interval).
// Returns true if any osd was newly marked down (caller should propose).
bool OSDMonitor::handle_osd_timeouts(const utime_t &now,
				     std::map<int, std::pair<utime_t, int>> &last_osd_report)
{
  utime_t timeo(g_conf()->mon_osd_report_timeout, 0);
  if (now - mon.get_leader_since() < timeo) {
    // We haven't been the leader for long enough to consider OSD timeouts
    return false;
  }

  int max_osd = osdmap.get_max_osd();
  bool new_down = false;

  for (int i=0; i < max_osd; ++i) {
    dout(30) << __func__ << ": checking up on osd " << i << dendl;
    if (!osdmap.exists(i)) {
      // osd was removed; drop any stale report entry
      last_osd_report.erase(i); // if any
      continue;
    }
    if (!osdmap.is_up(i))
      continue;
    const std::map<int, std::pair<utime_t, int>>::const_iterator t = last_osd_report.find(i);
    if (t == last_osd_report.end()) {
      // it wasn't in the map; start the timer.
      last_osd_report[i].first = now;
      last_osd_report[i].second = 0;
    } else if (can_mark_down(i)) {
      utime_t diff = now - t->second.first;
      // we use the max(mon_osd_report_timeout, 2*osd_beacon_report_interval) as timeout
      // to allow for the osd to miss a beacon.
      int mon_osd_report_timeout = g_conf()->mon_osd_report_timeout;
      utime_t max_timeout(std::max(mon_osd_report_timeout, 2 * t->second.second), 0);
      if (diff > max_timeout) {
        mon.clog->info() << "osd." << i << " marked down after no beacon for "
                         << diff << " seconds";
        derr << "no beacon from osd." << i << " since " << t->second.first
             << ", " << diff << " seconds ago. marking down" << dendl;
        // NOTE(review): new_state carries state bits for the incremental to
        // apply; setting CEPH_OSD_UP here flips the up bit on an up osd,
        // i.e. marks it down — confirm against OSDMap::Incremental semantics.
        pending_inc.new_state[i] = CEPH_OSD_UP;
        new_down = true;
      }
    }
  }
  return new_down;
}
// Emit an array section `name` listing the cpu ids parsed from `strlist`.
// Silently emits nothing when the list cannot be parsed, so callers can
// treat this as best-effort.
static void dump_cpu_list(Formatter *f, const char *name,
                          const string& strlist)
{
  cpu_set_t cpu_set;
  size_t cpu_set_size;
  if (parse_cpu_set_list(strlist.c_str(), &cpu_set_size, &cpu_set) < 0) {
    return;
  }
  f->open_array_section(name);
  for (int cpu : cpu_set_to_set(cpu_set_size, &cpu_set)) {
    f->dump_int("cpu", cpu);
  }
  f->close_section();
}
// Dump monitor-side osd state for debugging/reporting: the osdmap,
// per-osd metadata, clean-epoch bookkeeping, the committed version range,
// the crush map, and (when present) the osdmap manifest.
void OSDMonitor::dump_info(Formatter *f)
{
  f->open_object_section("osdmap");
  osdmap.dump(f, cct);
  f->close_section();

  f->open_array_section("osd_metadata");
  for (int i=0; i<osdmap.get_max_osd(); ++i) {
    if (osdmap.exists(i)) {
      f->open_object_section("osd");
      f->dump_unsigned("id", i);
      dump_osd_metadata(i, f, NULL);  // errors ignored; best-effort dump
      f->close_section();
    }
  }
  f->close_section();

  f->open_object_section("osdmap_clean_epochs");
  f->dump_unsigned("min_last_epoch_clean", get_min_last_epoch_clean());

  f->open_object_section("last_epoch_clean");
  last_epoch_clean.dump(f);
  f->close_section();

  f->open_array_section("osd_epochs");
  for (auto& osd_epoch : osd_epochs) {
    f->open_object_section("osd");
    f->dump_unsigned("id", osd_epoch.first);
    f->dump_unsigned("epoch", osd_epoch.second);
    f->close_section();
  }
  f->close_section(); // osd_epochs

  f->close_section(); // osdmap_clean_epochs

  f->dump_unsigned("osdmap_first_committed", get_first_committed());
  f->dump_unsigned("osdmap_last_committed", get_last_committed());

  f->open_object_section("crushmap");
  osdmap.crush->dump(f);
  f->close_section();

  if (has_osdmap_manifest) {
    f->open_object_section("osdmap_manifest");
    osdmap_manifest.dump(f);
    f->close_section();
  }
}
namespace {
  // Keys accepted by "osd pool get"/"osd pool set"; used to dispatch on
  // the requested pool property.
  enum osd_pool_get_choices {
    SIZE, MIN_SIZE,
    PG_NUM, PGP_NUM, CRUSH_RULE, HASHPSPOOL, EC_OVERWRITES,
    NODELETE, NOPGCHANGE, NOSIZECHANGE,
    WRITE_FADVISE_DONTNEED, NOSCRUB, NODEEP_SCRUB,
    HIT_SET_TYPE, HIT_SET_PERIOD, HIT_SET_COUNT, HIT_SET_FPP,
    USE_GMT_HITSET, TARGET_MAX_OBJECTS, TARGET_MAX_BYTES,
    CACHE_TARGET_DIRTY_RATIO, CACHE_TARGET_DIRTY_HIGH_RATIO,
    CACHE_TARGET_FULL_RATIO,
    CACHE_MIN_FLUSH_AGE, CACHE_MIN_EVICT_AGE,
    ERASURE_CODE_PROFILE, MIN_READ_RECENCY_FOR_PROMOTE,
    MIN_WRITE_RECENCY_FOR_PROMOTE, FAST_READ,
    HIT_SET_GRADE_DECAY_RATE, HIT_SET_SEARCH_LAST_N,
    SCRUB_MIN_INTERVAL, SCRUB_MAX_INTERVAL, DEEP_SCRUB_INTERVAL,
    RECOVERY_PRIORITY, RECOVERY_OP_PRIORITY, SCRUB_PRIORITY,
    COMPRESSION_MODE, COMPRESSION_ALGORITHM, COMPRESSION_REQUIRED_RATIO,
    COMPRESSION_MAX_BLOB_SIZE, COMPRESSION_MIN_BLOB_SIZE,
    CSUM_TYPE, CSUM_MAX_BLOCK, CSUM_MIN_BLOCK, FINGERPRINT_ALGORITHM,
    PG_AUTOSCALE_MODE, PG_NUM_MIN, TARGET_SIZE_BYTES, TARGET_SIZE_RATIO,
    PG_AUTOSCALE_BIAS, DEDUP_TIER, DEDUP_CHUNK_ALGORITHM,
    DEDUP_CDC_CHUNK_SIZE, POOL_EIO, BULK, PG_NUM_MAX };

  // Return the set difference first \ second: every member of `first`
  // that does not appear in `second`.
  std::set<osd_pool_get_choices>
    subtract_second_from_first(const std::set<osd_pool_get_choices>& first,
			       const std::set<osd_pool_get_choices>& second)
    {
      std::set<osd_pool_get_choices> result;
      for (const auto& choice : first) {
	if (second.count(choice) == 0) {
	  // hint at end(): `first` is iterated in sorted order
	  result.insert(result.end(), choice);
	}
      }
      return result;
    }
}
bool OSDMonitor::preprocess_command(MonOpRequestRef op)
{
op->mark_osdmon_event(__func__);
auto m = op->get_req<MMonCommand>();
int r = 0;
bufferlist rdata;
stringstream ss, ds;
cmdmap_t cmdmap;
if (!cmdmap_from_json(m->cmd, &cmdmap, ss)) {
string rs = ss.str();
mon.reply_command(op, -EINVAL, rs, get_last_committed());
return true;
}
MonSession *session = op->get_session();
if (!session) {
derr << __func__ << " no session" << dendl;
mon.reply_command(op, -EACCES, "access denied", get_last_committed());
return true;
}
string prefix;
cmd_getval(cmdmap, "prefix", prefix);
string format = cmd_getval_or<string>(cmdmap, "format", "plain");
boost::scoped_ptr<Formatter> f(Formatter::create(format));
if (prefix == "osd stat") {
if (f) {
f->open_object_section("osdmap");
osdmap.print_summary(f.get(), ds, "", true);
f->close_section();
f->flush(rdata);
} else {
osdmap.print_summary(nullptr, ds, "", true);
rdata.append(ds);
}
}
else if (prefix == "osd dump" ||
prefix == "osd tree" ||
prefix == "osd tree-from" ||
prefix == "osd ls" ||
prefix == "osd getmap" ||
prefix == "osd getcrushmap" ||
prefix == "osd ls-tree" ||
prefix == "osd info") {
epoch_t epoch = cmd_getval_or<int64_t>(cmdmap, "epoch", osdmap.get_epoch());
bufferlist osdmap_bl;
int err = get_version_full(epoch, osdmap_bl);
if (err == -ENOENT) {
r = -ENOENT;
ss << "there is no map for epoch " << epoch;
goto reply;
}
ceph_assert(err == 0);
ceph_assert(osdmap_bl.length());
OSDMap *p;
if (epoch == osdmap.get_epoch()) {
p = &osdmap;
} else {
p = new OSDMap;
p->decode(osdmap_bl);
}
auto sg = make_scope_guard([&] {
if (p != &osdmap) {
delete p;
}
});
if (prefix == "osd dump") {
stringstream ds;
if (f) {
f->open_object_section("osdmap");
p->dump(f.get(), cct);
f->close_section();
f->flush(ds);
} else {
p->print(cct, ds);
}
rdata.append(ds);
if (!f)
ds << " ";
} else if (prefix == "osd ls") {
if (f) {
f->open_array_section("osds");
for (int i = 0; i < osdmap.get_max_osd(); i++) {
if (osdmap.exists(i)) {
f->dump_int("osd", i);
}
}
f->close_section();
f->flush(ds);
} else {
bool first = true;
for (int i = 0; i < osdmap.get_max_osd(); i++) {
if (osdmap.exists(i)) {
if (!first)
ds << "\n";
first = false;
ds << i;
}
}
}
rdata.append(ds);
} else if (prefix == "osd info") {
int64_t osd_id;
bool do_single_osd = true;
if (!cmd_getval(cmdmap, "id", osd_id)) {
do_single_osd = false;
}
if (do_single_osd && !osdmap.exists(osd_id)) {
ss << "osd." << osd_id << " does not exist";
r = -EINVAL;
goto reply;
}
if (f) {
if (do_single_osd) {
osdmap.dump_osd(osd_id, f.get());
} else {
osdmap.dump_osds(f.get());
}
f->flush(ds);
} else {
if (do_single_osd) {
osdmap.print_osd(osd_id, ds);
} else {
osdmap.print_osds(ds);
}
}
rdata.append(ds);
} else if (prefix == "osd tree" || prefix == "osd tree-from") {
string bucket;
if (prefix == "osd tree-from") {
cmd_getval(cmdmap, "bucket", bucket);
if (!osdmap.crush->name_exists(bucket)) {
ss << "bucket '" << bucket << "' does not exist";
r = -ENOENT;
goto reply;
}
int id = osdmap.crush->get_item_id(bucket);
if (id >= 0) {
ss << "\"" << bucket << "\" is not a bucket";
r = -EINVAL;
goto reply;
}
}
vector<string> states;
cmd_getval(cmdmap, "states", states);
unsigned filter = 0;
for (auto& s : states) {
if (s == "up") {
filter |= OSDMap::DUMP_UP;
} else if (s == "down") {
filter |= OSDMap::DUMP_DOWN;
} else if (s == "in") {
filter |= OSDMap::DUMP_IN;
} else if (s == "out") {
filter |= OSDMap::DUMP_OUT;
} else if (s == "destroyed") {
filter |= OSDMap::DUMP_DESTROYED;
} else {
ss << "unrecognized state '" << s << "'";
r = -EINVAL;
goto reply;
}
}
if ((filter & (OSDMap::DUMP_IN|OSDMap::DUMP_OUT)) ==
(OSDMap::DUMP_IN|OSDMap::DUMP_OUT)) {
ss << "cannot specify both 'in' and 'out'";
r = -EINVAL;
goto reply;
}
if (((filter & (OSDMap::DUMP_UP|OSDMap::DUMP_DOWN)) ==
(OSDMap::DUMP_UP|OSDMap::DUMP_DOWN)) ||
((filter & (OSDMap::DUMP_UP|OSDMap::DUMP_DESTROYED)) ==
(OSDMap::DUMP_UP|OSDMap::DUMP_DESTROYED)) ||
((filter & (OSDMap::DUMP_DOWN|OSDMap::DUMP_DESTROYED)) ==
(OSDMap::DUMP_DOWN|OSDMap::DUMP_DESTROYED))) {
ss << "can specify only one of 'up', 'down' and 'destroyed'";
r = -EINVAL;
goto reply;
}
if (f) {
f->open_object_section("tree");
p->print_tree(f.get(), NULL, filter, bucket);
f->close_section();
f->flush(ds);
} else {
p->print_tree(NULL, &ds, filter, bucket);
}
rdata.append(ds);
} else if (prefix == "osd getmap") {
rdata.append(osdmap_bl);
ss << "got osdmap epoch " << p->get_epoch();
} else if (prefix == "osd getcrushmap") {
p->crush->encode(rdata, mon.get_quorum_con_features());
ss << p->get_crush_version();
} else if (prefix == "osd ls-tree") {
string bucket_name;
cmd_getval(cmdmap, "name", bucket_name);
set<int> osds;
r = p->get_osds_by_bucket_name(bucket_name, &osds);
if (r == -ENOENT) {
ss << "\"" << bucket_name << "\" does not exist";
goto reply;
} else if (r < 0) {
ss << "can not parse bucket name:\"" << bucket_name << "\"";
goto reply;
}
if (f) {
f->open_array_section("osds");
for (auto &i : osds) {
if (osdmap.exists(i)) {
f->dump_int("osd", i);
}
}
f->close_section();
f->flush(ds);
} else {
bool first = true;
for (auto &i : osds) {
if (osdmap.exists(i)) {
if (!first)
ds << "\n";
first = false;
ds << i;
}
}
}
rdata.append(ds);
}
} else if (prefix == "osd getmaxosd") {
if (f) {
f->open_object_section("getmaxosd");
f->dump_unsigned("epoch", osdmap.get_epoch());
f->dump_int("max_osd", osdmap.get_max_osd());
f->close_section();
f->flush(rdata);
} else {
ds << "max_osd = " << osdmap.get_max_osd() << " in epoch " << osdmap.get_epoch();
rdata.append(ds);
}
} else if (prefix == "osd utilization") {
string out;
osdmap.summarize_mapping_stats(NULL, NULL, &out, f.get());
if (f)
f->flush(rdata);
else
rdata.append(out);
r = 0;
goto reply;
} else if (prefix == "osd find") {
int64_t osd;
if (!cmd_getval(cmdmap, "id", osd)) {
ss << "unable to parse osd id value '"
<< cmd_vartype_stringify(cmdmap["id"]) << "'";
r = -EINVAL;
goto reply;
}
if (!osdmap.exists(osd)) {
ss << "osd." << osd << " does not exist";
r = -ENOENT;
goto reply;
}
string format;
cmd_getval(cmdmap, "format", format);
boost::scoped_ptr<Formatter> f(Formatter::create(format, "json-pretty", "json-pretty"));
f->open_object_section("osd_location");
f->dump_int("osd", osd);
f->dump_object("addrs", osdmap.get_addrs(osd));
f->dump_stream("osd_fsid") << osdmap.get_uuid(osd);
// try to identify host, pod/container name, etc.
map<string,string> m;
load_metadata(osd, m, nullptr);
if (auto p = m.find("hostname"); p != m.end()) {
f->dump_string("host", p->second);
}
for (auto& k : {
"pod_name", "pod_namespace", // set by rook
"container_name" // set by cephadm, ceph-ansible
}) {
if (auto p = m.find(k); p != m.end()) {
f->dump_string(k, p->second);
}
}
// crush is helpful too
f->open_object_section("crush_location");
map<string,string> loc = osdmap.crush->get_full_location(osd);
for (map<string,string>::iterator p = loc.begin(); p != loc.end(); ++p)
f->dump_string(p->first.c_str(), p->second);
f->close_section();
f->close_section();
f->flush(rdata);
} else if (prefix == "osd metadata") {
int64_t osd = -1;
if (cmd_vartype_stringify(cmdmap["id"]).size() &&
!cmd_getval(cmdmap, "id", osd)) {
ss << "unable to parse osd id value '"
<< cmd_vartype_stringify(cmdmap["id"]) << "'";
r = -EINVAL;
goto reply;
}
if (osd >= 0 && !osdmap.exists(osd)) {
ss << "osd." << osd << " does not exist";
r = -ENOENT;
goto reply;
}
string format;
cmd_getval(cmdmap, "format", format);
boost::scoped_ptr<Formatter> f(Formatter::create(format, "json-pretty", "json-pretty"));
if (osd >= 0) {
f->open_object_section("osd_metadata");
f->dump_unsigned("id", osd);
r = dump_osd_metadata(osd, f.get(), &ss);
if (r < 0)
goto reply;
f->close_section();
} else {
r = 0;
f->open_array_section("osd_metadata");
for (int i=0; i<osdmap.get_max_osd(); ++i) {
if (osdmap.exists(i)) {
f->open_object_section("osd");
f->dump_unsigned("id", i);
r = dump_osd_metadata(i, f.get(), NULL);
if (r == -EINVAL || r == -ENOENT) {
// Drop error, continue to get other daemons' metadata
dout(4) << "No metadata for osd." << i << dendl;
r = 0;
} else if (r < 0) {
// Unexpected error
goto reply;
}
f->close_section();
}
}
f->close_section();
}
f->flush(rdata);
} else if (prefix == "osd versions") {
if (!f)
f.reset(Formatter::create("json-pretty"));
count_metadata("ceph_version", f.get());
f->flush(rdata);
r = 0;
} else if (prefix == "osd count-metadata") {
if (!f)
f.reset(Formatter::create("json-pretty"));
string field;
cmd_getval(cmdmap, "property", field);
count_metadata(field, f.get());
f->flush(rdata);
r = 0;
} else if (prefix == "osd numa-status") {
TextTable tbl;
if (f) {
f->open_array_section("osds");
} else {
tbl.define_column("OSD", TextTable::LEFT, TextTable::RIGHT);
tbl.define_column("HOST", TextTable::LEFT, TextTable::LEFT);
tbl.define_column("NETWORK", TextTable::RIGHT, TextTable::RIGHT);
tbl.define_column("STORAGE", TextTable::RIGHT, TextTable::RIGHT);
tbl.define_column("AFFINITY", TextTable::RIGHT, TextTable::RIGHT);
tbl.define_column("CPUS", TextTable::LEFT, TextTable::LEFT);
}
for (int i=0; i<osdmap.get_max_osd(); ++i) {
if (osdmap.exists(i)) {
map<string,string> m;
ostringstream err;
if (load_metadata(i, m, &err) < 0) {
continue;
}
string host;
auto p = m.find("hostname");
if (p != m.end()) {
host = p->second;
}
if (f) {
f->open_object_section("osd");
f->dump_int("osd", i);
f->dump_string("host", host);
for (auto n : { "network_numa_node", "objectstore_numa_node",
"numa_node" }) {
p = m.find(n);
if (p != m.end()) {
f->dump_int(n, atoi(p->second.c_str()));
}
}
for (auto n : { "network_numa_nodes", "objectstore_numa_nodes" }) {
p = m.find(n);
if (p != m.end()) {
list<string> ls = get_str_list(p->second, ",");
f->open_array_section(n);
for (auto node : ls) {
f->dump_int("node", atoi(node.c_str()));
}
f->close_section();
}
}
for (auto n : { "numa_node_cpus" }) {
p = m.find(n);
if (p != m.end()) {
dump_cpu_list(f.get(), n, p->second);
}
}
f->close_section();
} else {
tbl << i;
tbl << host;
p = m.find("network_numa_nodes");
if (p != m.end()) {
tbl << p->second;
} else {
tbl << "-";
}
p = m.find("objectstore_numa_nodes");
if (p != m.end()) {
tbl << p->second;
} else {
tbl << "-";
}
p = m.find("numa_node");
auto q = m.find("numa_node_cpus");
if (p != m.end() && q != m.end()) {
tbl << p->second;
tbl << q->second;
} else {
tbl << "-";
tbl << "-";
}
tbl << TextTable::endrow;
}
}
}
if (f) {
f->close_section();
f->flush(rdata);
} else {
rdata.append(stringify(tbl));
}
} else if (prefix == "osd map") {
string poolstr, objstr, namespacestr;
cmd_getval(cmdmap, "pool", poolstr);
cmd_getval(cmdmap, "object", objstr);
cmd_getval(cmdmap, "nspace", namespacestr);
int64_t pool = osdmap.lookup_pg_pool_name(poolstr.c_str());
if (pool < 0) {
ss << "pool " << poolstr << " does not exist";
r = -ENOENT;
goto reply;
}
object_locator_t oloc(pool, namespacestr);
object_t oid(objstr);
pg_t pgid = osdmap.object_locator_to_pg(oid, oloc);
pg_t mpgid = osdmap.raw_pg_to_pg(pgid);
vector<int> up, acting;
int up_p, acting_p;
osdmap.pg_to_up_acting_osds(mpgid, &up, &up_p, &acting, &acting_p);
string fullobjname;
if (!namespacestr.empty())
fullobjname = namespacestr + string("/") + oid.name;
else
fullobjname = oid.name;
if (f) {
f->open_object_section("osd_map");
f->dump_unsigned("epoch", osdmap.get_epoch());
f->dump_string("pool", poolstr);
f->dump_int("pool_id", pool);
f->dump_stream("objname") << fullobjname;
f->dump_stream("raw_pgid") << pgid;
f->dump_stream("pgid") << mpgid;
f->open_array_section("up");
for (vector<int>::iterator p = up.begin(); p != up.end(); ++p)
f->dump_int("osd", *p);
f->close_section();
f->dump_int("up_primary", up_p);
f->open_array_section("acting");
for (vector<int>::iterator p = acting.begin(); p != acting.end(); ++p)
f->dump_int("osd", *p);
f->close_section();
f->dump_int("acting_primary", acting_p);
f->close_section(); // osd_map
f->flush(rdata);
} else {
ds << "osdmap e" << osdmap.get_epoch()
<< " pool '" << poolstr << "' (" << pool << ")"
<< " object '" << fullobjname << "' ->"
<< " pg " << pgid << " (" << mpgid << ")"
<< " -> up (" << pg_vector_string(up) << ", p" << up_p << ") acting ("
<< pg_vector_string(acting) << ", p" << acting_p << ")";
rdata.append(ds);
}
} else if (prefix == "pg map") {
pg_t pgid;
vector<int> up, acting;
r = parse_pgid(cmdmap, ss, pgid);
if (r < 0)
goto reply;
pg_t mpgid = osdmap.raw_pg_to_pg(pgid);
osdmap.pg_to_up_acting_osds(pgid, up, acting);
if (f) {
f->open_object_section("pg_map");
f->dump_unsigned("epoch", osdmap.get_epoch());
f->dump_stream("raw_pgid") << pgid;
f->dump_stream("pgid") << mpgid;
f->open_array_section("up");
for (auto osd : up) {
f->dump_int("up_osd", osd);
}
f->close_section();
f->open_array_section("acting");
for (auto osd : acting) {
f->dump_int("acting_osd", osd);
}
f->close_section();
f->close_section();
f->flush(rdata);
} else {
ds << "osdmap e" << osdmap.get_epoch()
<< " pg " << pgid << " (" << mpgid << ")"
<< " -> up " << up << " acting " << acting;
rdata.append(ds);
}
goto reply;
} else if (prefix == "osd lspools") {
if (f)
f->open_array_section("pools");
for (map<int64_t, pg_pool_t>::iterator p = osdmap.pools.begin();
p != osdmap.pools.end();
++p) {
if (f) {
f->open_object_section("pool");
f->dump_int("poolnum", p->first);
f->dump_string("poolname", osdmap.pool_name[p->first]);
f->close_section();
} else {
ds << p->first << ' ' << osdmap.pool_name[p->first];
if (next(p) != osdmap.pools.end()) {
ds << '\n';
}
}
}
if (f) {
f->close_section();
f->flush(ds);
}
rdata.append(ds);
} else if (prefix == "osd blocklist ls" ||
prefix == "osd blacklist ls") {
if (f)
f->open_array_section("blocklist");
for (ceph::unordered_map<entity_addr_t,utime_t>::iterator p = osdmap.blocklist.begin();
p != osdmap.blocklist.end();
++p) {
if (f) {
f->open_object_section("entry");
f->dump_string("addr", p->first.get_legacy_str());
f->dump_stream("until") << p->second;
f->close_section();
} else {
stringstream ss;
string s;
ss << p->first << " " << p->second;
getline(ss, s);
s += "\n";
rdata.append(s);
}
}
if (f) {
f->close_section();
f->flush(rdata);
}
if (f)
f->open_array_section("range_blocklist");
for (auto p = osdmap.range_blocklist.begin();
p != osdmap.range_blocklist.end();
++p) {
if (f) {
f->open_object_section("entry");
f->dump_string("range", p->first.get_legacy_str());
f->dump_stream("until") << p->second;
f->close_section();
} else {
stringstream ss;
string s;
ss << p->first << " " << p->second;
getline(ss, s);
s += "\n";
rdata.append(s);
}
}
if (f) {
f->close_section();
f->flush(rdata);
}
ss << "listed " << osdmap.blocklist.size() + osdmap.range_blocklist.size() << " entries";
} else if (prefix == "osd pool ls") {
string detail;
cmd_getval(cmdmap, "detail", detail);
if (!f && detail == "detail") {
ostringstream ss;
osdmap.print_pools(cct, ss);
rdata.append(ss.str());
} else {
if (f)
f->open_array_section("pools");
for (auto &[pid, pdata] : osdmap.get_pools()) {
if (f) {
if (detail == "detail") {
f->open_object_section("pool");
f->dump_int("pool_id", pid);
f->dump_string("pool_name", osdmap.get_pool_name(pid));
pdata.dump(f.get());
osdmap.dump_read_balance_score(cct, pid, pdata, f.get());
f->close_section();
} else {
f->dump_string("pool_name", osdmap.get_pool_name(pid));
}
} else {
rdata.append(osdmap.get_pool_name(pid) + "\n");
}
}
if (f) {
f->close_section();
f->flush(rdata);
}
}
} else if (prefix == "osd crush get-tunable") {
string tunable;
cmd_getval(cmdmap, "tunable", tunable);
ostringstream rss;
if (f)
f->open_object_section("tunable");
if (tunable == "straw_calc_version") {
if (f)
f->dump_int(tunable.c_str(), osdmap.crush->get_straw_calc_version());
else
rss << osdmap.crush->get_straw_calc_version() << "\n";
} else {
r = -EINVAL;
goto reply;
}
if (f) {
f->close_section();
f->flush(rdata);
} else {
rdata.append(rss.str());
}
r = 0;
} else if (prefix == "osd pool get") {
string poolstr;
cmd_getval(cmdmap, "pool", poolstr);
int64_t pool = osdmap.lookup_pg_pool_name(poolstr.c_str());
if (pool < 0) {
ss << "unrecognized pool '" << poolstr << "'";
r = -ENOENT;
goto reply;
}
const pg_pool_t *p = osdmap.get_pg_pool(pool);
string var;
cmd_getval(cmdmap, "var", var);
typedef std::map<std::string, osd_pool_get_choices> choices_map_t;
const choices_map_t ALL_CHOICES = {
{"size", SIZE},
{"min_size", MIN_SIZE},
{"pg_num", PG_NUM}, {"pgp_num", PGP_NUM},
{"crush_rule", CRUSH_RULE},
{"hashpspool", HASHPSPOOL},
{"eio", POOL_EIO},
{"allow_ec_overwrites", EC_OVERWRITES}, {"nodelete", NODELETE},
{"nopgchange", NOPGCHANGE}, {"nosizechange", NOSIZECHANGE},
{"noscrub", NOSCRUB}, {"nodeep-scrub", NODEEP_SCRUB},
{"write_fadvise_dontneed", WRITE_FADVISE_DONTNEED},
{"hit_set_type", HIT_SET_TYPE}, {"hit_set_period", HIT_SET_PERIOD},
{"hit_set_count", HIT_SET_COUNT}, {"hit_set_fpp", HIT_SET_FPP},
{"use_gmt_hitset", USE_GMT_HITSET},
{"target_max_objects", TARGET_MAX_OBJECTS},
{"target_max_bytes", TARGET_MAX_BYTES},
{"cache_target_dirty_ratio", CACHE_TARGET_DIRTY_RATIO},
{"cache_target_dirty_high_ratio", CACHE_TARGET_DIRTY_HIGH_RATIO},
{"cache_target_full_ratio", CACHE_TARGET_FULL_RATIO},
{"cache_min_flush_age", CACHE_MIN_FLUSH_AGE},
{"cache_min_evict_age", CACHE_MIN_EVICT_AGE},
{"erasure_code_profile", ERASURE_CODE_PROFILE},
{"min_read_recency_for_promote", MIN_READ_RECENCY_FOR_PROMOTE},
{"min_write_recency_for_promote", MIN_WRITE_RECENCY_FOR_PROMOTE},
{"fast_read", FAST_READ},
{"hit_set_grade_decay_rate", HIT_SET_GRADE_DECAY_RATE},
{"hit_set_search_last_n", HIT_SET_SEARCH_LAST_N},
{"scrub_min_interval", SCRUB_MIN_INTERVAL},
{"scrub_max_interval", SCRUB_MAX_INTERVAL},
{"deep_scrub_interval", DEEP_SCRUB_INTERVAL},
{"recovery_priority", RECOVERY_PRIORITY},
{"recovery_op_priority", RECOVERY_OP_PRIORITY},
{"scrub_priority", SCRUB_PRIORITY},
{"compression_mode", COMPRESSION_MODE},
{"compression_algorithm", COMPRESSION_ALGORITHM},
{"compression_required_ratio", COMPRESSION_REQUIRED_RATIO},
{"compression_max_blob_size", COMPRESSION_MAX_BLOB_SIZE},
{"compression_min_blob_size", COMPRESSION_MIN_BLOB_SIZE},
{"csum_type", CSUM_TYPE},
{"csum_max_block", CSUM_MAX_BLOCK},
{"csum_min_block", CSUM_MIN_BLOCK},
{"fingerprint_algorithm", FINGERPRINT_ALGORITHM},
{"pg_autoscale_mode", PG_AUTOSCALE_MODE},
{"pg_num_min", PG_NUM_MIN},
{"pg_num_max", PG_NUM_MAX},
{"target_size_bytes", TARGET_SIZE_BYTES},
{"target_size_ratio", TARGET_SIZE_RATIO},
{"pg_autoscale_bias", PG_AUTOSCALE_BIAS},
{"dedup_tier", DEDUP_TIER},
{"dedup_chunk_algorithm", DEDUP_CHUNK_ALGORITHM},
{"dedup_cdc_chunk_size", DEDUP_CDC_CHUNK_SIZE},
{"bulk", BULK}
};
typedef std::set<osd_pool_get_choices> choices_set_t;
const choices_set_t ONLY_TIER_CHOICES = {
HIT_SET_TYPE, HIT_SET_PERIOD, HIT_SET_COUNT, HIT_SET_FPP,
TARGET_MAX_OBJECTS, TARGET_MAX_BYTES, CACHE_TARGET_FULL_RATIO,
CACHE_TARGET_DIRTY_RATIO, CACHE_TARGET_DIRTY_HIGH_RATIO,
CACHE_MIN_FLUSH_AGE, CACHE_MIN_EVICT_AGE,
MIN_READ_RECENCY_FOR_PROMOTE,
MIN_WRITE_RECENCY_FOR_PROMOTE,
HIT_SET_GRADE_DECAY_RATE, HIT_SET_SEARCH_LAST_N
};
const choices_set_t ONLY_ERASURE_CHOICES = {
EC_OVERWRITES, ERASURE_CODE_PROFILE
};
choices_set_t selected_choices;
if (var == "all") {
for(choices_map_t::const_iterator it = ALL_CHOICES.begin();
it != ALL_CHOICES.end(); ++it) {
selected_choices.insert(it->second);
}
if(!p->is_tier()) {
selected_choices = subtract_second_from_first(selected_choices,
ONLY_TIER_CHOICES);
}
if(!p->is_erasure()) {
selected_choices = subtract_second_from_first(selected_choices,
ONLY_ERASURE_CHOICES);
}
} else /* var != "all" */ {
choices_map_t::const_iterator found = ALL_CHOICES.find(var);
if (found == ALL_CHOICES.end()) {
ss << "pool '" << poolstr
<< "': invalid variable: '" << var << "'";
r = -EINVAL;
goto reply;
}
osd_pool_get_choices selected = found->second;
if (!p->is_tier() &&
ONLY_TIER_CHOICES.find(selected) != ONLY_TIER_CHOICES.end()) {
ss << "pool '" << poolstr
<< "' is not a tier pool: variable not applicable";
r = -EACCES;
goto reply;
}
if (!p->is_erasure() &&
ONLY_ERASURE_CHOICES.find(selected)
!= ONLY_ERASURE_CHOICES.end()) {
ss << "pool '" << poolstr
<< "' is not a erasure pool: variable not applicable";
r = -EACCES;
goto reply;
}
if (pool_opts_t::is_opt_name(var) &&
!p->opts.is_set(pool_opts_t::get_opt_desc(var).key)) {
ss << "option '" << var << "' is not set on pool '" << poolstr << "'";
r = -ENOENT;
goto reply;
}
selected_choices.insert(selected);
}
if (f) {
f->open_object_section("pool");
f->dump_string("pool", poolstr);
f->dump_int("pool_id", pool);
for(choices_set_t::const_iterator it = selected_choices.begin();
it != selected_choices.end(); ++it) {
choices_map_t::const_iterator i;
for (i = ALL_CHOICES.begin(); i != ALL_CHOICES.end(); ++i) {
if (i->second == *it) {
break;
}
}
ceph_assert(i != ALL_CHOICES.end());
switch(*it) {
case PG_NUM:
f->dump_int("pg_num", p->get_pg_num());
break;
case PGP_NUM:
f->dump_int("pgp_num", p->get_pgp_num());
break;
case SIZE:
f->dump_int("size", p->get_size());
break;
case MIN_SIZE:
f->dump_int("min_size", p->get_min_size());
break;
case CRUSH_RULE:
if (osdmap.crush->rule_exists(p->get_crush_rule())) {
f->dump_string("crush_rule", osdmap.crush->get_rule_name(
p->get_crush_rule()));
} else {
f->dump_string("crush_rule", stringify(p->get_crush_rule()));
}
break;
case EC_OVERWRITES:
f->dump_bool("allow_ec_overwrites",
p->has_flag(pg_pool_t::FLAG_EC_OVERWRITES));
break;
case PG_AUTOSCALE_MODE:
f->dump_string("pg_autoscale_mode",
pg_pool_t::get_pg_autoscale_mode_name(
p->pg_autoscale_mode));
break;
case HASHPSPOOL:
case POOL_EIO:
case NODELETE:
case BULK:
case NOPGCHANGE:
case NOSIZECHANGE:
case WRITE_FADVISE_DONTNEED:
case NOSCRUB:
case NODEEP_SCRUB:
f->dump_bool(i->first.c_str(),
p->has_flag(pg_pool_t::get_flag_by_name(i->first)));
break;
case HIT_SET_PERIOD:
f->dump_int("hit_set_period", p->hit_set_period);
break;
case HIT_SET_COUNT:
f->dump_int("hit_set_count", p->hit_set_count);
break;
case HIT_SET_TYPE:
f->dump_string("hit_set_type",
HitSet::get_type_name(p->hit_set_params.get_type()));
break;
case HIT_SET_FPP:
{
if (p->hit_set_params.get_type() == HitSet::TYPE_BLOOM) {
BloomHitSet::Params *bloomp =
static_cast<BloomHitSet::Params*>(p->hit_set_params.impl.get());
f->dump_float("hit_set_fpp", bloomp->get_fpp());
} else if(var != "all") {
f->close_section();
ss << "hit set is not of type Bloom; " <<
"invalid to get a false positive rate!";
r = -EINVAL;
goto reply;
}
}
break;
case USE_GMT_HITSET:
f->dump_bool("use_gmt_hitset", p->use_gmt_hitset);
break;
case TARGET_MAX_OBJECTS:
f->dump_unsigned("target_max_objects", p->target_max_objects);
break;
case TARGET_MAX_BYTES:
f->dump_unsigned("target_max_bytes", p->target_max_bytes);
break;
case CACHE_TARGET_DIRTY_RATIO:
f->dump_unsigned("cache_target_dirty_ratio_micro",
p->cache_target_dirty_ratio_micro);
f->dump_float("cache_target_dirty_ratio",
((float)p->cache_target_dirty_ratio_micro/1000000));
break;
case CACHE_TARGET_DIRTY_HIGH_RATIO:
f->dump_unsigned("cache_target_dirty_high_ratio_micro",
p->cache_target_dirty_high_ratio_micro);
f->dump_float("cache_target_dirty_high_ratio",
((float)p->cache_target_dirty_high_ratio_micro/1000000));
break;
case CACHE_TARGET_FULL_RATIO:
f->dump_unsigned("cache_target_full_ratio_micro",
p->cache_target_full_ratio_micro);
f->dump_float("cache_target_full_ratio",
((float)p->cache_target_full_ratio_micro/1000000));
break;
case CACHE_MIN_FLUSH_AGE:
f->dump_unsigned("cache_min_flush_age", p->cache_min_flush_age);
break;
case CACHE_MIN_EVICT_AGE:
f->dump_unsigned("cache_min_evict_age", p->cache_min_evict_age);
break;
case ERASURE_CODE_PROFILE:
f->dump_string("erasure_code_profile", p->erasure_code_profile);
break;
case MIN_READ_RECENCY_FOR_PROMOTE:
f->dump_int("min_read_recency_for_promote",
p->min_read_recency_for_promote);
break;
case MIN_WRITE_RECENCY_FOR_PROMOTE:
f->dump_int("min_write_recency_for_promote",
p->min_write_recency_for_promote);
break;
case FAST_READ:
f->dump_int("fast_read", p->fast_read);
break;
case HIT_SET_GRADE_DECAY_RATE:
f->dump_int("hit_set_grade_decay_rate",
p->hit_set_grade_decay_rate);
break;
case HIT_SET_SEARCH_LAST_N:
f->dump_int("hit_set_search_last_n",
p->hit_set_search_last_n);
break;
case SCRUB_MIN_INTERVAL:
case SCRUB_MAX_INTERVAL:
case DEEP_SCRUB_INTERVAL:
case RECOVERY_PRIORITY:
case RECOVERY_OP_PRIORITY:
case SCRUB_PRIORITY:
case COMPRESSION_MODE:
case COMPRESSION_ALGORITHM:
case COMPRESSION_REQUIRED_RATIO:
case COMPRESSION_MAX_BLOB_SIZE:
case COMPRESSION_MIN_BLOB_SIZE:
case CSUM_TYPE:
case CSUM_MAX_BLOCK:
case CSUM_MIN_BLOCK:
case FINGERPRINT_ALGORITHM:
case PG_NUM_MIN:
case PG_NUM_MAX:
case TARGET_SIZE_BYTES:
case TARGET_SIZE_RATIO:
case PG_AUTOSCALE_BIAS:
case DEDUP_TIER:
case DEDUP_CHUNK_ALGORITHM:
case DEDUP_CDC_CHUNK_SIZE:
pool_opts_t::key_t key = pool_opts_t::get_opt_desc(i->first).key;
if (p->opts.is_set(key)) {
if(*it == CSUM_TYPE) {
int64_t val;
p->opts.get(pool_opts_t::CSUM_TYPE, &val);
f->dump_string(i->first.c_str(), Checksummer::get_csum_type_string(val));
} else {
p->opts.dump(i->first, f.get());
}
}
break;
}
}
f->close_section();
f->flush(rdata);
} else /* !f */ {
for(choices_set_t::const_iterator it = selected_choices.begin();
it != selected_choices.end(); ++it) {
choices_map_t::const_iterator i;
switch(*it) {
case PG_NUM:
ss << "pg_num: " << p->get_pg_num() << "\n";
break;
case PGP_NUM:
ss << "pgp_num: " << p->get_pgp_num() << "\n";
break;
case SIZE:
ss << "size: " << p->get_size() << "\n";
break;
case MIN_SIZE:
ss << "min_size: " << p->get_min_size() << "\n";
break;
case CRUSH_RULE:
if (osdmap.crush->rule_exists(p->get_crush_rule())) {
ss << "crush_rule: " << osdmap.crush->get_rule_name(
p->get_crush_rule()) << "\n";
} else {
ss << "crush_rule: " << p->get_crush_rule() << "\n";
}
break;
case PG_AUTOSCALE_MODE:
ss << "pg_autoscale_mode: " << pg_pool_t::get_pg_autoscale_mode_name(
p->pg_autoscale_mode) <<"\n";
break;
case HIT_SET_PERIOD:
ss << "hit_set_period: " << p->hit_set_period << "\n";
break;
case HIT_SET_COUNT:
ss << "hit_set_count: " << p->hit_set_count << "\n";
break;
case HIT_SET_TYPE:
ss << "hit_set_type: " <<
HitSet::get_type_name(p->hit_set_params.get_type()) << "\n";
break;
case HIT_SET_FPP:
{
if (p->hit_set_params.get_type() == HitSet::TYPE_BLOOM) {
BloomHitSet::Params *bloomp =
static_cast<BloomHitSet::Params*>(p->hit_set_params.impl.get());
ss << "hit_set_fpp: " << bloomp->get_fpp() << "\n";
} else if(var != "all") {
ss << "hit set is not of type Bloom; " <<
"invalid to get a false positive rate!";
r = -EINVAL;
goto reply;
}
}
break;
case USE_GMT_HITSET:
ss << "use_gmt_hitset: " << p->use_gmt_hitset << "\n";
break;
case TARGET_MAX_OBJECTS:
ss << "target_max_objects: " << p->target_max_objects << "\n";
break;
case TARGET_MAX_BYTES:
ss << "target_max_bytes: " << p->target_max_bytes << "\n";
break;
case CACHE_TARGET_DIRTY_RATIO:
ss << "cache_target_dirty_ratio: "
<< ((float)p->cache_target_dirty_ratio_micro/1000000) << "\n";
break;
case CACHE_TARGET_DIRTY_HIGH_RATIO:
ss << "cache_target_dirty_high_ratio: "
<< ((float)p->cache_target_dirty_high_ratio_micro/1000000) << "\n";
break;
case CACHE_TARGET_FULL_RATIO:
ss << "cache_target_full_ratio: "
<< ((float)p->cache_target_full_ratio_micro/1000000) << "\n";
break;
case CACHE_MIN_FLUSH_AGE:
ss << "cache_min_flush_age: " << p->cache_min_flush_age << "\n";
break;
case CACHE_MIN_EVICT_AGE:
ss << "cache_min_evict_age: " << p->cache_min_evict_age << "\n";
break;
case ERASURE_CODE_PROFILE:
ss << "erasure_code_profile: " << p->erasure_code_profile << "\n";
break;
case MIN_READ_RECENCY_FOR_PROMOTE:
ss << "min_read_recency_for_promote: " <<
p->min_read_recency_for_promote << "\n";
break;
case HIT_SET_GRADE_DECAY_RATE:
ss << "hit_set_grade_decay_rate: " <<
p->hit_set_grade_decay_rate << "\n";
break;
case HIT_SET_SEARCH_LAST_N:
ss << "hit_set_search_last_n: " <<
p->hit_set_search_last_n << "\n";
break;
case EC_OVERWRITES:
ss << "allow_ec_overwrites: " <<
(p->has_flag(pg_pool_t::FLAG_EC_OVERWRITES) ? "true" : "false") <<
"\n";
break;
case HASHPSPOOL:
case POOL_EIO:
case NODELETE:
case BULK:
case NOPGCHANGE:
case NOSIZECHANGE:
case WRITE_FADVISE_DONTNEED:
case NOSCRUB:
case NODEEP_SCRUB:
for (i = ALL_CHOICES.begin(); i != ALL_CHOICES.end(); ++i) {
if (i->second == *it)
break;
}
ceph_assert(i != ALL_CHOICES.end());
ss << i->first << ": " <<
(p->has_flag(pg_pool_t::get_flag_by_name(i->first)) ?
"true" : "false") << "\n";
break;
case MIN_WRITE_RECENCY_FOR_PROMOTE:
ss << "min_write_recency_for_promote: " <<
p->min_write_recency_for_promote << "\n";
break;
case FAST_READ:
ss << "fast_read: " << p->fast_read << "\n";
break;
case SCRUB_MIN_INTERVAL:
case SCRUB_MAX_INTERVAL:
case DEEP_SCRUB_INTERVAL:
case RECOVERY_PRIORITY:
case RECOVERY_OP_PRIORITY:
case SCRUB_PRIORITY:
case COMPRESSION_MODE:
case COMPRESSION_ALGORITHM:
case COMPRESSION_REQUIRED_RATIO:
case COMPRESSION_MAX_BLOB_SIZE:
case COMPRESSION_MIN_BLOB_SIZE:
case CSUM_TYPE:
case CSUM_MAX_BLOCK:
case CSUM_MIN_BLOCK:
case FINGERPRINT_ALGORITHM:
case PG_NUM_MIN:
case PG_NUM_MAX:
case TARGET_SIZE_BYTES:
case TARGET_SIZE_RATIO:
case PG_AUTOSCALE_BIAS:
case DEDUP_TIER:
case DEDUP_CHUNK_ALGORITHM:
case DEDUP_CDC_CHUNK_SIZE:
for (i = ALL_CHOICES.begin(); i != ALL_CHOICES.end(); ++i) {
if (i->second == *it)
break;
}
ceph_assert(i != ALL_CHOICES.end());
{
pool_opts_t::key_t key = pool_opts_t::get_opt_desc(i->first).key;
if (p->opts.is_set(key)) {
if(key == pool_opts_t::CSUM_TYPE) {
int64_t val;
p->opts.get(key, &val);
ss << i->first << ": " << Checksummer::get_csum_type_string(val) << "\n";
} else {
ss << i->first << ": " << p->opts.get(key) << "\n";
}
}
}
break;
}
rdata.append(ss.str());
ss.str("");
}
}
r = 0;
} else if (prefix == "osd pool get-quota") {
string pool_name;
cmd_getval(cmdmap, "pool", pool_name);
int64_t poolid = osdmap.lookup_pg_pool_name(pool_name);
if (poolid < 0) {
ceph_assert(poolid == -ENOENT);
ss << "unrecognized pool '" << pool_name << "'";
r = -ENOENT;
goto reply;
}
const pg_pool_t *p = osdmap.get_pg_pool(poolid);
const pool_stat_t* pstat = mon.mgrstatmon()->get_pool_stat(poolid);
if (!pstat) {
ss << "no stats for pool '" << pool_name << "'";
r = -ENOENT;
goto reply;
}
const object_stat_sum_t& sum = pstat->stats.sum;
if (f) {
f->open_object_section("pool_quotas");
f->dump_string("pool_name", pool_name);
f->dump_unsigned("pool_id", poolid);
f->dump_unsigned("quota_max_objects", p->quota_max_objects);
f->dump_int("current_num_objects", sum.num_objects);
f->dump_unsigned("quota_max_bytes", p->quota_max_bytes);
f->dump_int("current_num_bytes", sum.num_bytes);
f->close_section();
f->flush(rdata);
} else {
stringstream rs;
rs << "quotas for pool '" << pool_name << "':\n"
<< " max objects: ";
if (p->quota_max_objects == 0)
rs << "N/A";
else {
rs << si_u_t(p->quota_max_objects) << " objects";
rs << " (current num objects: " << sum.num_objects << " objects)";
}
rs << "\n"
<< " max bytes : ";
if (p->quota_max_bytes == 0)
rs << "N/A";
else {
rs << byte_u_t(p->quota_max_bytes);
rs << " (current num bytes: " << sum.num_bytes << " bytes)";
}
rdata.append(rs.str());
}
rdata.append("\n");
r = 0;
} else if (prefix == "osd crush rule list" ||
prefix == "osd crush rule ls") {
if (f) {
f->open_array_section("rules");
osdmap.crush->list_rules(f.get());
f->close_section();
f->flush(rdata);
} else {
ostringstream ss;
osdmap.crush->list_rules(&ss);
rdata.append(ss.str());
}
} else if (prefix == "osd crush rule ls-by-class") {
string class_name;
cmd_getval(cmdmap, "class", class_name);
if (class_name.empty()) {
ss << "no class specified";
r = -EINVAL;
goto reply;
}
set<int> rules;
r = osdmap.crush->get_rules_by_class(class_name, &rules);
if (r < 0) {
ss << "failed to get rules by class '" << class_name << "'";
goto reply;
}
if (f) {
f->open_array_section("rules");
for (auto &rule: rules) {
f->dump_string("name", osdmap.crush->get_rule_name(rule));
}
f->close_section();
f->flush(rdata);
} else {
ostringstream rs;
for (auto &rule: rules) {
rs << osdmap.crush->get_rule_name(rule) << "\n";
}
rdata.append(rs.str());
}
} else if (prefix == "osd crush rule dump") {
string name;
cmd_getval(cmdmap, "name", name);
string format;
cmd_getval(cmdmap, "format", format);
boost::scoped_ptr<Formatter> f(Formatter::create(format, "json-pretty", "json-pretty"));
if (name == "") {
f->open_array_section("rules");
osdmap.crush->dump_rules(f.get());
f->close_section();
} else {
int ruleno = osdmap.crush->get_rule_id(name);
if (ruleno < 0) {
ss << "unknown crush rule '" << name << "'";
r = ruleno;
goto reply;
}
osdmap.crush->dump_rule(ruleno, f.get());
}
ostringstream rs;
f->flush(rs);
rs << "\n";
rdata.append(rs.str());
} else if (prefix == "osd crush dump") {
string format;
cmd_getval(cmdmap, "format", format);
boost::scoped_ptr<Formatter> f(Formatter::create(format, "json-pretty", "json-pretty"));
f->open_object_section("crush_map");
osdmap.crush->dump(f.get());
f->close_section();
ostringstream rs;
f->flush(rs);
rs << "\n";
rdata.append(rs.str());
} else if (prefix == "osd crush show-tunables") {
string format;
cmd_getval(cmdmap, "format", format);
boost::scoped_ptr<Formatter> f(Formatter::create(format, "json-pretty", "json-pretty"));
f->open_object_section("crush_map_tunables");
osdmap.crush->dump_tunables(f.get());
f->close_section();
ostringstream rs;
f->flush(rs);
rs << "\n";
rdata.append(rs.str());
} else if (prefix == "osd crush tree") {
bool show_shadow = false;
if (!cmd_getval_compat_cephbool(cmdmap, "show_shadow", show_shadow)) {
std::string shadow;
if (cmd_getval(cmdmap, "shadow", shadow) &&
shadow == "--show-shadow") {
show_shadow = true;
}
}
boost::scoped_ptr<Formatter> f(Formatter::create(format));
if (f) {
f->open_object_section("crush_tree");
osdmap.crush->dump_tree(nullptr,
f.get(),
osdmap.get_pool_names(),
show_shadow);
f->close_section();
f->flush(rdata);
} else {
ostringstream ss;
osdmap.crush->dump_tree(&ss,
nullptr,
osdmap.get_pool_names(),
show_shadow);
rdata.append(ss.str());
}
} else if (prefix == "osd crush ls") {
string name;
if (!cmd_getval(cmdmap, "node", name)) {
ss << "no node specified";
r = -EINVAL;
goto reply;
}
if (!osdmap.crush->name_exists(name)) {
ss << "node '" << name << "' does not exist";
r = -ENOENT;
goto reply;
}
int id = osdmap.crush->get_item_id(name);
list<int> result;
if (id >= 0) {
result.push_back(id);
} else {
int num = osdmap.crush->get_bucket_size(id);
for (int i = 0; i < num; ++i) {
result.push_back(osdmap.crush->get_bucket_item(id, i));
}
}
if (f) {
f->open_array_section("items");
for (auto i : result) {
f->dump_string("item", osdmap.crush->get_item_name(i));
}
f->close_section();
f->flush(rdata);
} else {
ostringstream ss;
for (auto i : result) {
ss << osdmap.crush->get_item_name(i) << "\n";
}
rdata.append(ss.str());
}
r = 0;
} else if (prefix == "osd crush class ls") {
boost::scoped_ptr<Formatter> f(Formatter::create(format, "json-pretty", "json-pretty"));
f->open_array_section("crush_classes");
for (auto i : osdmap.crush->class_name)
f->dump_string("class", i.second);
f->close_section();
f->flush(rdata);
} else if (prefix == "osd crush class ls-osd") {
string name;
cmd_getval(cmdmap, "class", name);
set<int> osds;
osdmap.crush->get_devices_by_class(name, &osds);
if (f) {
f->open_array_section("osds");
for (auto &osd: osds)
f->dump_int("osd", osd);
f->close_section();
f->flush(rdata);
} else {
bool first = true;
for (auto &osd : osds) {
if (!first)
ds << "\n";
first = false;
ds << osd;
}
rdata.append(ds);
}
} else if (prefix == "osd crush get-device-class") {
vector<string> idvec;
cmd_getval(cmdmap, "ids", idvec);
map<int, string> class_by_osd;
for (auto& id : idvec) {
ostringstream ts;
long osd = parse_osd_id(id.c_str(), &ts);
if (osd < 0) {
ss << "unable to parse osd id:'" << id << "'";
r = -EINVAL;
goto reply;
}
auto device_class = osdmap.crush->get_item_class(osd);
if (device_class)
class_by_osd[osd] = device_class;
else
class_by_osd[osd] = ""; // no class
}
if (f) {
f->open_array_section("osd_device_classes");
for (auto& i : class_by_osd) {
f->open_object_section("osd_device_class");
f->dump_int("osd", i.first);
f->dump_string("device_class", i.second);
f->close_section();
}
f->close_section();
f->flush(rdata);
} else {
if (class_by_osd.size() == 1) {
// for single input, make a clean output
ds << class_by_osd.begin()->second;
} else {
// note that we do not group osds by class here
for (auto it = class_by_osd.begin();
it != class_by_osd.end();
it++) {
ds << "osd." << it->first << ' ' << it->second;
if (next(it) != class_by_osd.end())
ds << '\n';
}
}
rdata.append(ds);
}
} else if (prefix == "osd erasure-code-profile ls") {
const auto &profiles = osdmap.get_erasure_code_profiles();
if (f)
f->open_array_section("erasure-code-profiles");
for (auto i = profiles.begin(); i != profiles.end(); ++i) {
if (f)
f->dump_string("profile", i->first.c_str());
else
rdata.append(i->first + "\n");
}
if (f) {
f->close_section();
ostringstream rs;
f->flush(rs);
rs << "\n";
rdata.append(rs.str());
}
} else if (prefix == "osd crush weight-set ls") {
boost::scoped_ptr<Formatter> f(Formatter::create(format));
if (f) {
f->open_array_section("weight_sets");
if (osdmap.crush->have_choose_args(CrushWrapper::DEFAULT_CHOOSE_ARGS)) {
f->dump_string("pool", "(compat)");
}
for (auto& i : osdmap.crush->choose_args) {
if (i.first >= 0) {
f->dump_string("pool", osdmap.get_pool_name(i.first));
}
}
f->close_section();
f->flush(rdata);
} else {
ostringstream rs;
if (osdmap.crush->have_choose_args(CrushWrapper::DEFAULT_CHOOSE_ARGS)) {
rs << "(compat)\n";
}
for (auto& i : osdmap.crush->choose_args) {
if (i.first >= 0) {
rs << osdmap.get_pool_name(i.first) << "\n";
}
}
rdata.append(rs.str());
}
} else if (prefix == "osd crush weight-set dump") {
boost::scoped_ptr<Formatter> f(Formatter::create(format, "json-pretty",
"json-pretty"));
osdmap.crush->dump_choose_args(f.get());
f->flush(rdata);
} else if (prefix == "osd erasure-code-profile get") {
string name;
cmd_getval(cmdmap, "name", name);
if (!osdmap.has_erasure_code_profile(name)) {
ss << "unknown erasure code profile '" << name << "'";
r = -ENOENT;
goto reply;
}
const map<string,string> &profile = osdmap.get_erasure_code_profile(name);
if (f)
f->open_object_section("profile");
for (map<string,string>::const_iterator i = profile.begin();
i != profile.end();
++i) {
if (f)
f->dump_string(i->first.c_str(), i->second.c_str());
else
rdata.append(i->first + "=" + i->second + "\n");
}
if (f) {
f->close_section();
ostringstream rs;
f->flush(rs);
rs << "\n";
rdata.append(rs.str());
}
} else if (prefix == "osd pool application get") {
boost::scoped_ptr<Formatter> f(Formatter::create(format, "json-pretty",
"json-pretty"));
string pool_name;
cmd_getval(cmdmap, "pool", pool_name);
string app;
cmd_getval(cmdmap, "app", app);
string key;
cmd_getval(cmdmap, "key", key);
if (pool_name.empty()) {
// all
f->open_object_section("pools");
for (const auto &pool : osdmap.pools) {
std::string name("<unknown>");
const auto &pni = osdmap.pool_name.find(pool.first);
if (pni != osdmap.pool_name.end())
name = pni->second;
f->open_object_section(name.c_str());
for (auto &app_pair : pool.second.application_metadata) {
f->open_object_section(app_pair.first.c_str());
for (auto &kv_pair : app_pair.second) {
f->dump_string(kv_pair.first.c_str(), kv_pair.second);
}
f->close_section();
}
f->close_section(); // name
}
f->close_section(); // pools
f->flush(rdata);
} else {
int64_t pool = osdmap.lookup_pg_pool_name(pool_name.c_str());
if (pool < 0) {
ss << "unrecognized pool '" << pool_name << "'";
r = -ENOENT;
goto reply;
}
auto p = osdmap.get_pg_pool(pool);
// filter by pool
if (app.empty()) {
f->open_object_section(pool_name.c_str());
for (auto &app_pair : p->application_metadata) {
f->open_object_section(app_pair.first.c_str());
for (auto &kv_pair : app_pair.second) {
f->dump_string(kv_pair.first.c_str(), kv_pair.second);
}
f->close_section(); // application
}
f->close_section(); // pool_name
f->flush(rdata);
goto reply;
}
auto app_it = p->application_metadata.find(app);
if (app_it == p->application_metadata.end()) {
ss << "pool '" << pool_name << "' has no application '" << app << "'";
r = -ENOENT;
goto reply;
}
// filter by pool + app
if (key.empty()) {
f->open_object_section(app_it->first.c_str());
for (auto &kv_pair : app_it->second) {
f->dump_string(kv_pair.first.c_str(), kv_pair.second);
}
f->close_section(); // application
f->flush(rdata);
goto reply;
}
// filter by pool + app + key
auto key_it = app_it->second.find(key);
if (key_it == app_it->second.end()) {
ss << "application '" << app << "' on pool '" << pool_name
<< "' does not have key '" << key << "'";
r = -ENOENT;
goto reply;
}
ss << key_it->second << "\n";
rdata.append(ss.str());
ss.str("");
}
} else if (prefix == "osd get-require-min-compat-client") {
ss << osdmap.require_min_compat_client << std::endl;
rdata.append(ss.str());
ss.str("");
goto reply;
} else if (prefix == "osd pool application enable" ||
prefix == "osd pool application disable" ||
prefix == "osd pool application set" ||
prefix == "osd pool application rm") {
bool changed = false;
r = preprocess_command_pool_application(prefix, cmdmap, ss, &changed);
if (r != 0) {
// Error, reply.
goto reply;
} else if (changed) {
// Valid mutation, proceed to prepare phase
return false;
} else {
// Idempotent case, reply
goto reply;
}
} else {
// try prepare update
return false;
}
reply:
string rs;
getline(ss, rs);
mon.reply_command(op, r, rs, rdata, get_last_committed());
return true;
}
// Set flag bits on a pool in the pending (uncommitted) map.
//
// @param pool_id  pool to modify
// @param flags    pg_pool_t::FLAG_* bits to set (OR'd into existing flags)
void OSDMonitor::set_pool_flags(int64_t pool_id, uint64_t flags)
{
  // get_new_pool returns the pending copy, creating it from the current
  // committed pool if this is the first modification in this epoch.
  pg_pool_t *p =
    pending_inc.get_new_pool(pool_id, osdmap.get_pg_pool(pool_id));
  ceph_assert(p);
  p->set_flag(flags);
}
// Clear flag bits on a pool in the pending (uncommitted) map.
//
// @param pool_id  pool to modify
// @param flags    pg_pool_t::FLAG_* bits to clear
void OSDMonitor::clear_pool_flags(int64_t pool_id, uint64_t flags)
{
  // get_new_pool returns the pending copy, creating it from the current
  // committed pool if this is the first modification in this epoch.
  pg_pool_t *p =
    pending_inc.get_new_pool(pool_id, osdmap.get_pg_pool(pool_id));
  ceph_assert(p);
  p->unset_flag(flags);
}
// Build the mon-store key for a purged-snap epoch record:
// "purged_epoch_<epoch as 8-digit zero-padded hex>".
string OSDMonitor::make_purged_snap_epoch_key(epoch_t epoch)
{
  char buf[80];
  snprintf(buf, sizeof(buf), "purged_epoch_%08lx", (unsigned long)epoch);
  return string(buf);
}
// Build the mon-store key for a purged-snap interval record:
// "purged_snap_<pool>_<snap as 16-digit zero-padded hex>".
// The hex snap id keeps keys for one pool in numeric order under
// lexicographic iteration, which lookup_purged_snap() relies on.
string OSDMonitor::make_purged_snap_key(int64_t pool, snapid_t snap)
{
  char buf[80];
  snprintf(buf, sizeof(buf), "purged_snap_%llu_%016llx",
	   (unsigned long long)pool, (unsigned long long)snap);
  return string(buf);
}
// Encode a purged-snap interval record and return its store key.
//
// The value encodes (begin, end, epoch) for the interval
// [snap, snap + num).  The key embeds the *last* snap of the interval
// so that a forward (lower_bound) scan can locate the interval covering
// any given snap id without reverse iteration.
//
// @param pool   pool the interval belongs to
// @param snap   first snap id in the interval
// @param num    number of snaps in the interval
// @param epoch  epoch at which the purge was recorded
// @param v      [out] bufferlist receiving the encoded value
// @return the mon-store key under which @v should be stored
string OSDMonitor::make_purged_snap_key_value(
  int64_t pool, snapid_t snap, snapid_t num,
  epoch_t epoch, bufferlist *v)
{
  snapid_t end = snap + num;  // one past the last snap in the interval
  encode(snap, *v);
  encode(end, *v);
  encode(epoch, *v);
  return make_purged_snap_key(pool, end - 1);
}
// Look up the recorded purged-snap interval (if any) covering @snap in
// @pool.
//
// Intervals are stored under OSD_SNAP_PREFIX with keys built by
// make_purged_snap_key() ("purged_snap_<pool>_<hex last snap>") and
// values encoding (begin, end, epoch).  Because the key embeds the
// interval's *last* snap, a single lower_bound() seek on the key for
// @snap lands on the first candidate interval whose end could cover it.
//
// @param pool   pool to search
// @param snap   snap id to look for
// @param begin  [out] start of the covering interval
// @param end    [out] one past the last snap of the covering interval
// @return 0 if an interval [*begin, *end) covering @snap was found,
//         -ENOENT otherwise.
int OSDMonitor::lookup_purged_snap(
  int64_t pool, snapid_t snap,
  snapid_t *begin, snapid_t *end)
{
  string k = make_purged_snap_key(pool, snap);
  auto it = mon.store->get_iterator(OSD_SNAP_PREFIX);
  it->lower_bound(k);
  if (!it->valid()) {
    // no key at or after ours in the store
    dout(20) << __func__
	     << " pool " << pool << " snap " << snap
	     << " - key '" << k << "' not found" << dendl;
    return -ENOENT;
  }
  if (it->key().find("purged_snap_") != 0) {
    // iterator ran past the purged_snap_* keyspace entirely
    dout(20) << __func__
	     << " pool " << pool << " snap " << snap
	     << " - key '" << k << "' got '" << it->key()
	     << "', wrong prefix" << dendl;
    return -ENOENT;
  }
  string gotk = it->key();
  const char *format = "purged_snap_%llu_";
  // NOTE(review): %llu formally pairs with unsigned long long*, not
  // long long*; pool ids are non-negative so this matches in practice,
  // but confirm before relying on it.
  long long int keypool;
  int n = sscanf(gotk.c_str(), format, &keypool);
  if (n != 1) {
    derr << __func__ << " invalid k '" << gotk << "'" << dendl;
    return -ENOENT;
  }
  if (pool != keypool) {
    // we landed on a neighboring pool's record; no interval for ours
    dout(20) << __func__
	     << " pool " << pool << " snap " << snap
	     << " - key '" << k << "' got '" << gotk
	     << "', wrong pool " << keypool
	     << dendl;
    return -ENOENT;
  }
  bufferlist v = it->value();
  auto p = v.cbegin();
  decode(*begin, p);
  decode(*end, p);
  if (snap < *begin || snap >= *end) {
    // an interval exists for this pool but it does not cover @snap
    dout(20) << __func__
	     << " pool " << pool << " snap " << snap
	     << " - found [" << *begin << "," << *end << "), no overlap"
	     << dendl;
    return -ENOENT;
  }
  return 0;
}
// Record that snaps [start, end) of @pool were purged, coalescing the
// new interval with any adjacent intervals already in the store.
//
// Four cases, depending on whether an existing interval covers start-1
// ("before") and/or end ("after"):
//  - both:        bridge into one [before_begin, after_end) record
//  - before only: extend it forward to end
//  - after only:  extend it backward to start
//  - neither:     insert a fresh [start, end) record
// Keys embed the interval's *last* snap (see make_purged_snap_key_value),
// so extending an interval forward changes its key and requires erasing
// the old record, while extending backward rewrites the same key — hence
// no erase in the "after only" case.
//
// NOTE(review): the @epoch parameter is unused; pending_inc.epoch is
// encoded into the records instead — confirm this is intentional.
void OSDMonitor::insert_purged_snap_update(
  int64_t pool,
  snapid_t start, snapid_t end,
  epoch_t epoch,
  MonitorDBStore::TransactionRef t)
{
  snapid_t before_begin, before_end;
  snapid_t after_begin, after_end;
  // is there an interval abutting/overlapping us on the left?
  int b = lookup_purged_snap(pool, start - 1,
			     &before_begin, &before_end);
  // ... and on the right?
  int a = lookup_purged_snap(pool, end,
			     &after_begin, &after_end);
  if (!b && !a) {
    dout(10) << __func__
	     << " [" << start << "," << end << ") - joins ["
	     << before_begin << "," << before_end << ") and ["
	     << after_begin << "," << after_end << ")" << dendl;
    // erase only the begin record; we'll overwrite the end one.
    t->erase(OSD_SNAP_PREFIX, make_purged_snap_key(pool, before_end - 1));
    bufferlist v;
    string k = make_purged_snap_key_value(pool,
					  before_begin, after_end - before_begin,
					  pending_inc.epoch, &v);
    t->put(OSD_SNAP_PREFIX, k, v);
  } else if (!b) {
    dout(10) << __func__
	     << " [" << start << "," << end << ") - join with earlier ["
	     << before_begin << "," << before_end << ")" << dendl;
    // forward extension moves the key; drop the old record first
    t->erase(OSD_SNAP_PREFIX, make_purged_snap_key(pool, before_end - 1));
    bufferlist v;
    string k = make_purged_snap_key_value(pool,
					  before_begin, end - before_begin,
					  pending_inc.epoch, &v);
    t->put(OSD_SNAP_PREFIX, k, v);
  } else if (!a) {
    dout(10) << __func__
	     << " [" << start << "," << end << ") - join with later ["
	     << after_begin << "," << after_end << ")" << dendl;
    // overwrite after record
    bufferlist v;
    string k = make_purged_snap_key_value(pool,
					  start, after_end - start,
					  pending_inc.epoch, &v);
    t->put(OSD_SNAP_PREFIX, k, v);
  } else {
    dout(10) << __func__
	     << " [" << start << "," << end << ") - new"
	     << dendl;
    bufferlist v;
    string k = make_purged_snap_key_value(pool,
					  start, end - start,
					  pending_inc.epoch, &v);
    t->put(OSD_SNAP_PREFIX, k, v);
  }
}
// Prune snap intervals that the OSDs (via the mgr's stats digest) report
// as purged from the OSDMap's removed_snaps_queue, capped per epoch by
// mon_max_snap_prune_per_epoch.
//
// @return true if anything was queued in pending_inc.new_purged_snaps,
//         i.e. the caller should propose a new epoch.
bool OSDMonitor::try_prune_purged_snaps()
{
  if (!mon.mgrstatmon()->is_readable()) {
    // the purged_snaps digest comes from the mgr; wait until we have one
    return false;
  }
  if (!pending_inc.new_purged_snaps.empty()) {
    return false;  // we already pruned for this epoch
  }
  unsigned max_prune = cct->_conf.get_val<uint64_t>(
    "mon_max_snap_prune_per_epoch");
  if (!max_prune) {
    // 0 is treated as "unlimited"; substitute a large finite cap
    max_prune = 100000;
  }
  dout(10) << __func__ << " max_prune " << max_prune << dendl;
  unsigned actually_pruned = 0;
  auto& purged_snaps = mon.mgrstatmon()->get_digest().purged_snaps;
  for (auto& p : osdmap.get_pools()) {
    auto q = purged_snaps.find(p.first);
    if (q == purged_snaps.end()) {
      continue;
    }
    auto& purged = q->second;
    if (purged.empty()) {
      dout(20) << __func__ << " " << p.first << " nothing purged" << dendl;
      continue;
    }
    dout(20) << __func__ << " pool " << p.first << " purged " << purged << dendl;
    snap_interval_set_t to_prune;
    // running total if everything gathered so far were pruned; used only
    // to enforce the per-epoch cap while scanning
    unsigned maybe_pruned = actually_pruned;
    for (auto i = purged.begin(); i != purged.end(); ++i) {
      snapid_t begin = i.get_start();
      auto end = i.get_start() + i.get_len();
      snapid_t pbegin = 0, pend = 0;
      int r = lookup_purged_snap(p.first, begin, &pbegin, &pend);
      if (r == 0) {
	// already purged.
	// be a bit aggressive about backing off here, because the mon may
	// do a lot of work going through this set, and if we know the
	// purged set from the OSDs is at least *partly* stale we may as
	// well wait for it to be fresh.
	dout(20) << __func__ << " we've already purged " << pbegin
		 << "~" << (pend - pbegin) << dendl;
	break;  // next pool
      }
      if (pbegin && pbegin > begin && pbegin < end) {
	// the tail of [begin,end) is purged; shorten the range
	end = pbegin;
      }
      to_prune.insert(begin, end - begin);
      maybe_pruned += end - begin;
      if (maybe_pruned >= max_prune) {
	break;
      }
    }
    if (!to_prune.empty()) {
      // PGs may still be reporting things as purged that we have already
      // pruned from removed_snaps_queue.
      snap_interval_set_t actual;
      auto r = osdmap.removed_snaps_queue.find(p.first);
      if (r != osdmap.removed_snaps_queue.end()) {
	actual.intersection_of(to_prune, r->second);
      }
      actually_pruned += actual.size();
      dout(10) << __func__ << " pool " << p.first << " reports pruned " << to_prune
	       << ", actual pruned " << actual << dendl;
      if (!actual.empty()) {
	pending_inc.new_purged_snaps[p.first].swap(actual);
      }
    }
    if (actually_pruned >= max_prune) {
      break;
    }
  }
  dout(10) << __func__ << " actually pruned " << actually_pruned << dendl;
  return !!actually_pruned;
}
// Reconcile per-pool quota flags against the latest usage statistics.
//
// For each pool with fresh stats from the mgr, determine whether it is
// at/over its byte or object quota and toggle FLAG_FULL_QUOTA (together
// with FLAG_FULL) in the pending map accordingly.
//
// @return true if any pool's flags were changed (caller should commit).
bool OSDMonitor::update_pools_status()
{
  if (!mon.mgrstatmon()->is_readable())
    return false;

  bool changed = false;
  for (auto& [poolid, pool] : osdmap.get_pools()) {
    const pool_stat_t *pstat = mon.mgrstatmon()->get_pool_stat(poolid);
    if (!pstat)
      continue;  // no stats for this pool yet
    const object_stat_sum_t& sum = pstat->stats.sum;
    const string& pool_name = osdmap.get_pool_name(poolid);

    // a quota of 0 means "no quota"
    const bool over_quota =
      (pool.quota_max_bytes > 0 &&
       (uint64_t)sum.num_bytes >= pool.quota_max_bytes) ||
      (pool.quota_max_objects > 0 &&
       (uint64_t)sum.num_objects >= pool.quota_max_objects);

    if (pool.has_flag(pg_pool_t::FLAG_FULL_QUOTA)) {
      if (over_quota)
	continue;  // still over quota; flag stays
      mon.clog->info() << "pool '" << pool_name
		       << "' no longer out of quota; removing NO_QUOTA flag";
      // below we cancel FLAG_FULL too, we'll set it again in
      // OSDMonitor::encode_pending if it still fails the osd-full checking.
      clear_pool_flags(poolid,
		       pg_pool_t::FLAG_FULL_QUOTA | pg_pool_t::FLAG_FULL);
      changed = true;
    } else {
      if (!over_quota)
	continue;  // under quota and not flagged; nothing to do
      if (pool.quota_max_bytes > 0 &&
	  (uint64_t)sum.num_bytes >= pool.quota_max_bytes) {
	mon.clog->warn() << "pool '" << pool_name << "' is full"
			 << " (reached quota's max_bytes: "
			 << byte_u_t(pool.quota_max_bytes) << ")";
      }
      if (pool.quota_max_objects > 0 &&
	  (uint64_t)sum.num_objects >= pool.quota_max_objects) {
	mon.clog->warn() << "pool '" << pool_name << "' is full"
			 << " (reached quota's max_objects: "
			 << pool.quota_max_objects << ")";
      }
      // set both FLAG_FULL_QUOTA and FLAG_FULL
      // note that below we try to cancel FLAG_BACKFILLFULL/NEARFULL too
      // since FLAG_FULL should always take precedence
      set_pool_flags(poolid,
		     pg_pool_t::FLAG_FULL_QUOTA | pg_pool_t::FLAG_FULL);
      clear_pool_flags(poolid,
		       pg_pool_t::FLAG_NEARFULL |
		       pg_pool_t::FLAG_BACKFILLFULL);
      changed = true;
    }
  }
  return changed;
}
int OSDMonitor::prepare_new_pool(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MPoolOp>();
  dout(10) << "prepare_new_pool from " << m->get_connection() << dendl;

  // Reject requests whose session is already gone.
  MonSession *session = op->get_session();
  if (!session)
    return -EPERM;

  // Delegate to the full-featured variant with all-default parameters:
  // replicated pool, default sizes/pg counts, no EC profile, no fast read.
  string erasure_code_profile;
  string rule_name;
  stringstream ss;
  const bool bulk = false;
  int err = prepare_new_pool(m->name, m->crush_rule, rule_name,
			     0, 0, 0, 0, 0, 0, 0.0,
			     erasure_code_profile,
			     pg_pool_t::TYPE_REPLICATED, 0, FAST_READ_OFF, {},
			     bulk,
			     cct->_conf.get_val<bool>("osd_pool_default_crimson"),
			     &ss);
  if (err < 0) {
    dout(10) << __func__ << " got " << err << " " << ss.str() << dendl;
  }
  return err;
}
int OSDMonitor::crush_rename_bucket(const string& srcname,
				    const string& dstname,
				    ostream *ss)
{
  //
  // If no pending crush exists yet, probe the stable map first so that a
  // doomed rename does not needlessly materialize a pending crush.
  //
  if (!_have_pending_crush()) {
    int err = _get_stable_crush().can_rename_bucket(srcname, dstname, ss);
    if (err)
      return err;
  }

  CrushWrapper newcrush = _get_pending_crush();
  if (int err = newcrush.rename_bucket(srcname, dstname, ss); err)
    return err;

  // Re-encode the modified crush map into the pending incremental.
  pending_inc.crush.clear();
  newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
  *ss << "renamed bucket " << srcname << " into " << dstname;
  return 0;
}
void OSDMonitor::check_legacy_ec_plugin(const string& plugin, const string& profile) const
{
string replacement = "";
if (plugin == "jerasure_generic" ||
plugin == "jerasure_sse3" ||
plugin == "jerasure_sse4" ||
plugin == "jerasure_neon") {
replacement = "jerasure";
} else if (plugin == "shec_generic" ||
plugin == "shec_sse3" ||
plugin == "shec_sse4" ||
plugin == "shec_neon") {
replacement = "shec";
}
if (replacement != "") {
dout(0) << "WARNING: erasure coding profile " << profile << " uses plugin "
<< plugin << " that has been deprecated. Please use "
<< replacement << " instead." << dendl;
}
}
// Validate and normalize an erasure code profile: load the plugin, let it
// initialize/canonicalize the profile keys, and sanity-check stripe_unit.
// Returns 0 on success or a negative errno (with a message in *ss).
int OSDMonitor::normalize_profile(const string& profilename,
				  ErasureCodeProfile &profile,
				  bool force,
				  ostream *ss)
{
  ErasureCodeInterfaceRef erasure_code;
  ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
  ErasureCodeProfile::const_iterator plugin = profile.find("plugin");
  // Guard against a profile without a 'plugin' entry; dereferencing end()
  // below would be undefined behavior (get_erasure_code has the same check).
  if (plugin == profile.end()) {
    *ss << "cannot determine the erasure code plugin"
	<< " because there is no 'plugin' entry in the erasure_code_profile "
	<< profile << std::endl;
    return -EINVAL;
  }
  check_legacy_ec_plugin(plugin->second, profilename);
  int err = instance.factory(plugin->second,
			     g_conf().get_val<std::string>("erasure_code_dir"),
			     profile, &erasure_code, ss);
  if (err) {
    return err;
  }
  // Let the plugin validate and normalize the remaining profile keys.
  err = erasure_code->init(profile, ss);
  if (err) {
    return err;
  }
  auto it = profile.find("stripe_unit");
  if (it != profile.end()) {
    string err_str;
    uint32_t stripe_unit = strict_iecstrtoll(it->second, &err_str);
    if (!err_str.empty()) {
      *ss << "could not parse stripe_unit '" << it->second
	  << "': " << err_str << std::endl;
      return -EINVAL;
    }
    uint32_t data_chunks = erasure_code->get_data_chunk_count();
    uint32_t chunk_size = erasure_code->get_chunk_size(stripe_unit * data_chunks);
    // The plugin may pad the stripe unit up to its alignment requirement;
    // reject values that would be silently padded.
    if (chunk_size != stripe_unit) {
      *ss << "stripe_unit " << stripe_unit << " does not match ec profile "
	  << "alignment. Would be padded to " << chunk_size
	  << std::endl;
      return -EINVAL;
    }
    if ((stripe_unit % 4096) != 0 && !force) {
      // fix: original message concatenated "...performance." and "use --force"
      // with no separator
      *ss << "stripe_unit should be a multiple of 4096 bytes for best performance."
	  << " use --force to override this check" << std::endl;
      return -EINVAL;
    }
  }
  return 0;
}
int OSDMonitor::crush_rule_create_erasure(const string &name,
					  const string &profile,
					  int *rule,
					  ostream *ss)
{
  // Rule already committed in the current osdmap?
  int found = osdmap.crush->get_rule_id(name);
  if (found != -ENOENT) {
    *rule = found;
    return -EEXIST;
  }

  // Rule already sitting in the pending (proposed) crush map?
  CrushWrapper newcrush = _get_pending_crush();
  found = newcrush.get_rule_id(name);
  if (found != -ENOENT) {
    *rule = found;
    return -EALREADY;
  }

  // Neither: create a fresh rule from the erasure code profile.
  ErasureCodeInterfaceRef erasure_code;
  int err = get_erasure_code(profile, &erasure_code, ss);
  if (err) {
    *ss << "failed to load plugin using profile " << profile << std::endl;
    return err;
  }

  err = erasure_code->create_rule(name, newcrush, ss);
  erasure_code.reset();
  if (err < 0)
    return err;
  // create_rule returns the new rule id on success.
  *rule = err;
  pending_inc.crush.clear();
  newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
  return 0;
}
// Instantiate the erasure code plugin named by the given profile.
// Returns -EAGAIN if the profile is still pending (callers must retry once
// it commits), -EINVAL if the profile has no 'plugin' entry, or the plugin
// factory's result otherwise.
int OSDMonitor::get_erasure_code(const string &erasure_code_profile,
				 ErasureCodeInterfaceRef *erasure_code,
				 ostream *ss) const
{
  if (pending_inc.has_erasure_code_profile(erasure_code_profile))
    return -EAGAIN;
  ErasureCodeProfile profile =
    osdmap.get_erasure_code_profile(erasure_code_profile);
  ErasureCodeProfile::const_iterator plugin =
    profile.find("plugin");
  if (plugin == profile.end()) {
    *ss << "cannot determine the erasure code plugin"
	<< " because there is no 'plugin' entry in the erasure_code_profile "
	<< profile << std::endl;
    return -EINVAL;
  }
  // warn (once per call) if the profile names a deprecated legacy plugin
  check_legacy_ec_plugin(plugin->second, erasure_code_profile);
  auto& instance = ErasureCodePluginRegistry::instance();
  return instance.factory(plugin->second,
			  g_conf().get_val<std::string>("erasure_code_dir"),
			  profile, erasure_code, ss);
}
int OSDMonitor::check_cluster_features(uint64_t features,
stringstream &ss)
{
stringstream unsupported_ss;
int unsupported_count = 0;
if ((mon.get_quorum_con_features() & features) != features) {
unsupported_ss << "the monitor cluster";
++unsupported_count;
}
set<int32_t> up_osds;
osdmap.get_up_osds(up_osds);
for (set<int32_t>::iterator it = up_osds.begin();
it != up_osds.end(); ++it) {
const osd_xinfo_t &xi = osdmap.get_xinfo(*it);
if ((xi.features & features) != features) {
if (unsupported_count > 0)
unsupported_ss << ", ";
unsupported_ss << "osd." << *it;
unsupported_count ++;
}
}
if (unsupported_count > 0) {
ss << "features " << features << " unsupported by: "
<< unsupported_ss.str();
return -ENOTSUP;
}
// check pending osd state, too!
for (map<int32_t,osd_xinfo_t>::const_iterator p =
pending_inc.new_xinfo.begin();
p != pending_inc.new_xinfo.end(); ++p) {
const osd_xinfo_t &xi = p->second;
if ((xi.features & features) != features) {
dout(10) << __func__ << " pending osd." << p->first
<< " features are insufficient; retry" << dendl;
return -EAGAIN;
}
}
return 0;
}
// Check whether applying the proposed crush map would require features that
// current clients, monitors, or OSDs do not have. Returns true if the crush
// change is safe, false (with a message in ss) otherwise.
bool OSDMonitor::validate_crush_against_features(const CrushWrapper *newcrush,
						 stringstream& ss)
{
  // Build a throwaway map with the new crush applied so we can ask it what
  // features it would require.
  OSDMap::Incremental new_pending = pending_inc;
  encode(*newcrush, new_pending.crush, mon.get_quorum_con_features());
  OSDMap newmap;
  newmap.deepish_copy_from(osdmap);
  newmap.apply_incremental(new_pending);
  // client compat
  if (newmap.require_min_compat_client != ceph_release_t::unknown) {
    auto mv = newmap.get_min_compat_client();
    if (mv > newmap.require_min_compat_client) {
      ss << "new crush map requires client version " << mv
	 << " but require_min_compat_client is "
	 << newmap.require_min_compat_client;
      return false;
    }
  }
  // osd compat
  uint64_t features =
    newmap.get_features(CEPH_ENTITY_TYPE_MON, NULL) |
    newmap.get_features(CEPH_ENTITY_TYPE_OSD, NULL);
  stringstream features_ss;
  int r = check_cluster_features(features, features_ss);
  if (r) {
    ss << "Could not change CRUSH: " << features_ss.str();
    return false;
  }
  return true;
}
bool OSDMonitor::erasure_code_profile_in_use(
  const mempool::osdmap::map<int64_t, pg_pool_t> &pools,
  const string &profile,
  ostream *ss)
{
  // List every erasure pool that references the profile, then append a
  // summary sentence if any were found.
  bool found = false;
  for (const auto& [pool_id, pool] : pools) {
    if (pool.erasure_code_profile == profile && pool.is_erasure()) {
      *ss << osdmap.pool_name[pool_id] << " ";
      found = true;
    }
  }
  if (found) {
    *ss << "pool(s) are using the erasure code profile '" << profile << "'";
  }
  return found;
}
int OSDMonitor::parse_erasure_code_profile(const vector<string> &erasure_code_profile,
					   map<string,string> *erasure_code_profile_map,
					   ostream *ss)
{
  // Seed the result from the configured default profile, which is required
  // to at least name a plugin.
  int r = g_conf().with_val<string>("osd_pool_default_erasure_code_profile",
				    get_json_str_map,
				    *ss,
				    erasure_code_profile_map,
				    true);
  if (r)
    return r;
  ceph_assert(erasure_code_profile_map->count("plugin"));
  string default_plugin = (*erasure_code_profile_map)["plugin"];

  // Overlay the user-supplied key[=value] pairs on top of the defaults.
  map<string,string> user_map;
  for (const auto& kv : erasure_code_profile) {
    size_t equal = kv.find('=');
    if (equal == string::npos) {
      // bare key with no '=': record it with an empty value
      user_map[kv] = string();
      (*erasure_code_profile_map)[kv] = string();
    } else {
      const string key = kv.substr(0, equal);
      const string value = kv.substr(equal + 1);
      if (key.find("ruleset-") == 0) {
	*ss << "property '" << key << "' is no longer supported; try "
	    << "'crush-" << key.substr(8) << "' instead";
	return -EINVAL;
      }
      user_map[key] = value;
      (*erasure_code_profile_map)[key] = value;
    }
  }

  // If the user selected a different plugin, the defaults do not apply:
  // keep only the user-specified keys.
  if (user_map.count("plugin") && user_map["plugin"] != default_plugin)
    (*erasure_code_profile_map) = user_map;

  return 0;
}
int OSDMonitor::prepare_pool_size(const unsigned pool_type,
const string &erasure_code_profile,
uint8_t repl_size,
unsigned *size, unsigned *min_size,
ostream *ss)
{
int err = 0;
bool set_min_size = false;
switch (pool_type) {
case pg_pool_t::TYPE_REPLICATED:
if (osdmap.stretch_mode_enabled) {
if (repl_size == 0)
repl_size = g_conf().get_val<uint64_t>("mon_stretch_pool_size");
if (repl_size != g_conf().get_val<uint64_t>("mon_stretch_pool_size")) {
*ss << "prepare_pool_size: we are in stretch mode but size "
<< repl_size << " does not match!";
return -EINVAL;
}
*min_size = g_conf().get_val<uint64_t>("mon_stretch_pool_min_size");
set_min_size = true;
}
if (repl_size == 0) {
repl_size = g_conf().get_val<uint64_t>("osd_pool_default_size");
}
*size = repl_size;
if (!set_min_size)
*min_size = g_conf().get_osd_pool_default_min_size(repl_size);
break;
case pg_pool_t::TYPE_ERASURE:
{
if (osdmap.stretch_mode_enabled) {
*ss << "prepare_pool_size: we are in stretch mode; cannot create EC pools!";
return -EINVAL;
}
ErasureCodeInterfaceRef erasure_code;
err = get_erasure_code(erasure_code_profile, &erasure_code, ss);
if (err == 0) {
*size = erasure_code->get_chunk_count();
*min_size =
erasure_code->get_data_chunk_count() +
std::min<int>(1, erasure_code->get_coding_chunk_count() - 1);
assert(*min_size <= *size);
assert(*min_size >= erasure_code->get_data_chunk_count());
}
}
break;
default:
*ss << "prepare_pool_size: " << pool_type << " is not a known pool type";
err = -EINVAL;
break;
}
return err;
}
// Compute the stripe width for a new pool. Replicated pools have no stripe
// width; for erasure pools it is k * chunk_size, where the stripe unit comes
// from the profile (if present) or the configured default.
int OSDMonitor::prepare_pool_stripe_width(const unsigned pool_type,
					  const string &erasure_code_profile,
					  uint32_t *stripe_width,
					  ostream *ss)
{
  int err = 0;
  switch (pool_type) {
  case pg_pool_t::TYPE_REPLICATED:
    // ignored
    break;
  case pg_pool_t::TYPE_ERASURE:
    {
      ErasureCodeProfile profile =
	osdmap.get_erasure_code_profile(erasure_code_profile);
      ErasureCodeInterfaceRef erasure_code;
      err = get_erasure_code(erasure_code_profile, &erasure_code, ss);
      if (err)
	break;
      uint32_t data_chunks = erasure_code->get_data_chunk_count();
      // default stripe unit from config; the profile may override it below
      uint32_t stripe_unit = g_conf().get_val<Option::size_t>("osd_pool_erasure_code_stripe_unit");
      auto it = profile.find("stripe_unit");
      if (it != profile.end()) {
	string err_str;
	stripe_unit = strict_iecstrtoll(it->second, &err_str);
	// the profile was validated (normalize_profile), so this must parse
	ceph_assert(err_str.empty());
      }
      // stripe_width = k * (stripe unit, rounded up to plugin alignment)
      *stripe_width = data_chunks *
	erasure_code->get_chunk_size(stripe_unit * data_chunks);
    }
    break;
  default:
    *ss << "prepare_pool_stripe_width: "
	<< pool_type << " is not a known pool type";
    err = -EINVAL;
    break;
  }
  return err;
}
int OSDMonitor::get_replicated_stretch_crush_rule()
{
  /* we don't write down the stretch rule anywhere, so
   * we have to guess it. How? Look at all the pools
   * and count up how many times a given rule is used
   * on stretch pools and then return the one with
   * the most users!
   */
  map<int,int> rule_counts;
  for (const auto& [pool_id, pool] : osdmap.pools) {
    if (pool.is_replicated() && pool.is_stretch_pool()) {
      // operator[] value-initializes the count to 0 on first use
      ++rule_counts[pool.crush_rule];
    }
  }
  if (rule_counts.empty()) {
    return -ENOENT;
  }
  // Pick the first rule (in rule-id order) with the highest use count.
  auto winner = std::max_element(
    rule_counts.begin(), rule_counts.end(),
    [](const auto& a, const auto& b) { return a.second < b.second; });
  ceph_assert(winner->second > 0);
  ceph_assert(winner->first >= 0);
  return winner->first;
}
int OSDMonitor::prepare_pool_crush_rule(const unsigned pool_type,
					const string &erasure_code_profile,
					const string &rule_name,
					int *crush_rule,
					ostream *ss)
{
  if (*crush_rule >= 0) {
    // The caller picked a rule explicitly; just make sure it exists.
    if (!osdmap.crush->rule_exists(*crush_rule)) {
      *ss << "CRUSH rule " << *crush_rule << " not found";
      return -ENOENT;
    }
    return 0;
  }

  switch (pool_type) {
  case pg_pool_t::TYPE_REPLICATED:
    if (rule_name == "") {
      if (osdmap.stretch_mode_enabled) {
	*crush_rule = get_replicated_stretch_crush_rule();
      } else {
	// Use default rule
	*crush_rule = osdmap.crush->get_osd_pool_default_crush_replicated_rule(cct);
      }
      if (*crush_rule < 0) {
	// Errors may happen e.g. if no valid rule is available
	*ss << "No suitable CRUSH rule exists, check "
	    << "'osd pool default crush *' config options";
	return -ENOENT;
      }
      return 0;
    }
    return get_crush_rule(rule_name, crush_rule, ss);

  case pg_pool_t::TYPE_ERASURE:
    {
      int err = crush_rule_create_erasure(rule_name,
					  erasure_code_profile,
					  crush_rule, ss);
      if (err == -EALREADY) {
	dout(20) << "prepare_pool_crush_rule: rule "
		 << rule_name << " try again" << dendl;
	// treated the same as a freshly-created rule, below
	err = -EAGAIN;
      } else if (err == 0) {
	// need to wait for the crush rule to be proposed before proceeding
	err = -EAGAIN;
      } else if (err == -EEXIST) {
	// rule already committed; nothing to wait for
	err = 0;
      }
      return err;
    }

  default:
    *ss << "prepare_pool_crush_rule: " << pool_type
	<< " is not a known pool type";
    return -EINVAL;
  }
}
int OSDMonitor::get_crush_rule(const string &rule_name,
			       int *crush_rule,
			       ostream *ss)
{
  // Prefer a rule that is already committed in the osdmap.
  int found = osdmap.crush->get_rule_id(rule_name);
  if (found != -ENOENT) {
    // found it, use it
    *crush_rule = found;
    return 0;
  }

  // Not committed; is it sitting in the pending crush map?
  CrushWrapper newcrush = _get_pending_crush();
  found = newcrush.get_rule_id(rule_name);
  if (found != -ENOENT) {
    // found it, wait for it to be proposed
    dout(20) << __func__ << ": rule " << rule_name
	     << " try again" << dendl;
    return -EAGAIN;
  }

  // Cannot find it, return error (found is -ENOENT here)
  *ss << "specified rule " << rule_name << " doesn't exist";
  return found;
}
/*
 * Get the number of 'in' osds according to the crush_rule,
 */
uint32_t OSDMonitor::get_osd_num_by_crush(int crush_rule)
{
  set<int> out_osds;
  set<int> crush_in_osds;
  set<int> roots;
  CrushWrapper newcrush = _get_pending_crush();
  // Collect every 'take' root referenced by the rule.
  newcrush.find_takes_by_rule(crush_rule, &roots);
  for (auto root : roots) {
    const char *rootname = newcrush.get_item_name(root);
    set<int> crush_all_osds;
    // All leaf (osd) items under this root.
    newcrush.get_leaves(rootname, &crush_all_osds);
    // NOTE(review): out_osds is never populated, so this set_difference is
    // currently a no-op and every leaf under the roots is counted. If the
    // intent was to exclude 'out' OSDs (as the function comment suggests),
    // out_osds needs to be filled first -- confirm intended behavior.
    std::set_difference(crush_all_osds.begin(), crush_all_osds.end(),
                        out_osds.begin(), out_osds.end(),
                        std::inserter(crush_in_osds, crush_in_osds.end()));
  }
  return crush_in_osds.size();
}
int OSDMonitor::check_pg_num(int64_t pool,
			     int pg_num,
			     int size,
			     int crush_rule,
			     ostream *ss)
{
  // Project the total number of PG replicas that the OSDs selected by
  // crush_rule would carry if this change were applied, and reject the
  // change if the per-OSD average would exceed mon_max_pg_per_osd.
  const auto max_pgs_per_osd =
    g_conf().get_val<uint64_t>("mon_max_pg_per_osd");
  uint64_t projected = 0;

  if (pool < 0) {
    // a brand new pool: not present in osdmap.get_pools() yet
    projected += pg_num * size;
  }

  set<int64_t> crush_pool_ids;
  osdmap.get_pool_ids_by_rule(crush_rule, &crush_pool_ids);
  for (const auto& [pool_id, pool_info] : osdmap.get_pools()) {
    // Check only for pools affected by crush rule
    if (!crush_pool_ids.contains(pool_id)) {
      continue;
    }
    if (pool_id == pool) {
      // Specified pool, use given pg_num and size values.
      projected += pg_num * size;
    } else {
      // Use pg_num_target for evaluating the projected pg num
      projected += pool_info.get_pg_num_target() * pool_info.get_size();
    }
  }

  // assume min cluster size 3
  const uint32_t osd_num_by_crush =
    std::max(get_osd_num_by_crush(crush_rule), 3u);
  const auto projected_pgs_per_osd = projected / osd_num_by_crush;
  if (projected_pgs_per_osd <= max_pgs_per_osd) {
    return 0;
  }
  if (pool >= 0) {
    *ss << "pool id " << pool;
  }
  *ss << " pg_num " << pg_num
      << " size " << size
      << " for this pool would result in "
      << projected_pgs_per_osd
      << " cumulative PGs per OSD (" << projected
      << " total PG replicas on " << osd_num_by_crush
      << " 'in' root OSDs by crush rule) "
      << "which exceeds the mon_max_pg_per_osd "
      << "value of " << max_pgs_per_osd;
  return -ERANGE;
}
/**
 * @param name The name of the new pool
 * @param crush_rule The crush rule to use. If <0, will use the system default
 * @param crush_rule_name The crush rule to use, if crush_rule <0
 * @param pg_num The pg_num to use. If set to 0, will use the system default
 * @param pgp_num The pgp_num to use. If set to 0, will use the system default
 * @param pg_num_min min pg_num
 * @param pg_num_max max pg_num
 * @param repl_size Replication factor, or 0 for default
 * @param target_size_bytes Target pool size in bytes, or 0 to leave unset
 * @param target_size_ratio Target pool size ratio, or 0 to leave unset
 * @param erasure_code_profile The profile name in OSDMap to be used for erasure code
 * @param pool_type TYPE_ERASURE, or TYPE_REPLICATED
 * @param expected_num_objects expected number of objects on the pool
 * @param fast_read fast read type.
 * @param pg_autoscale_mode autoscale mode, one of on, off, warn
 * @param bulk indicates whether pool should be a bulk pool
 * @param crimson indicates whether pool is a crimson pool
 * @param ss human readable error message, if any.
 *
 * @return 0 on success, negative errno on failure.
 */
int OSDMonitor::prepare_new_pool(string& name,
				 int crush_rule,
				 const string &crush_rule_name,
				 unsigned pg_num, unsigned pgp_num,
				 unsigned pg_num_min,
				 unsigned pg_num_max,
				 const uint64_t repl_size,
				 const uint64_t target_size_bytes,
				 const float target_size_ratio,
				 const string &erasure_code_profile,
				 const unsigned pool_type,
				 const uint64_t expected_num_objects,
				 FastReadType fast_read,
				 string pg_autoscale_mode,
				 bool bulk,
				 bool crimson,
				 ostream *ss)
{
  if (crimson && pg_autoscale_mode.empty()) {
    // default pg_autoscale_mode to off for crimson, we'll error out below if
    // the user tried to actually set pg_autoscale_mode to something other than
    // "off"
    pg_autoscale_mode = "off";
  }
  if (name.length() == 0)
    return -EINVAL;
  // --- defaulting and validation of pg_num / pgp_num ---
  if (pg_num == 0) {
    auto pg_num_from_mode =
      [pg_num=g_conf().get_val<uint64_t>("osd_pool_default_pg_num")]
      (const string& mode) {
      // with the autoscaler "on", start from a single PG and let it grow
      return mode == "on" ? 1 : pg_num;
    };
    pg_num = pg_num_from_mode(
      pg_autoscale_mode.empty() ?
      g_conf().get_val<string>("osd_pool_default_pg_autoscale_mode") :
      pg_autoscale_mode);
  }
  if (pgp_num == 0)
    pgp_num = g_conf().get_val<uint64_t>("osd_pool_default_pgp_num");
  // a zero default means "match pg_num"
  if (!pgp_num)
    pgp_num = pg_num;
  if (pg_num > g_conf().get_val<uint64_t>("mon_max_pool_pg_num")) {
    *ss << "'pg_num' must be greater than 0 and less than or equal to "
        << g_conf().get_val<uint64_t>("mon_max_pool_pg_num")
        << " (you may adjust 'mon max pool pg num' for higher values)";
    return -ERANGE;
  }
  if (pgp_num > pg_num) {
    *ss << "'pgp_num' must be greater than 0 and lower or equal than 'pg_num'"
        << ", which in this case is " << pg_num;
    return -ERANGE;
  }
  // --- crimson-specific constraints ---
  if (crimson) {
    /* crimson-osd requires that the pool be replicated and that pg_num/pgp_num
     * be static.  User must also have specified set-allow-crimson */
    const auto *suffix = " (--crimson specified or osd_pool_default_crimson set)";
    if (pool_type != pg_pool_t::TYPE_REPLICATED) {
      *ss << "crimson-osd only supports replicated pools" << suffix;
      return -EINVAL;
    } else if (pg_autoscale_mode != "off") {
      *ss << "crimson-osd does not support changing pg_num or pgp_num, "
	  << "pg_autoscale_mode must be set to 'off'" << suffix;
      return -EINVAL;
    } else if (!osdmap.get_allow_crimson()) {
      *ss << "set-allow-crimson must be set to create a pool with the "
	  << "crimson flag" << suffix;
      return -EINVAL;
    }
  }
  if (pool_type == pg_pool_t::TYPE_REPLICATED && fast_read == FAST_READ_ON) {
    *ss << "'fast_read' can only apply to erasure coding pool";
    return -EINVAL;
  }
  // --- resolve crush rule, pool size, and sanity-check the crush map ---
  int r;
  r = prepare_pool_crush_rule(pool_type, erasure_code_profile,
				 crush_rule_name, &crush_rule, ss);
  if (r) {
    dout(10) << "prepare_pool_crush_rule returns " << r << dendl;
    return r;
  }
  unsigned size, min_size;
  r = prepare_pool_size(pool_type, erasure_code_profile, repl_size,
			&size, &min_size, ss);
  if (r) {
    dout(10) << "prepare_pool_size returns " << r << dendl;
    return r;
  }
  if (g_conf()->mon_osd_crush_smoke_test) {
    // run the new rule through a forked crush mapping test before accepting it
    CrushWrapper newcrush = _get_pending_crush();
    ostringstream err;
    CrushTester tester(newcrush, err);
    tester.set_min_x(0);
    tester.set_max_x(50);
    tester.set_rule(crush_rule);
    tester.set_num_rep(size);
    auto start = ceph::coarse_mono_clock::now();
    r = tester.test_with_fork(cct, g_conf()->mon_lease);
    dout(10) << __func__ << " crush test_with_fork tester created " << dendl;
    auto duration = ceph::coarse_mono_clock::now() - start;
    if (r < 0) {
      dout(10) << "tester.test_with_fork returns " << r
	       << ": " << err.str() << dendl;
      *ss << "crush test failed with " << r << ": " << err.str();
      return r;
    }
    dout(10) << __func__ << " crush smoke test duration: "
             << duration << dendl;
  }
  r = check_pg_num(-1, pg_num, size, crush_rule, ss);
  if (r) {
    dout(10) << "check_pg_num returns " << r << dendl;
    return r;
  }

  if (osdmap.crush->get_rule_type(crush_rule) != (int)pool_type) {
    *ss << "crush rule " << crush_rule << " type does not match pool";
    return -EINVAL;
  }

  uint32_t stripe_width = 0;
  r = prepare_pool_stripe_width(pool_type, erasure_code_profile, &stripe_width, ss);
  if (r) {
    dout(10) << "prepare_pool_stripe_width returns " << r << dendl;
    return r;
  }

  // resolve the effective fast_read flag (EC pools only)
  bool fread = false;
  if (pool_type == pg_pool_t::TYPE_ERASURE) {
    switch (fast_read) {
      case FAST_READ_OFF:
	fread = false;
	break;
      case FAST_READ_ON:
	fread = true;
	break;
      case FAST_READ_DEFAULT:
	fread = g_conf()->osd_pool_default_ec_fast_read;
	break;
      default:
	*ss << "invalid fast_read setting: " << fast_read;
	return -EINVAL;
    }
  }

  // if a pool by this name is already pending, treat this as a no-op success
  for (map<int64_t,string>::iterator p = pending_inc.new_pool_names.begin();
       p != pending_inc.new_pool_names.end();
       ++p) {
    if (p->second == name)
      return 0;
  }

  // --- allocate the new pool id and fill in the pg_pool_t ---
  if (-1 == pending_inc.new_pool_max)
    pending_inc.new_pool_max = osdmap.pool_max;
  int64_t pool = ++pending_inc.new_pool_max;
  pg_pool_t empty;
  pg_pool_t *pi = pending_inc.get_new_pool(pool, &empty);
  pi->create_time = ceph_clock_now();
  pi->type = pool_type;
  pi->fast_read = fread;
  pi->flags = g_conf()->osd_pool_default_flags;
  if (bulk) {
    pi->set_flag(pg_pool_t::FLAG_BULK);
  } else if (g_conf()->osd_pool_default_flag_bulk) {
      pi->set_flag(pg_pool_t::FLAG_BULK);
  }
  if (g_conf()->osd_pool_default_flag_hashpspool)
    pi->set_flag(pg_pool_t::FLAG_HASHPSPOOL);
  if (g_conf()->osd_pool_default_flag_nodelete)
    pi->set_flag(pg_pool_t::FLAG_NODELETE);
  if (g_conf()->osd_pool_default_flag_nopgchange)
    pi->set_flag(pg_pool_t::FLAG_NOPGCHANGE);
  if (g_conf()->osd_pool_default_flag_nosizechange)
    pi->set_flag(pg_pool_t::FLAG_NOSIZECHANGE);
  // FLAG_CREATING is cleared once the initial PGs have been created
  pi->set_flag(pg_pool_t::FLAG_CREATING);
  if (g_conf()->osd_pool_use_gmt_hitset)
    pi->use_gmt_hitset = true;
  else
    pi->use_gmt_hitset = false;
  if (crimson) {
    pi->set_flag(pg_pool_t::FLAG_CRIMSON);
    pi->set_flag(pg_pool_t::FLAG_NOPGCHANGE);
  }

  pi->size = size;
  pi->min_size = min_size;
  pi->crush_rule = crush_rule;
  pi->expected_num_objects = expected_num_objects;
  pi->object_hash = CEPH_STR_HASH_RJENKINS;

  // stretch-mode pools get peering constraints tied to the stretch buckets
  if (osdmap.stretch_mode_enabled) {
    pi->peering_crush_bucket_count = osdmap.stretch_bucket_count;
    pi->peering_crush_bucket_target = osdmap.stretch_bucket_count;
    pi->peering_crush_bucket_barrier = osdmap.stretch_mode_bucket;
    pi->peering_crush_mandatory_member = CRUSH_ITEM_NONE;
    if (osdmap.degraded_stretch_mode) {
      pi->peering_crush_bucket_count = osdmap.degraded_stretch_mode;
      pi->peering_crush_bucket_target = osdmap.degraded_stretch_mode;
      // pi->peering_crush_bucket_mandatory_member = CRUSH_ITEM_NONE;
      // TODO: drat, we don't record this ^ anywhere, though given that it
      // necessarily won't exist elsewhere it likely doesn't matter
      pi->min_size = pi->min_size / 2;
      pi->size = pi->size / 2; // only support 2 zones now
    }
  }

  // autoscale mode: start from the configured default ...
  if (auto m = pg_pool_t::get_pg_autoscale_mode_by_name(
        g_conf().get_val<string>("osd_pool_default_pg_autoscale_mode"));
      m != pg_pool_t::pg_autoscale_mode_t::UNKNOWN) {
    pi->pg_autoscale_mode = m;
  } else {
    pi->pg_autoscale_mode = pg_pool_t::pg_autoscale_mode_t::OFF;
  }
  // cap the initial pg count; pg_num_target records what was requested
  auto max = g_conf().get_val<int64_t>("mon_osd_max_initial_pgs");
  pi->set_pg_num(
    max > 0 ? std::min<uint64_t>(pg_num, std::max<int64_t>(1, max))
    : pg_num);
  pi->set_pg_num_pending(pi->get_pg_num());
  pi->set_pg_num_target(pg_num);
  pi->set_pgp_num(pi->get_pg_num());
  pi->set_pgp_num_target(pgp_num);
  if (osdmap.require_osd_release >= ceph_release_t::nautilus &&
      pg_num_min) {
    pi->opts.set(pool_opts_t::PG_NUM_MIN, static_cast<int64_t>(pg_num_min));
  }
  if (osdmap.require_osd_release >= ceph_release_t::quincy &&
      pg_num_max) {
    pi->opts.set(pool_opts_t::PG_NUM_MAX, static_cast<int64_t>(pg_num_max));
  }
  // ... then let an explicitly requested autoscale mode override the default
  if (auto m = pg_pool_t::get_pg_autoscale_mode_by_name(
	pg_autoscale_mode); m != pg_pool_t::pg_autoscale_mode_t::UNKNOWN) {
    pi->pg_autoscale_mode = m;
  }

  pi->last_change = pending_inc.epoch;
  pi->auid = 0;

  if (pool_type == pg_pool_t::TYPE_ERASURE) {
      pi->erasure_code_profile = erasure_code_profile;
  } else {
      pi->erasure_code_profile = "";
  }
  pi->stripe_width = stripe_width;

  if (osdmap.require_osd_release >= ceph_release_t::nautilus &&
      target_size_bytes) {
    // only store for nautilus+ because TARGET_SIZE_BYTES may be
    // larger than int32_t max.
    pi->opts.set(pool_opts_t::TARGET_SIZE_BYTES, static_cast<int64_t>(target_size_bytes));
  }
  if (target_size_ratio > 0.0 &&
    osdmap.require_osd_release >= ceph_release_t::nautilus) {
    // only store for nautilus+, just to be consistent and tidy.
    pi->opts.set(pool_opts_t::TARGET_SIZE_RATIO, target_size_ratio);
  }

  // cache-tier defaults (only meaningful if the pool later becomes a tier)
  pi->cache_target_dirty_ratio_micro =
    g_conf()->osd_pool_default_cache_target_dirty_ratio * 1000000;
  pi->cache_target_dirty_high_ratio_micro =
    g_conf()->osd_pool_default_cache_target_dirty_high_ratio * 1000000;
  pi->cache_target_full_ratio_micro =
    g_conf()->osd_pool_default_cache_target_full_ratio * 1000000;
  pi->cache_min_flush_age = g_conf()->osd_pool_default_cache_min_flush_age;
  pi->cache_min_evict_age = g_conf()->osd_pool_default_cache_min_evict_age;

  pending_inc.new_pool_names[pool] = name;
  return 0;
}
bool OSDMonitor::prepare_set_flag(MonOpRequestRef op, int flag)
{
  op->mark_osdmon_event(__func__);
  // Seed pending flags from the committed map on first modification.
  if (pending_inc.new_flags < 0)
    pending_inc.new_flags = osdmap.get_flags();
  pending_inc.new_flags |= flag;
  ostringstream reply;
  reply << OSDMap::get_flag_string(flag) << " is set";
  // Answer the client once the new map has been committed.
  wait_for_finished_proposal(
    op, new Monitor::C_Command(mon, op, 0, reply.str(),
			       get_last_committed() + 1));
  return true;
}
bool OSDMonitor::prepare_unset_flag(MonOpRequestRef op, int flag)
{
  op->mark_osdmon_event(__func__);
  // Seed pending flags from the committed map on first modification.
  if (pending_inc.new_flags < 0)
    pending_inc.new_flags = osdmap.get_flags();
  pending_inc.new_flags &= ~flag;
  ostringstream reply;
  reply << OSDMap::get_flag_string(flag) << " is unset";
  // Answer the client once the new map has been committed.
  wait_for_finished_proposal(
    op, new Monitor::C_Command(mon, op, 0, reply.str(),
			       get_last_committed() + 1));
  return true;
}
int OSDMonitor::prepare_command_pool_set(const cmdmap_t& cmdmap,
stringstream& ss)
{
string poolstr;
cmd_getval(cmdmap, "pool", poolstr);
int64_t pool = osdmap.lookup_pg_pool_name(poolstr.c_str());
if (pool < 0) {
ss << "unrecognized pool '" << poolstr << "'";
return -ENOENT;
}
string var;
cmd_getval(cmdmap, "var", var);
pg_pool_t p = *osdmap.get_pg_pool(pool);
if (pending_inc.new_pools.count(pool))
p = pending_inc.new_pools[pool];
// accept val as a json string in the normal case (current
// generation monitor). parse out int or float values from the
// string as needed. however, if it is not a string, try to pull
// out an int, in case an older monitor with an older json schema is
// forwarding a request.
string val;
string interr, floaterr;
int64_t n = 0;
double f = 0;
int64_t uf = 0; // micro-f
cmd_getval(cmdmap, "val", val);
auto si_options = {
"target_max_objects"
};
auto iec_options = {
"target_max_bytes",
"target_size_bytes",
"compression_max_blob_size",
"compression_min_blob_size",
"csum_max_block",
"csum_min_block",
};
if (count(begin(si_options), end(si_options), var)) {
n = strict_si_cast<int64_t>(val, &interr);
} else if (count(begin(iec_options), end(iec_options), var)) {
n = strict_iec_cast<int64_t>(val, &interr);
} else {
// parse string as both int and float; different fields use different types.
n = strict_strtoll(val.c_str(), 10, &interr);
f = strict_strtod(val.c_str(), &floaterr);
uf = llrintl(f * (double)1000000.0);
}
if (!p.is_tier() &&
(var == "hit_set_type" || var == "hit_set_period" ||
var == "hit_set_count" || var == "hit_set_fpp" ||
var == "target_max_objects" || var == "target_max_bytes" ||
var == "cache_target_full_ratio" || var == "cache_target_dirty_ratio" ||
var == "cache_target_dirty_high_ratio" || var == "use_gmt_hitset" ||
var == "cache_min_flush_age" || var == "cache_min_evict_age" ||
var == "hit_set_grade_decay_rate" || var == "hit_set_search_last_n" ||
var == "min_read_recency_for_promote" || var == "min_write_recency_for_promote")) {
return -EACCES;
}
if (var == "size") {
if (p.has_flag(pg_pool_t::FLAG_NOSIZECHANGE)) {
ss << "pool size change is disabled; you must unset nosizechange flag for the pool first";
return -EPERM;
}
if (p.type == pg_pool_t::TYPE_ERASURE) {
ss << "can not change the size of an erasure-coded pool";
return -ENOTSUP;
}
if (interr.length()) {
ss << "error parsing integer value '" << val << "': " << interr;
return -EINVAL;
}
if (n <= 0 || n > 10) {
ss << "pool size must be between 1 and 10";
return -EINVAL;
}
if (n == 1) {
if (!g_conf().get_val<bool>("mon_allow_pool_size_one")) {
ss << "configuring pool size as 1 is disabled by default.";
return -EPERM;
}
bool sure = false;
cmd_getval(cmdmap, "yes_i_really_mean_it", sure);
if (!sure) { ss << "WARNING: setting pool size 1 could lead to data loss "
"without recovery. If you are *ABSOLUTELY CERTAIN* that is what you want, "
"pass the flag --yes-i-really-mean-it.";
return -EPERM;
}
}
if (osdmap.crush->get_rule_type(p.get_crush_rule()) != (int)p.type) {
ss << "crush rule " << p.get_crush_rule() << " type does not match pool";
return -EINVAL;
}
if (n > p.size) {
// only when increasing pool size
int r = check_pg_num(pool, p.get_pg_num(), n, p.get_crush_rule(), &ss);
if (r < 0) {
return r;
}
}
p.size = n;
p.min_size = g_conf().get_osd_pool_default_min_size(p.size);
} else if (var == "min_size") {
if (p.has_flag(pg_pool_t::FLAG_NOSIZECHANGE)) {
ss << "pool min size change is disabled; you must unset nosizechange flag for the pool first";
return -EPERM;
}
if (interr.length()) {
ss << "error parsing integer value '" << val << "': " << interr;
return -EINVAL;
}
if (p.type != pg_pool_t::TYPE_ERASURE) {
if (n < 1 || n > p.size) {
ss << "pool min_size must be between 1 and size, which is set to " << (int)p.size;
return -EINVAL;
}
} else {
ErasureCodeInterfaceRef erasure_code;
int k;
stringstream tmp;
int err = get_erasure_code(p.erasure_code_profile, &erasure_code, &tmp);
if (err == 0) {
k = erasure_code->get_data_chunk_count();
} else {
ss << __func__ << " get_erasure_code failed: " << tmp.str();
return err;
}
if (n < k || n > p.size) {
ss << "pool min_size must be between " << k << " and size, which is set to " << (int)p.size;
return -EINVAL;
}
}
p.min_size = n;
} else if (var == "pg_num_actual") {
if (p.has_flag(pg_pool_t::FLAG_NOPGCHANGE)) {
ss << "pool pg_num change is disabled; you must unset nopgchange flag for the pool first";
return -EPERM;
}
if (interr.length()) {
ss << "error parsing integer value '" << val << "': " << interr;
return -EINVAL;
}
if (n == (int)p.get_pg_num()) {
return 0;
}
if (static_cast<uint64_t>(n) > g_conf().get_val<uint64_t>("mon_max_pool_pg_num")) {
ss << "'pg_num' must be greater than 0 and less than or equal to "
<< g_conf().get_val<uint64_t>("mon_max_pool_pg_num")
<< " (you may adjust 'mon max pool pg num' for higher values)";
return -ERANGE;
}
if (p.has_flag(pg_pool_t::FLAG_CREATING)) {
ss << "cannot adjust pg_num while initial PGs are being created";
return -EBUSY;
}
if (n > (int)p.get_pg_num()) {
if (p.get_pg_num() != p.get_pg_num_pending()) {
// force pre-nautilus clients to resend their ops, since they
// don't understand pg_num_pending changes form a new interval
p.last_force_op_resend_prenautilus = pending_inc.epoch;
}
p.set_pg_num(n);
} else {
if (osdmap.require_osd_release < ceph_release_t::nautilus) {
ss << "nautilus OSDs are required to adjust pg_num_pending";
return -EPERM;
}
if (n < (int)p.get_pgp_num()) {
ss << "specified pg_num " << n << " < pgp_num " << p.get_pgp_num();
return -EINVAL;
}
if (n < (int)p.get_pg_num() - 1) {
ss << "specified pg_num " << n << " < pg_num (" << p.get_pg_num()
<< ") - 1; only single pg decrease is currently supported";
return -EINVAL;
}
p.set_pg_num_pending(n);
// force pre-nautilus clients to resend their ops, since they
// don't understand pg_num_pending changes form a new interval
p.last_force_op_resend_prenautilus = pending_inc.epoch;
}
// force pre-luminous clients to resend their ops, since they
// don't understand that split PGs now form a new interval.
p.last_force_op_resend_preluminous = pending_inc.epoch;
} else if (var == "pg_num") {
if (p.has_flag(pg_pool_t::FLAG_NOPGCHANGE)) {
ss << "pool pg_num change is disabled; you must unset nopgchange flag for the pool first";
return -EPERM;
}
if (interr.length()) {
ss << "error parsing integer value '" << val << "': " << interr;
return -EINVAL;
}
if (n == (int)p.get_pg_num_target()) {
return 0;
}
if (n <= 0 || static_cast<uint64_t>(n) >
g_conf().get_val<uint64_t>("mon_max_pool_pg_num")) {
ss << "'pg_num' must be greater than 0 and less than or equal to "
<< g_conf().get_val<uint64_t>("mon_max_pool_pg_num")
<< " (you may adjust 'mon max pool pg num' for higher values)";
return -ERANGE;
}
if (n > (int)p.get_pg_num_target()) {
int r = check_pg_num(pool, n, p.get_size(), p.get_crush_rule(), &ss);
if (r) {
return r;
}
bool force = false;
cmd_getval(cmdmap, "yes_i_really_mean_it", force);
if (p.cache_mode != pg_pool_t::CACHEMODE_NONE && !force) {
ss << "splits in cache pools must be followed by scrubs and leave sufficient free space to avoid overfilling. use --yes-i-really-mean-it to force.";
return -EPERM;
}
} else {
if (osdmap.require_osd_release < ceph_release_t::nautilus) {
ss << "nautilus OSDs are required to decrease pg_num";
return -EPERM;
}
}
int64_t pg_min = 0, pg_max = 0;
p.opts.get(pool_opts_t::PG_NUM_MIN, &pg_min);
p.opts.get(pool_opts_t::PG_NUM_MAX, &pg_max);
if (pg_min && n < pg_min) {
ss << "specified pg_num " << n
<< " < pg_num_min " << pg_min;
return -EINVAL;
}
if (pg_max && n > pg_max) {
ss << "specified pg_num " << n
<< " < pg_num_max " << pg_max;
return -EINVAL;
}
if (osdmap.require_osd_release < ceph_release_t::nautilus) {
// pre-nautilus osdmap format; increase pg_num directly
assert(n > (int)p.get_pg_num());
// force pre-nautilus clients to resend their ops, since they
// don't understand pg_num_target changes form a new interval
p.last_force_op_resend_prenautilus = pending_inc.epoch;
// force pre-luminous clients to resend their ops, since they
// don't understand that split PGs now form a new interval.
p.last_force_op_resend_preluminous = pending_inc.epoch;
p.set_pg_num(n);
} else {
// set targets; mgr will adjust pg_num_actual and pgp_num later.
// make pgp_num track pg_num if it already matches. if it is set
// differently, leave it different and let the user control it
// manually.
if (p.get_pg_num_target() == p.get_pgp_num_target()) {
p.set_pgp_num_target(n);
}
p.set_pg_num_target(n);
}
} else if (var == "pgp_num_actual") {
if (p.has_flag(pg_pool_t::FLAG_NOPGCHANGE)) {
ss << "pool pgp_num change is disabled; you must unset nopgchange flag for the pool first";
return -EPERM;
}
if (interr.length()) {
ss << "error parsing integer value '" << val << "': " << interr;
return -EINVAL;
}
if (n <= 0) {
ss << "specified pgp_num must > 0, but you set to " << n;
return -EINVAL;
}
if (n > (int)p.get_pg_num()) {
ss << "specified pgp_num " << n << " > pg_num " << p.get_pg_num();
return -EINVAL;
}
if (n > (int)p.get_pg_num_pending()) {
ss << "specified pgp_num " << n
<< " > pg_num_pending " << p.get_pg_num_pending();
return -EINVAL;
}
p.set_pgp_num(n);
} else if (var == "pgp_num") {
if (p.has_flag(pg_pool_t::FLAG_NOPGCHANGE)) {
ss << "pool pgp_num change is disabled; you must unset nopgchange flag for the pool first";
return -EPERM;
}
if (interr.length()) {
ss << "error parsing integer value '" << val << "': " << interr;
return -EINVAL;
}
if (n <= 0) {
ss << "specified pgp_num must > 0, but you set to " << n;
return -EINVAL;
}
if (n > (int)p.get_pg_num_target()) {
ss << "specified pgp_num " << n << " > pg_num " << p.get_pg_num_target();
return -EINVAL;
}
if (osdmap.require_osd_release < ceph_release_t::nautilus) {
// pre-nautilus osdmap format; increase pgp_num directly
p.set_pgp_num(n);
} else {
p.set_pgp_num_target(n);
}
} else if (var == "pg_autoscale_mode") {
auto m = pg_pool_t::get_pg_autoscale_mode_by_name(val);
if (m == pg_pool_t::pg_autoscale_mode_t::UNKNOWN) {
ss << "specified invalid mode " << val;
return -EINVAL;
}
if (osdmap.require_osd_release < ceph_release_t::nautilus) {
ss << "must set require_osd_release to nautilus or later before setting pg_autoscale_mode";
return -EINVAL;
}
p.pg_autoscale_mode = m;
} else if (var == "crush_rule") {
int id = osdmap.crush->get_rule_id(val);
if (id == -ENOENT) {
ss << "crush rule " << val << " does not exist";
return -ENOENT;
}
if (id < 0) {
ss << cpp_strerror(id);
return -ENOENT;
}
if (osdmap.crush->get_rule_type(id) != (int)p.get_type()) {
ss << "crush rule " << id << " type does not match pool";
return -EINVAL;
}
p.crush_rule = id;
} else if (var == "nodelete" || var == "nopgchange" ||
var == "nosizechange" || var == "write_fadvise_dontneed" ||
var == "noscrub" || var == "nodeep-scrub" || var == "bulk") {
uint64_t flag = pg_pool_t::get_flag_by_name(var);
// make sure we only compare against 'n' if we didn't receive a string
if (val == "true" || (interr.empty() && n == 1)) {
p.set_flag(flag);
} else if (val == "false" || (interr.empty() && n == 0)) {
if (flag == pg_pool_t::FLAG_NOPGCHANGE && p.is_crimson()) {
ss << "cannot clear FLAG_NOPGCHANGE on a crimson pool";
return -EINVAL;
}
p.unset_flag(flag);
} else {
ss << "expecting value 'true', 'false', '0', or '1'";
return -EINVAL;
}
} else if (var == "eio") {
uint64_t flag = pg_pool_t::get_flag_by_name(var);
// make sure we only compare against 'n' if we didn't receive a string
if (val == "true" || (interr.empty() && n == 1)) {
p.set_flag(flag);
} else if (val == "false" || (interr.empty() && n == 0)) {
p.unset_flag(flag);
} else {
ss << "expecting value 'true', 'false', '0', or '1'";
return -EINVAL;
}
} else if (var == "hashpspool") {
uint64_t flag = pg_pool_t::get_flag_by_name(var);
bool force = false;
cmd_getval(cmdmap, "yes_i_really_mean_it", force);
if (!force) {
ss << "are you SURE? this will remap all placement groups in this pool,"
" this triggers large data movement,"
" pass --yes-i-really-mean-it if you really do.";
return -EPERM;
}
// make sure we only compare against 'n' if we didn't receive a string
if (val == "true" || (interr.empty() && n == 1)) {
p.set_flag(flag);
} else if (val == "false" || (interr.empty() && n == 0)) {
p.unset_flag(flag);
} else {
ss << "expecting value 'true', 'false', '0', or '1'";
return -EINVAL;
}
} else if (var == "hit_set_type") {
if (val == "none")
p.hit_set_params = HitSet::Params();
else {
int err = check_cluster_features(CEPH_FEATURE_OSD_CACHEPOOL, ss);
if (err)
return err;
if (val == "bloom") {
BloomHitSet::Params *bsp = new BloomHitSet::Params;
bsp->set_fpp(g_conf().get_val<double>("osd_pool_default_hit_set_bloom_fpp"));
p.hit_set_params = HitSet::Params(bsp);
} else if (val == "explicit_hash")
p.hit_set_params = HitSet::Params(new ExplicitHashHitSet::Params);
else if (val == "explicit_object")
p.hit_set_params = HitSet::Params(new ExplicitObjectHitSet::Params);
else {
ss << "unrecognized hit_set type '" << val << "'";
return -EINVAL;
}
}
} else if (var == "hit_set_period") {
if (interr.length()) {
ss << "error parsing integer value '" << val << "': " << interr;
return -EINVAL;
} else if (n < 0) {
ss << "hit_set_period should be non-negative";
return -EINVAL;
}
p.hit_set_period = n;
} else if (var == "hit_set_count") {
if (interr.length()) {
ss << "error parsing integer value '" << val << "': " << interr;
return -EINVAL;
} else if (n < 0) {
ss << "hit_set_count should be non-negative";
return -EINVAL;
}
p.hit_set_count = n;
} else if (var == "hit_set_fpp") {
if (floaterr.length()) {
ss << "error parsing floating point value '" << val << "': " << floaterr;
return -EINVAL;
} else if (f < 0 || f > 1.0) {
ss << "hit_set_fpp should be in the range 0..1";
return -EINVAL;
}
if (p.hit_set_params.get_type() != HitSet::TYPE_BLOOM) {
ss << "hit set is not of type Bloom; invalid to set a false positive rate!";
return -EINVAL;
}
BloomHitSet::Params *bloomp = static_cast<BloomHitSet::Params*>(p.hit_set_params.impl.get());
bloomp->set_fpp(f);
} else if (var == "use_gmt_hitset") {
if (val == "true" || (interr.empty() && n == 1)) {
p.use_gmt_hitset = true;
} else {
ss << "expecting value 'true' or '1'";
return -EINVAL;
}
} else if (var == "allow_ec_overwrites") {
if (!p.is_erasure()) {
ss << "ec overwrites can only be enabled for an erasure coded pool";
return -EINVAL;
}
stringstream err;
if (!g_conf()->mon_debug_no_require_bluestore_for_ec_overwrites &&
!is_pool_currently_all_bluestore(pool, p, &err)) {
ss << "pool must only be stored on bluestore for scrubbing to work: " << err.str();
return -EINVAL;
}
if (val == "true" || (interr.empty() && n == 1)) {
p.flags |= pg_pool_t::FLAG_EC_OVERWRITES;
} else if (val == "false" || (interr.empty() && n == 0)) {
ss << "ec overwrites cannot be disabled once enabled";
return -EINVAL;
} else {
ss << "expecting value 'true', 'false', '0', or '1'";
return -EINVAL;
}
} else if (var == "target_max_objects") {
if (interr.length()) {
ss << "error parsing int '" << val << "': " << interr;
return -EINVAL;
}
p.target_max_objects = n;
} else if (var == "target_max_bytes") {
if (interr.length()) {
ss << "error parsing int '" << val << "': " << interr;
return -EINVAL;
}
p.target_max_bytes = n;
} else if (var == "cache_target_dirty_ratio") {
if (floaterr.length()) {
ss << "error parsing float '" << val << "': " << floaterr;
return -EINVAL;
}
if (f < 0 || f > 1.0) {
ss << "value must be in the range 0..1";
return -ERANGE;
}
p.cache_target_dirty_ratio_micro = uf;
} else if (var == "cache_target_dirty_high_ratio") {
if (floaterr.length()) {
ss << "error parsing float '" << val << "': " << floaterr;
return -EINVAL;
}
if (f < 0 || f > 1.0) {
ss << "value must be in the range 0..1";
return -ERANGE;
}
p.cache_target_dirty_high_ratio_micro = uf;
} else if (var == "cache_target_full_ratio") {
if (floaterr.length()) {
ss << "error parsing float '" << val << "': " << floaterr;
return -EINVAL;
}
if (f < 0 || f > 1.0) {
ss << "value must be in the range 0..1";
return -ERANGE;
}
p.cache_target_full_ratio_micro = uf;
} else if (var == "cache_min_flush_age") {
if (interr.length()) {
ss << "error parsing int '" << val << "': " << interr;
return -EINVAL;
}
p.cache_min_flush_age = n;
} else if (var == "cache_min_evict_age") {
if (interr.length()) {
ss << "error parsing int '" << val << "': " << interr;
return -EINVAL;
}
p.cache_min_evict_age = n;
} else if (var == "min_read_recency_for_promote") {
if (interr.length()) {
ss << "error parsing integer value '" << val << "': " << interr;
return -EINVAL;
}
p.min_read_recency_for_promote = n;
} else if (var == "hit_set_grade_decay_rate") {
if (interr.length()) {
ss << "error parsing integer value '" << val << "': " << interr;
return -EINVAL;
}
if (n > 100 || n < 0) {
ss << "value out of range,valid range is 0 - 100";
return -EINVAL;
}
p.hit_set_grade_decay_rate = n;
} else if (var == "hit_set_search_last_n") {
if (interr.length()) {
ss << "error parsing integer value '" << val << "': " << interr;
return -EINVAL;
}
if (n > p.hit_set_count || n < 0) {
ss << "value out of range,valid range is 0 - hit_set_count";
return -EINVAL;
}
p.hit_set_search_last_n = n;
} else if (var == "min_write_recency_for_promote") {
if (interr.length()) {
ss << "error parsing integer value '" << val << "': " << interr;
return -EINVAL;
}
p.min_write_recency_for_promote = n;
} else if (var == "fast_read") {
if (p.is_replicated()) {
ss << "fast read is not supported in replication pool";
return -EINVAL;
}
if (val == "true" || (interr.empty() && n == 1)) {
p.fast_read = true;
} else if (val == "false" || (interr.empty() && n == 0)) {
p.fast_read = false;
} else {
ss << "expecting value 'true', 'false', '0', or '1'";
return -EINVAL;
}
} else if (pool_opts_t::is_opt_name(var)) {
bool unset = val == "unset";
if (var == "compression_mode") {
if (!unset) {
auto cmode = Compressor::get_comp_mode_type(val);
if (!cmode) {
ss << "unrecognized compression mode '" << val << "'";
return -EINVAL;
}
}
} else if (var == "compression_algorithm") {
if (!unset) {
auto alg = Compressor::get_comp_alg_type(val);
if (!alg) {
ss << "unrecognized compression_algorithm '" << val << "'";
return -EINVAL;
}
}
} else if (var == "compression_required_ratio") {
if (floaterr.length()) {
ss << "error parsing float value '" << val << "': " << floaterr;
return -EINVAL;
}
if (f < 0 || f > 1) {
ss << "compression_required_ratio is out of range (0-1): '" << val << "'";
return -EINVAL;
}
} else if (var == "csum_type") {
auto t = unset ? 0 : Checksummer::get_csum_string_type(val);
if (t < 0 ) {
ss << "unrecognized csum_type '" << val << "'";
return -EINVAL;
}
//preserve csum_type numeric value
n = t;
interr.clear();
} else if (var == "compression_max_blob_size" ||
var == "compression_min_blob_size" ||
var == "csum_max_block" ||
var == "csum_min_block") {
if (interr.length()) {
ss << "error parsing int value '" << val << "': " << interr;
return -EINVAL;
}
} else if (var == "fingerprint_algorithm") {
if (!unset) {
auto alg = pg_pool_t::get_fingerprint_from_str(val);
if (!alg) {
ss << "unrecognized fingerprint_algorithm '" << val << "'";
return -EINVAL;
}
}
} else if (var == "target_size_bytes") {
if (interr.length()) {
ss << "error parsing unit value '" << val << "': " << interr;
return -EINVAL;
}
if (osdmap.require_osd_release < ceph_release_t::nautilus) {
ss << "must set require_osd_release to nautilus or "
<< "later before setting target_size_bytes";
return -EINVAL;
}
} else if (var == "target_size_ratio") {
if (f < 0.0) {
ss << "target_size_ratio cannot be negative";
return -EINVAL;
}
} else if (var == "pg_num_min") {
if (interr.length()) {
ss << "error parsing int value '" << val << "': " << interr;
return -EINVAL;
}
if (n > (int)p.get_pg_num_target()) {
ss << "specified pg_num_min " << n
<< " > pg_num " << p.get_pg_num_target();
return -EINVAL;
}
} else if (var == "pg_num_max") {
if (interr.length()) {
ss << "error parsing int value '" << val << "': " << interr;
return -EINVAL;
}
if (n && n < (int)p.get_pg_num_target()) {
ss << "specified pg_num_max " << n
<< " < pg_num " << p.get_pg_num_target();
return -EINVAL;
}
} else if (var == "recovery_priority") {
if (interr.length()) {
ss << "error parsing int value '" << val << "': " << interr;
return -EINVAL;
}
if (!g_conf()->debug_allow_any_pool_priority) {
if (n > OSD_POOL_PRIORITY_MAX || n < OSD_POOL_PRIORITY_MIN) {
ss << "pool recovery_priority must be between " << OSD_POOL_PRIORITY_MIN
<< " and " << OSD_POOL_PRIORITY_MAX;
return -EINVAL;
}
}
} else if (var == "pg_autoscale_bias") {
if (f < 0.0 || f > 1000.0) {
ss << "pg_autoscale_bias must be between 0 and 1000";
return -EINVAL;
}
} else if (var == "dedup_tier") {
if (interr.empty()) {
ss << "expecting value 'pool name'";
return -EINVAL;
}
// Current base tier in dedup does not support ec pool
if (p.is_erasure()) {
ss << "pool '" << poolstr
<< "' is an ec pool, which cannot be a base tier";
return -ENOTSUP;
}
int64_t lowtierpool_id = osdmap.lookup_pg_pool_name(val);
if (lowtierpool_id < 0) {
ss << "unrecognized pool '" << val << "'";
return -ENOENT;
}
const pg_pool_t *tp = osdmap.get_pg_pool(lowtierpool_id);
ceph_assert(tp);
n = lowtierpool_id;
// The original input is string (pool name), but we convert it to int64_t.
// So, clear interr
interr.clear();
} else if (var == "dedup_chunk_algorithm") {
if (!unset) {
auto alg = pg_pool_t::get_dedup_chunk_algorithm_from_str(val);
if (!alg) {
ss << "unrecognized fingerprint_algorithm '" << val << "'";
return -EINVAL;
}
}
} else if (var == "dedup_cdc_chunk_size") {
if (interr.length()) {
ss << "error parsing int value '" << val << "': " << interr;
return -EINVAL;
}
}
pool_opts_t::opt_desc_t desc = pool_opts_t::get_opt_desc(var);
switch (desc.type) {
case pool_opts_t::STR:
if (unset) {
p.opts.unset(desc.key);
} else {
p.opts.set(desc.key, static_cast<std::string>(val));
}
break;
case pool_opts_t::INT:
if (interr.length()) {
ss << "error parsing integer value '" << val << "': " << interr;
return -EINVAL;
}
if (n == 0) {
p.opts.unset(desc.key);
} else {
p.opts.set(desc.key, static_cast<int64_t>(n));
}
break;
case pool_opts_t::DOUBLE:
if (floaterr.length()) {
ss << "error parsing floating point value '" << val << "': " << floaterr;
return -EINVAL;
}
if (f == 0) {
p.opts.unset(desc.key);
} else {
p.opts.set(desc.key, static_cast<double>(f));
}
break;
default:
ceph_assert(!"unknown type");
}
} else {
ss << "unrecognized variable '" << var << "'";
return -EINVAL;
}
if (val != "unset") {
ss << "set pool " << pool << " " << var << " to " << val;
} else {
ss << "unset pool " << pool << " " << var;
}
p.last_change = pending_inc.epoch;
pending_inc.new_pools[pool] = p;
return 0;
}
// Prepare phase of the "osd pool application ..." commands: validate and
// apply the change to the pending map. The modified-flag out-param is not
// needed when preparing.
int OSDMonitor::prepare_command_pool_application(const string &prefix,
                                                 const cmdmap_t& cmdmap,
                                                 stringstream& ss)
{
  constexpr bool preparing = true;
  return _command_pool_application(prefix, cmdmap, ss, nullptr, preparing);
}
// Preprocess phase of the "osd pool application ..." commands: validate
// only, and report via *modified whether the command would be a no-op.
int OSDMonitor::preprocess_command_pool_application(const string &prefix,
                                                    const cmdmap_t& cmdmap,
                                                    stringstream& ss,
                                                    bool *modified)
{
  constexpr bool preparing = false;
  return _command_pool_application(prefix, cmdmap, ss, modified, preparing);
}
/**
* Common logic for preprocess and prepare phases of pool application
* tag commands. In preprocess mode we're only detecting invalid
* commands, and determining whether it was a modification or a no-op.
* In prepare mode we're actually updating the pending state.
*/
int OSDMonitor::_command_pool_application(const string &prefix,
const cmdmap_t& cmdmap,
stringstream& ss,
bool *modified,
bool preparing)
{
string pool_name;
cmd_getval(cmdmap, "pool", pool_name);
int64_t pool = osdmap.lookup_pg_pool_name(pool_name.c_str());
if (pool < 0) {
ss << "unrecognized pool '" << pool_name << "'";
return -ENOENT;
}
pg_pool_t p = *osdmap.get_pg_pool(pool);
if (preparing) {
if (pending_inc.new_pools.count(pool)) {
p = pending_inc.new_pools[pool];
}
}
string app;
cmd_getval(cmdmap, "app", app);
bool app_exists = (p.application_metadata.count(app) > 0);
string key;
cmd_getval(cmdmap, "key", key);
if (key == "all") {
ss << "key cannot be 'all'";
return -EINVAL;
}
string value;
cmd_getval(cmdmap, "value", value);
if (value == "all") {
ss << "value cannot be 'all'";
return -EINVAL;
}
if (boost::algorithm::ends_with(prefix, "enable")) {
if (app.empty()) {
ss << "application name must be provided";
return -EINVAL;
}
if (p.is_tier()) {
ss << "application must be enabled on base tier";
return -EINVAL;
}
bool force = false;
cmd_getval(cmdmap, "yes_i_really_mean_it", force);
if (!app_exists && !p.application_metadata.empty() && !force) {
ss << "Are you SURE? Pool '" << pool_name << "' already has an enabled "
<< "application; pass --yes-i-really-mean-it to proceed anyway";
return -EPERM;
}
if (!app_exists && p.application_metadata.size() >= MAX_POOL_APPLICATIONS) {
ss << "too many enabled applications on pool '" << pool_name << "'; "
<< "max " << MAX_POOL_APPLICATIONS;
return -EINVAL;
}
if (app.length() > MAX_POOL_APPLICATION_LENGTH) {
ss << "application name '" << app << "' too long; max length "
<< MAX_POOL_APPLICATION_LENGTH;
return -EINVAL;
}
if (!app_exists) {
p.application_metadata[app] = {};
}
ss << "enabled application '" << app << "' on pool '" << pool_name << "'";
} else if (boost::algorithm::ends_with(prefix, "disable")) {
bool force = false;
cmd_getval(cmdmap, "yes_i_really_mean_it", force);
if (!force) {
ss << "Are you SURE? Disabling an application within a pool might result "
<< "in loss of application functionality; pass "
<< "--yes-i-really-mean-it to proceed anyway";
return -EPERM;
}
if (!app_exists) {
ss << "application '" << app << "' is not enabled on pool '" << pool_name
<< "'";
return 0; // idempotent
}
p.application_metadata.erase(app);
ss << "disable application '" << app << "' on pool '" << pool_name << "'";
} else if (boost::algorithm::ends_with(prefix, "set")) {
if (p.is_tier()) {
ss << "application metadata must be set on base tier";
return -EINVAL;
}
if (!app_exists) {
ss << "application '" << app << "' is not enabled on pool '" << pool_name
<< "'";
return -ENOENT;
}
string key;
cmd_getval(cmdmap, "key", key);
if (key.empty()) {
ss << "key must be provided";
return -EINVAL;
}
auto &app_keys = p.application_metadata[app];
if (app_keys.count(key) == 0 &&
app_keys.size() >= MAX_POOL_APPLICATION_KEYS) {
ss << "too many keys set for application '" << app << "' on pool '"
<< pool_name << "'; max " << MAX_POOL_APPLICATION_KEYS;
return -EINVAL;
}
if (key.length() > MAX_POOL_APPLICATION_LENGTH) {
ss << "key '" << app << "' too long; max length "
<< MAX_POOL_APPLICATION_LENGTH;
return -EINVAL;
}
string value;
cmd_getval(cmdmap, "value", value);
if (value.length() > MAX_POOL_APPLICATION_LENGTH) {
ss << "value '" << value << "' too long; max length "
<< MAX_POOL_APPLICATION_LENGTH;
return -EINVAL;
}
p.application_metadata[app][key] = value;
ss << "set application '" << app << "' key '" << key << "' to '"
<< value << "' on pool '" << pool_name << "'";
} else if (boost::algorithm::ends_with(prefix, "rm")) {
if (!app_exists) {
ss << "application '" << app << "' is not enabled on pool '" << pool_name
<< "'";
return -ENOENT;
}
string key;
cmd_getval(cmdmap, "key", key);
auto it = p.application_metadata[app].find(key);
if (it == p.application_metadata[app].end()) {
ss << "application '" << app << "' on pool '" << pool_name
<< "' does not have key '" << key << "'";
return 0; // idempotent
}
p.application_metadata[app].erase(it);
ss << "removed application '" << app << "' key '" << key << "' on pool '"
<< pool_name << "'";
} else {
ceph_abort();
}
if (preparing) {
p.last_change = pending_inc.epoch;
pending_inc.new_pools[pool] = p;
}
// Because we fell through this far, we didn't hit no-op cases,
// so pool was definitely modified
if (modified != nullptr) {
*modified = true;
}
return 0;
}
// Remove (or merely unlink, when unlink_only) item `id` from the working
// crush map, optionally restricted to the subtree under `ancestor`.
// Returns the CrushWrapper result code.
int OSDMonitor::_prepare_command_osd_crush_remove(
    CrushWrapper &newcrush,
    int32_t id,
    int32_t ancestor,
    bool has_ancestor,
    bool unlink_only)
{
  return has_ancestor
    ? newcrush.remove_item_under(cct, id, ancestor, unlink_only)
    : newcrush.remove_item(cct, id, unlink_only);
}
// Publish the (already modified) crush map: replace any previously queued
// crush blob in the pending incremental with a fresh encoding of newcrush,
// using the feature bits shared by the current quorum.
void OSDMonitor::do_osd_crush_remove(CrushWrapper& newcrush)
{
  pending_inc.crush.clear();
  newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
}
// Remove (or unlink) crush item `id` and, on success, queue the updated
// crush map in the pending incremental. Returns 0 or a negative errno
// from the underlying crush operation.
int OSDMonitor::prepare_command_osd_crush_remove(
    CrushWrapper &newcrush,
    int32_t id,
    int32_t ancestor,
    bool has_ancestor,
    bool unlink_only)
{
  const int err = _prepare_command_osd_crush_remove(newcrush, id, ancestor,
                                                    has_ancestor, unlink_only);
  if (err < 0) {
    return err;
  }
  // only publish the new map when the removal actually succeeded
  ceph_assert(err == 0);
  do_osd_crush_remove(newcrush);
  return 0;
}
// Queue removal of OSD `id`: record its current state bits and a zeroed
// uuid in the pending incremental, and drop its stored metadata.
// Refuses (-EBUSY) while the OSD is still up.
int OSDMonitor::prepare_command_osd_remove(int32_t id)
{
  if (osdmap.is_up(id)) {
    return -EBUSY;
  }

  pending_inc.new_state[id] = osdmap.get_state(id);
  pending_inc.new_uuid[id] = uuid_d();
  pending_metadata_rm.insert(id);
  pending_metadata.erase(id);
  return 0;
}
// Pick an id for a new OSD. If an unused slot below max_osd can be
// recycled, store it in *existing_id and return -1; otherwise return the
// next id past the end of the (possibly pending) map, leaving
// *existing_id at -1.
int32_t OSDMonitor::_allocate_osd_id(int32_t* existing_id)
{
  ceph_assert(existing_id);
  *existing_id = -1;

  const int32_t max = osdmap.get_max_osd();
  for (int32_t candidate = 0; candidate < max; ++candidate) {
    if (osdmap.exists(candidate)) {
      continue;
    }
    // skip slots that are about to come into existence via pending changes
    if (pending_inc.new_up_client.count(candidate) > 0) {
      continue;
    }
    auto ps = pending_inc.new_state.find(candidate);
    if (ps != pending_inc.new_state.end() &&
        (ps->second & CEPH_OSD_EXISTS) != 0) {
      continue;
    }
    *existing_id = candidate;
    return -1;
  }

  // no recyclable slot: allocate at the end of the map
  return (pending_inc.new_max_osd < 0) ? osdmap.get_max_osd()
                                       : pending_inc.new_max_osd;
}
// Record the creation of an OSD in the pending incremental. Resolves the
// id to use (existing-by-uuid, caller-supplied, recycled, or brand new),
// optionally sets the device class in the pending crush map, and marks the
// OSD IN/NEW. The chosen id is returned through *new_id.
// NOTE(review): assumes validate_osd_create() already vetted id/uuid;
// inconsistencies here are fatal (ceph_assert).
void OSDMonitor::do_osd_create(
    const int32_t id,
    const uuid_d& uuid,
    const string& device_class,
    int32_t* new_id)
{
  dout(10) << __func__ << " uuid " << uuid << dendl;
  ceph_assert(new_id);

  // We presume validation has been performed prior to calling this
  // function. We assert with prejudice.

  int32_t allocated_id = -1; // declare here so we can jump
  int32_t existing_id = -1;
  if (!uuid.is_zero()) {
    existing_id = osdmap.identify_osd(uuid);
    if (existing_id >= 0) {
      // uuid already maps to an osd; any caller-supplied id must match it
      ceph_assert(id < 0 || id == existing_id);
      *new_id = existing_id;
      goto out;
    } else if (id >= 0) {
      // uuid does not exist, and id has been provided, so just create
      // the new osd.id
      *new_id = id;
      goto out;
    }
  }

  // allocate a new id
  allocated_id = _allocate_osd_id(&existing_id);
  dout(10) << __func__ << " allocated id " << allocated_id
           << " existing id " << existing_id << dendl;
  if (existing_id >= 0) {
    // recycled an unused slot below max_osd
    ceph_assert(existing_id < osdmap.get_max_osd());
    ceph_assert(allocated_id < 0);
    *new_id = existing_id;
  } else if (allocated_id >= 0) {
    ceph_assert(existing_id < 0);
    // raise max_osd
    if (pending_inc.new_max_osd < 0) {
      pending_inc.new_max_osd = osdmap.get_max_osd() + 1;
    } else {
      ++pending_inc.new_max_osd;
    }
    *new_id = pending_inc.new_max_osd - 1;
    ceph_assert(*new_id == allocated_id);
  } else {
    ceph_abort_msg("unexpected condition");
  }

 out:
  if (device_class.size()) {
    // set the device class on the new id in the pending crush map
    CrushWrapper newcrush = _get_pending_crush();
    if (newcrush.get_max_devices() < *new_id + 1) {
      newcrush.set_max_devices(*new_id + 1);
    }
    string name = string("osd.") + stringify(*new_id);
    if (!newcrush.item_exists(*new_id)) {
      newcrush.set_item_name(*new_id, name);
    }
    ostringstream ss;
    int r = newcrush.update_device_class(*new_id, device_class, name, &ss);
    if (r < 0) {
      derr << __func__ << " failed to set " << name << " device_class "
           << device_class << ": " << cpp_strerror(r) << " - " << ss.str()
           << dendl;
      // non-fatal... this might be a replay and we want to be idempotent.
    } else {
      dout(20) << __func__ << " set " << name << " device_class " << device_class
               << dendl;
      pending_inc.crush.clear();
      newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
    }
  } else {
    dout(20) << __func__ << " no device_class" << dendl;
  }

  dout(10) << __func__ << " using id " << *new_id << dendl;
  // ensure max_osd covers the chosen id even on the goto paths above,
  // which skip the allocation bookkeeping
  if (osdmap.get_max_osd() <= *new_id && pending_inc.new_max_osd <= *new_id) {
    pending_inc.new_max_osd = *new_id + 1;
  }

  pending_inc.new_weight[*new_id] = CEPH_OSD_IN;
  // do not set EXISTS; OSDMap::set_weight, called by apply_incremental, will
  // set it for us. (ugh.)
  pending_inc.new_state[*new_id] |= CEPH_OSD_NEW;
  if (!uuid.is_zero())
    pending_inc.new_uuid[*new_id] = uuid;
}
int OSDMonitor::validate_osd_create(
const int32_t id,
const uuid_d& uuid,
const bool check_osd_exists,
int32_t* existing_id,
stringstream& ss)
{
dout(10) << __func__ << " id " << id << " uuid " << uuid
<< " check_osd_exists " << check_osd_exists << dendl;
ceph_assert(existing_id);
if (id < 0 && uuid.is_zero()) {
// we have nothing to validate
*existing_id = -1;
return 0;
} else if (uuid.is_zero()) {
// we have an id but we will ignore it - because that's what
// `osd create` does.
return 0;
}
/*
* This function will be used to validate whether we are able to
* create a new osd when the `uuid` is specified.
*
* It will be used by both `osd create` and `osd new`, as the checks
* are basically the same when it pertains to osd id and uuid validation.
* However, `osd create` presumes an `uuid` is optional, for legacy
* reasons, while `osd new` requires the `uuid` to be provided. This
* means that `osd create` will not be idempotent if an `uuid` is not
* provided, but we will always guarantee the idempotency of `osd new`.
*/
ceph_assert(!uuid.is_zero());
if (pending_inc.identify_osd(uuid) >= 0) {
// osd is about to exist
return -EAGAIN;
}
int32_t i = osdmap.identify_osd(uuid);
if (i >= 0) {
// osd already exists
if (id >= 0 && i != id) {
ss << "uuid " << uuid << " already in use for different id " << i;
return -EEXIST;
}
// return a positive errno to distinguish between a blocking error
// and an error we consider to not be a problem (i.e., this would be
// an idempotent operation).
*existing_id = i;
return EEXIST;
}
// i < 0
if (id >= 0) {
if (pending_inc.new_state.count(id)) {
// osd is about to exist
return -EAGAIN;
}
// we may not care if an osd exists if we are recreating a previously
// destroyed osd.
if (check_osd_exists && osdmap.exists(id)) {
ss << "id " << id << " already in use and does not match uuid "
<< uuid;
return -EINVAL;
}
}
return 0;
}
// Validation entry point for the legacy `osd create` command. Re-creating
// a destroyed osd is refused (that path belongs to `osd new`); otherwise
// defer to validate_osd_create with the exists-check enabled.
int OSDMonitor::prepare_command_osd_create(
    const int32_t id,
    const uuid_d& uuid,
    int32_t* existing_id,
    stringstream& ss)
{
  dout(10) << __func__ << " id " << id << " uuid " << uuid << dendl;
  ceph_assert(existing_id);

  if (osdmap.is_destroyed(id)) {
    ss << "ceph osd create has been deprecated. Please use ceph osd new "
          "instead.";
    return -EINVAL;
  }

  if (uuid.is_zero()) {
    dout(10) << __func__ << " no uuid; assuming legacy `osd create`" << dendl;
  }

  return validate_osd_create(id, uuid, true, existing_id, ss);
}
// Handle `ceph osd new`: create a brand-new osd, recreate a previously
// destroyed osd, or recognize an idempotent replay and no-op.
//
// `params` may carry "crush_device_class", "cephx_secret",
// "cephx_lockbox_secret" and "dmcrypt_key" (the latter two must be
// supplied together).
//
// Side effects (non-idempotent path only): registers the cephx (and
// optionally lockbox) auth entities with the auth monitor, stores the
// dm-crypt key via the kv monitor when a lockbox is supplied, and stages
// the osdmap changes in pending_inc. Requires paxos to be plugged; the
// caller is responsible for proposing.
//
// Returns 0 on success, EEXIST (positive!) for an idempotent no-op, or a
// negative errno on failure (human-readable reason appended to `ss`).
// On success/no-op the osd id is emitted via `f` (if given) or `ss`.
int OSDMonitor::prepare_command_osd_new(
  MonOpRequestRef op,
  const cmdmap_t& cmdmap,
  const map<string,string>& params,
  stringstream &ss,
  Formatter *f)
{
  uuid_d uuid;
  string uuidstr;
  int64_t id = -1;
  ceph_assert(paxos.is_plugged());
  dout(10) << __func__ << " " << op << dendl;
  /* validate command. abort now if something's wrong. */
  /* `osd new` will expect a `uuid` to be supplied; `id` is optional.
   *
   * If `id` is not specified, we will identify any existing osd based
   * on `uuid`. Operation will be idempotent iff secrets match.
   *
   * If `id` is specified, we will identify any existing osd based on
   * `uuid` and match against `id`. If they match, operation will be
   * idempotent iff secrets match.
   *
   * `-i secrets.json` will be optional. If supplied, will be used
   * to check for idempotency when `id` and `uuid` match.
   *
   * If `id` is not specified, and `uuid` does not exist, an id will
   * be found or allocated for the osd.
   *
   * If `id` is specified, and the osd has been previously marked
   * as destroyed, then the `id` will be reused.
   */
  if (!cmd_getval(cmdmap, "uuid", uuidstr)) {
    ss << "requires the OSD's UUID to be specified.";
    return -EINVAL;
  } else if (!uuid.parse(uuidstr.c_str())) {
    ss << "invalid UUID value '" << uuidstr << "'.";
    return -EINVAL;
  }
  if (cmd_getval(cmdmap, "id", id) &&
      (id < 0)) {
    ss << "invalid OSD id; must be greater or equal than zero.";
    return -EINVAL;
  }
  // are we running an `osd create`-like command, or recreating
  // a previously destroyed osd?
  bool is_recreate_destroyed = (id >= 0 && osdmap.is_destroyed(id));
  // we will care about `id` to assess whether osd is `destroyed`, or
  // to create a new osd.
  // we will need an `id` by the time we reach auth.
  int32_t existing_id = -1;
  int err = validate_osd_create(id, uuid, !is_recreate_destroyed,
                                &existing_id, ss);
  bool may_be_idempotent = false;
  if (err == EEXIST) {
    // positive EEXIST: an osd with this uuid already exists, which is
    // idempotent from the osdmon's point-of-view (secrets still need
    // checking further down).
    may_be_idempotent = true;
    ceph_assert(existing_id >= 0);
    id = existing_id;
  } else if (err < 0) {
    return err;
  }
  if (!may_be_idempotent) {
    // idempotency is out of the window. We are either creating a new
    // osd or recreating a destroyed osd.
    //
    // We now need to figure out if we have an `id` (and if it's valid),
    // or find an `id` if we don't have one.
    // NOTE: we need to consider the case where the `id` is specified for
    //       `osd create`, and we must honor it. So this means checking if
    //       the `id` is destroyed, and if so assume the destroy; otherwise,
    //       check if it `exists` - in which case we complain about not being
    //       `destroyed`. In the end, if nothing fails, we must allow the
    //       creation, so that we are compatible with `create`.
    if (id >= 0 && osdmap.exists(id) && !osdmap.is_destroyed(id)) {
      dout(10) << __func__ << " osd." << id << " isn't destroyed" << dendl;
      ss << "OSD " << id << " has not yet been destroyed";
      return -EINVAL;
    } else if (id < 0) {
      // find an `id`: either a newly allocated one, or a reusable
      // existing slot reported back by the allocator.
      id = _allocate_osd_id(&existing_id);
      if (id < 0) {
        ceph_assert(existing_id >= 0);
        id = existing_id;
      }
      dout(10) << __func__ << " found id " << id << " to use" << dendl;
    } else if (id >= 0 && osdmap.is_destroyed(id)) {
      dout(10) << __func__ << " recreating osd." << id << dendl;
    } else {
      dout(10) << __func__ << " creating new osd." << id << dendl;
    }
  } else {
    ceph_assert(id >= 0);
    ceph_assert(osdmap.exists(id));
  }
  // we are now able to either create a brand new osd or reuse an existing
  // osd that has been previously destroyed.
  dout(10) << __func__ << " id " << id << " uuid " << uuid << dendl;
  if (may_be_idempotent && params.empty()) {
    // nothing to do, really: replayed `osd new` with no secrets and no
    // device class. Report the id and the (positive) EEXIST outcome.
    dout(10) << __func__ << " idempotent and no params -- no op." << dendl;
    ceph_assert(id >= 0);
    if (f) {
      f->open_object_section("created_osd");
      f->dump_int("osdid", id);
      f->close_section();
    } else {
      ss << id;
    }
    return EEXIST;
  }
  string device_class;
  auto p = params.find("crush_device_class");
  if (p != params.end()) {
    device_class = p->second;
    dout(20) << __func__ << " device_class will be " << device_class << dendl;
  }
  string cephx_secret, lockbox_secret, dmcrypt_key;
  bool has_lockbox = false;
  bool has_secrets = params.count("cephx_secret")
    || params.count("cephx_lockbox_secret")
    || params.count("dmcrypt_key");
  KVMonitor *svc = nullptr;
  AuthMonitor::auth_entity_t cephx_entity, lockbox_entity;
  if (has_secrets) {
    // a cephx secret is mandatory whenever any secret is supplied;
    // lockbox secret and dm-crypt key are optional but come as a pair.
    if (params.count("cephx_secret") == 0) {
      ss << "requires a cephx secret.";
      return -EINVAL;
    }
    cephx_secret = params.at("cephx_secret");
    bool has_lockbox_secret = (params.count("cephx_lockbox_secret") > 0);
    bool has_dmcrypt_key = (params.count("dmcrypt_key") > 0);
    dout(10) << __func__ << " has lockbox " << has_lockbox_secret
             << " dmcrypt " << has_dmcrypt_key << dendl;
    if (has_lockbox_secret && has_dmcrypt_key) {
      has_lockbox = true;
      lockbox_secret = params.at("cephx_lockbox_secret");
      dmcrypt_key = params.at("dmcrypt_key");
    } else if (!has_lockbox_secret != !has_dmcrypt_key) {
      // exactly one of the two was supplied; reject the half-pair.
      ss << "requires both a cephx lockbox secret and a dm-crypt key.";
      return -EINVAL;
    }
    dout(10) << __func__ << " validate secrets using osd id " << id << dendl;
    err = mon.authmon()->validate_osd_new(id, uuid,
                                          cephx_secret,
                                          lockbox_secret,
                                          cephx_entity,
                                          lockbox_entity,
                                          ss);
    if (err < 0) {
      return err;
    } else if (may_be_idempotent && err != EEXIST) {
      // osd exists in the osdmap but the supplied secrets differ from the
      // stored ones: this is not a replay, it's a conflict.
      // for this to be idempotent, `id` should already be >= 0; no need
      // to use validate_id.
      ceph_assert(id >= 0);
      ss << "osd." << id << " exists but secrets do not match";
      return -EEXIST;
    }
    if (has_lockbox) {
      svc = mon.kvmon();
      err = svc->validate_osd_new(uuid, dmcrypt_key, ss);
      if (err < 0) {
        return err;
      } else if (may_be_idempotent && err != EEXIST) {
        ceph_assert(id >= 0);
        ss << "osd." << id << " exists but dm-crypt key does not match.";
        return -EEXIST;
      }
    }
  }
  ceph_assert(!has_secrets || !cephx_secret.empty());
  ceph_assert(!has_lockbox || !lockbox_secret.empty());
  if (may_be_idempotent) {
    // we have nothing to do for either the osdmon or the authmon,
    // and we have no lockbox - so the config key service will not be
    // touched. This is therefore an idempotent operation, and we can
    // just return right away.
    dout(10) << __func__ << " idempotent -- no op." << dendl;
    ceph_assert(id >= 0);
    if (f) {
      f->open_object_section("created_osd");
      f->dump_int("osdid", id);
      f->close_section();
    } else {
      ss << id;
    }
    return EEXIST;
  }
  ceph_assert(!may_be_idempotent);
  // perform updates. From here on every step must succeed, as we start
  // mutating other services' state (auth, config-key).
  if (has_secrets) {
    ceph_assert(!cephx_secret.empty());
    ceph_assert((lockbox_secret.empty() && dmcrypt_key.empty()) ||
                (!lockbox_secret.empty() && !dmcrypt_key.empty()));
    err = mon.authmon()->do_osd_new(cephx_entity,
                                    lockbox_entity,
                                    has_lockbox);
    ceph_assert(0 == err);
    if (has_lockbox) {
      ceph_assert(nullptr != svc);
      svc->do_osd_new(uuid, dmcrypt_key);
    }
  }
  if (is_recreate_destroyed) {
    ceph_assert(id >= 0);
    ceph_assert(osdmap.is_destroyed(id));
    // new_state bits are toggled (XOR'd) into the osd's state when the
    // incremental is applied, so setting CEPH_OSD_DESTROYED here *clears*
    // the flag on the currently-destroyed osd.
    pending_inc.new_state[id] |= CEPH_OSD_DESTROYED;
    if ((osdmap.get_state(id) & CEPH_OSD_NEW) == 0) {
      // NEW not currently set: toggle it on for the recreated osd.
      pending_inc.new_state[id] |= CEPH_OSD_NEW;
    }
    if (osdmap.get_state(id) & CEPH_OSD_UP) {
      // due to http://tracker.ceph.com/issues/20751 some clusters may
      // have UP set for non-existent OSDs; make sure it is cleared
      // for a newly created osd.
      pending_inc.new_state[id] |= CEPH_OSD_UP;
    }
    pending_inc.new_uuid[id] = uuid;
  } else {
    ceph_assert(id >= 0);
    int32_t new_id = -1;
    do_osd_create(id, uuid, device_class, &new_id);
    ceph_assert(new_id >= 0);
    ceph_assert(id == new_id);
  }
  if (f) {
    f->open_object_section("created_osd");
    f->dump_int("osdid", id);
    f->close_section();
  } else {
    ss << id;
  }
  return 0;
}
// Top-level write-path entry for osdmap-mutating commands: parse the
// command json and verify the requesting session before delegating the
// actual work to prepare_command_impl(). Always returns true, having
// either replied with an error or handed the op off.
bool OSDMonitor::prepare_command(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto req = op->get_req<MMonCommand>();
  cmdmap_t parsed;
  stringstream parse_err;
  if (!cmdmap_from_json(req->cmd, &parsed, parse_err)) {
    // malformed command: reply immediately, nothing to stage
    mon.reply_command(op, -EINVAL, parse_err.str(), get_last_committed());
    return true;
  }
  if (!op->get_session()) {
    derr << __func__ << " no session" << dendl;
    mon.reply_command(op, -EACCES, "access denied", get_last_committed());
    return true;
  }
  return prepare_command_impl(op, parsed);
}
// Parse the "weights" argument (a JSON object mapping osd-id strings to
// weight strings, single quotes tolerated) into *weights.
// Returns 0 on success, -EINVAL on malformed input, -ENOENT if a
// referenced osd does not exist in the osdmap.
static int parse_reweights(CephContext *cct,
                           const cmdmap_t& cmdmap,
                           const OSDMap& osdmap,
                           map<int32_t, uint32_t>* weights)
{
  string raw;
  if (!cmd_getval(cmdmap, "weights", raw)) {
    return -EINVAL;
  }
  // the CLI tends to deliver single quotes; JSON wants double quotes
  std::replace(raw.begin(), raw.end(), '\'', '"');
  json_spirit::mValue parsed;
  if (!json_spirit::read(raw, parsed) ||
      parsed.type() != json_spirit::obj_type) {
    return -EINVAL;
  }
  try {
    for (const auto& [id_str, weight_val] : parsed.get_obj()) {
      auto osd_id = std::stoi(id_str);
      if (!osdmap.exists(osd_id)) {
        return -ENOENT;
      }
      if (weight_val.type() != json_spirit::str_type) {
        return -EINVAL;
      }
      auto weight = std::stoul(weight_val.get_str());
      weights->insert({osd_id, weight});
    }
  } catch (const std::logic_error& e) {
    // std::stoi/std::stoul throw invalid_argument or out_of_range,
    // both of which derive from std::logic_error
    return -EINVAL;
  }
  return 0;
}
// Stage the destruction of osd.<id>: remove its cephx/lockbox auth
// entities and its config-key (dm-crypt) data, then mark it
// CEPH_OSD_DESTROYED and reset its uuid in pending_inc.
//
// Returns -ENOENT if the osd does not exist at all (callers such as
// `osd purge` handle that), 0 if it is already destroyed (idempotent)
// or once the destroy has been staged. Requires a plugged paxos; the
// caller is responsible for proposing.
int OSDMonitor::prepare_command_osd_destroy(
    int32_t id,
    stringstream& ss)
{
  ceph_assert(paxos.is_plugged());
  // we check if the osd exists for the benefit of `osd purge`, which may
  // have previously removed the osd. If the osd does not exist, return
  // -ENOENT to convey this, and let the caller deal with it.
  //
  // we presume that all auth secrets and config keys were removed prior
  // to this command being called. if they exist by now, we also assume
  // they must have been created by some other command and do not pertain
  // to this non-existent osd.
  if (!osdmap.exists(id)) {
    dout(10) << __func__ << " osd." << id << " does not exist." << dendl;
    return -ENOENT;
  }
  uuid_d uuid = osdmap.get_uuid(id);
  dout(10) << __func__ << " destroying osd." << id
           << " uuid " << uuid << dendl;
  // if it has been destroyed, we assume our work here is done.
  if (osdmap.is_destroyed(id)) {
    ss << "destroyed osd." << id;
    return 0;
  }
  EntityName cephx_entity, lockbox_entity;
  bool idempotent_auth = false, idempotent_cks = false;
  // -ENOENT from either validation means that service's state is already
  // gone; record it so we skip the corresponding update below instead of
  // failing.
  int err = mon.authmon()->validate_osd_destroy(id, uuid,
                                                cephx_entity,
                                                lockbox_entity,
                                                ss);
  if (err < 0) {
    if (err == -ENOENT) {
      idempotent_auth = true;
    } else {
      return err;
    }
  }
  auto svc = mon.kvmon();
  err = svc->validate_osd_destroy(id, uuid);
  if (err < 0) {
    ceph_assert(err == -ENOENT);
    err = 0;
    idempotent_cks = true;
  }
  if (!idempotent_auth) {
    err = mon.authmon()->do_osd_destroy(cephx_entity, lockbox_entity);
    ceph_assert(0 == err);
  }
  if (!idempotent_cks) {
    svc->do_osd_destroy(id, uuid);
  }
  pending_inc.new_state[id] = CEPH_OSD_DESTROYED;
  pending_inc.new_uuid[id] = uuid_d();
  // we can only propose_pending() once per service, otherwise we'll be
  // defying PaxosService and all laws of nature. Therefore, as we may
  // be used during 'osd purge', let's keep the caller responsible for
  // proposing.
  ceph_assert(err == 0);
  return 0;
}
// Completely remove osd.<id> from the cluster: take it out of crush,
// destroy it (auth + config-key cleanup, DESTROYED flag), and remove it
// from the osdmap. The osd must not be up.
//
// Returns -ENOENT when every step turned out to be a no-op and the osd
// no longer exists (fully idempotent replay), a negative errno on a
// failure that occurred before anything was staged, or 0 once the purge
// has been staged in pending_inc. Requires a plugged paxos; the caller
// is responsible for proposing.
int OSDMonitor::prepare_command_osd_purge(
    int32_t id,
    stringstream& ss)
{
  ceph_assert(paxos.is_plugged());
  dout(10) << __func__ << " purging osd." << id << dendl;
  ceph_assert(!osdmap.is_up(id));
  /*
   * This may look a bit weird, but this is what's going to happen:
   *
   *  1. we make sure that removing from crush works
   *  2. we call `prepare_command_osd_destroy()`. If it returns an
   *     error, then we abort the whole operation, as no updates
   *     have been made. However, this function will have
   *     side-effects, thus we need to make sure that all operations
   *     performed henceforth will *always* succeed.
   *  3. we call `prepare_command_osd_remove()`. Although this
   *     function can return an error, it currently only checks if the
   *     osd is up - and we have made sure that it is not so, so there
   *     is no conflict, and it is effectively an update.
   *  4. finally, we call `do_osd_crush_remove()`, which will perform
   *     the crush update we delayed from before.
   */
  CrushWrapper newcrush = _get_pending_crush();
  bool may_be_idempotent = false;
  // step 1: verify the crush removal up front; -ENOENT means the osd is
  // already gone from crush, which keeps the idempotency window open.
  int err = _prepare_command_osd_crush_remove(newcrush, id, 0, false, false);
  if (err == -ENOENT) {
    err = 0;
    may_be_idempotent = true;
  } else if (err < 0) {
    ss << "error removing osd." << id << " from crush";
    return err;
  }
  // no point destroying the osd again if it has already been marked destroyed
  if (!osdmap.is_destroyed(id)) {
    // step 2: destroy (auth/config-key cleanup + DESTROYED flag).
    err = prepare_command_osd_destroy(id, ss);
    if (err < 0) {
      if (err == -ENOENT) {
        err = 0;
      } else {
        return err;
      }
    } else {
      // a successful destroy changed state, so this run cannot be a
      // pure replay anymore.
      may_be_idempotent = false;
    }
  }
  ceph_assert(0 == err);
  if (may_be_idempotent && !osdmap.exists(id)) {
    dout(10) << __func__ << " osd." << id << " does not exist and "
             << "we are idempotent." << dendl;
    return -ENOENT;
  }
  // step 3: remove from the osdmap.
  err = prepare_command_osd_remove(id);
  // we should not be busy, as we should have made sure this id is not up.
  ceph_assert(0 == err);
  // step 4: apply the crush removal we prepared in step 1.
  do_osd_crush_remove(newcrush);
  return 0;
}
// Parse and validate the "pgid" argument from `cmdmap` into `pgid`.
// Returns 0 on success, -EINVAL on a missing/unparsable value, -ENOENT
// if the pg does not exist in the committed osdmap.
//
// NOTE(review): `pgids` is taken *by value*, so the assignment at the
// bottom only mutates a local copy -- a caller cannot actually receive
// the raw pgid string through it. If it is meant as an out-parameter it
// would need to be a pointer or reference; confirm intent at call sites.
//
// NOTE(review): if "pgid" is absent from cmdmap entirely,
// cmdmap.at("pgid") in the first error path will throw
// std::out_of_range -- presumably cmd_getval only fails here with the
// key present but of the wrong type; verify against callers.
int OSDMonitor::parse_pgid(const cmdmap_t& cmdmap, stringstream &ss,
                           /* out */ pg_t &pgid, std::optional<string> pgids) {
  string pgidstr;
  if (!cmd_getval(cmdmap, "pgid", pgidstr)) {
    ss << "unable to parse 'pgid' value '"
       << cmd_vartype_stringify(cmdmap.at("pgid")) << "'";
    return -EINVAL;
  }
  if (!pgid.parse(pgidstr.c_str())) {
    ss << "invalid pgid '" << pgidstr << "'";
    return -EINVAL;
  }
  // the pg must exist in the currently committed osdmap
  if (!osdmap.pg_exists(pgid)) {
    ss << "pgid '" << pgid << "' does not exist";
    return -ENOENT;
  }
  if (pgids.has_value())
    pgids.value() = pgidstr;
  return 0;
}
bool OSDMonitor::prepare_command_impl(MonOpRequestRef op,
const cmdmap_t& cmdmap)
{
op->mark_osdmon_event(__func__);
auto m = op->get_req<MMonCommand>();
bool ret = false;
stringstream ss;
string rs;
bufferlist rdata;
int err = 0;
string format = cmd_getval_or<string>(cmdmap, "format", "plain");
boost::scoped_ptr<Formatter> f(Formatter::create(format));
string prefix;
cmd_getval(cmdmap, "prefix", prefix);
int64_t osdid;
string osd_name;
bool osdid_present = false;
if (prefix != "osd pg-temp" &&
prefix != "osd pg-upmap" &&
prefix != "osd pg-upmap-items") { // avoid commands with non-int id arg
osdid_present = cmd_getval(cmdmap, "id", osdid);
}
if (osdid_present) {
ostringstream oss;
oss << "osd." << osdid;
osd_name = oss.str();
}
// Even if there's a pending state with changes that could affect
// a command, considering that said state isn't yet committed, we
// just don't care about those changes if the command currently being
// handled acts as a no-op against the current committed state.
// In a nutshell, we assume this command happens *before*.
//
// Let me make this clearer:
//
// - If we have only one client, and that client issues some
// operation that would conflict with this operation but is
// still on the pending state, then we would be sure that said
// operation wouldn't have returned yet, so the client wouldn't
// issue this operation (unless the client didn't wait for the
// operation to finish, and that would be the client's own fault).
//
// - If we have more than one client, each client will observe
// whatever is the state at the moment of the commit. So, if we
// have two clients, one issuing an unlink and another issuing a
// link, and if the link happens while the unlink is still on the
// pending state, from the link's point-of-view this is a no-op.
// If different clients are issuing conflicting operations and
// they care about that, then the clients should make sure they
// enforce some kind of concurrency mechanism -- from our
// perspective that's what Douglas Adams would call an SEP.
//
// This should be used as a general guideline for most commands handled
// in this function. Adapt as you see fit, but please bear in mind that
// this is the expected behavior.
if (prefix == "osd setcrushmap" ||
(prefix == "osd crush set" && !osdid_present)) {
if (pending_inc.crush.length()) {
dout(10) << __func__ << " waiting for pending crush update " << dendl;
wait_for_finished_proposal(op, new C_RetryMessage(this, op));
return true;
}
dout(10) << "prepare_command setting new crush map" << dendl;
bufferlist data(m->get_data());
CrushWrapper crush;
try {
auto bl = data.cbegin();
crush.decode(bl);
}
catch (const std::exception &e) {
err = -EINVAL;
ss << "Failed to parse crushmap: " << e.what();
goto reply;
}
int64_t prior_version = 0;
if (cmd_getval(cmdmap, "prior_version", prior_version)) {
if (prior_version == osdmap.get_crush_version() - 1) {
// see if we are a resend of the last update. this is imperfect
// (multiple racing updaters may not both get reliable success)
// but we expect crush updaters (via this interface) to be rare-ish.
bufferlist current, proposed;
osdmap.crush->encode(current, mon.get_quorum_con_features());
crush.encode(proposed, mon.get_quorum_con_features());
if (current.contents_equal(proposed)) {
dout(10) << __func__
<< " proposed matches current and version equals previous"
<< dendl;
err = 0;
ss << osdmap.get_crush_version();
goto reply;
}
}
if (prior_version != osdmap.get_crush_version()) {
err = -EPERM;
ss << "prior_version " << prior_version << " != crush version "
<< osdmap.get_crush_version();
goto reply;
}
}
if (!validate_crush_against_features(&crush, ss)) {
err = -EINVAL;
goto reply;
}
err = osdmap.validate_crush_rules(&crush, &ss);
if (err < 0) {
goto reply;
}
if (g_conf()->mon_osd_crush_smoke_test) {
// sanity check: test some inputs to make sure this map isn't
// totally broken
dout(10) << " testing map" << dendl;
stringstream ess;
CrushTester tester(crush, ess);
tester.set_min_x(0);
tester.set_max_x(50);
tester.set_num_rep(3); // arbitrary
auto start = ceph::coarse_mono_clock::now();
int r = tester.test_with_fork(cct, g_conf()->mon_lease);
auto duration = ceph::coarse_mono_clock::now() - start;
if (r < 0) {
dout(10) << " tester.test_with_fork returns " << r
<< ": " << ess.str() << dendl;
ss << "crush smoke test failed with " << r << ": " << ess.str();
err = r;
goto reply;
}
dout(10) << __func__ << " crush somke test duration: "
<< duration << ", result: " << ess.str() << dendl;
}
pending_inc.crush = data;
ss << osdmap.get_crush_version() + 1;
goto update;
} else if (prefix == "osd crush set-all-straw-buckets-to-straw2") {
CrushWrapper newcrush = _get_pending_crush();
for (int b = 0; b < newcrush.get_max_buckets(); ++b) {
int bid = -1 - b;
if (newcrush.bucket_exists(bid) &&
newcrush.get_bucket_alg(bid) == CRUSH_BUCKET_STRAW) {
dout(20) << " bucket " << bid << " is straw, can convert" << dendl;
newcrush.bucket_set_alg(bid, CRUSH_BUCKET_STRAW2);
}
}
if (!validate_crush_against_features(&newcrush, ss)) {
err = -EINVAL;
goto reply;
}
pending_inc.crush.clear();
newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
get_last_committed() + 1));
return true;
} else if (prefix == "osd crush set-device-class") {
string device_class;
if (!cmd_getval(cmdmap, "class", device_class)) {
err = -EINVAL; // no value!
goto reply;
}
bool stop = false;
vector<string> idvec;
cmd_getval(cmdmap, "ids", idvec);
CrushWrapper newcrush = _get_pending_crush();
set<int> updated;
for (unsigned j = 0; j < idvec.size() && !stop; j++) {
set<int> osds;
// wildcard?
if (j == 0 &&
(idvec[0] == "any" || idvec[0] == "all" || idvec[0] == "*")) {
osdmap.get_all_osds(osds);
stop = true;
} else {
// try traditional single osd way
long osd = parse_osd_id(idvec[j].c_str(), &ss);
if (osd < 0) {
// ss has reason for failure
ss << ", unable to parse osd id:\"" << idvec[j] << "\". ";
err = -EINVAL;
continue;
}
osds.insert(osd);
}
for (auto &osd : osds) {
if (!osdmap.exists(osd)) {
ss << "osd." << osd << " does not exist. ";
continue;
}
ostringstream oss;
oss << "osd." << osd;
string name = oss.str();
if (newcrush.get_max_devices() < osd + 1) {
newcrush.set_max_devices(osd + 1);
}
string action;
if (newcrush.item_exists(osd)) {
action = "updating";
} else {
action = "creating";
newcrush.set_item_name(osd, name);
}
dout(5) << action << " crush item id " << osd << " name '" << name
<< "' device_class '" << device_class << "'"
<< dendl;
err = newcrush.update_device_class(osd, device_class, name, &ss);
if (err < 0) {
goto reply;
}
if (err == 0 && !_have_pending_crush()) {
if (!stop) {
// for single osd only, wildcard makes too much noise
ss << "set-device-class item id " << osd << " name '" << name
<< "' device_class '" << device_class << "': no change. ";
}
} else {
updated.insert(osd);
}
}
}
pending_inc.crush.clear();
newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
ss << "set osd(s) " << updated << " to class '" << device_class << "'";
getline(ss, rs);
wait_for_finished_proposal(
op,
new Monitor::C_Command(mon,op, 0, rs, get_last_committed() + 1));
return true;
} else if (prefix == "osd crush rm-device-class") {
bool stop = false;
vector<string> idvec;
cmd_getval(cmdmap, "ids", idvec);
CrushWrapper newcrush = _get_pending_crush();
set<int> updated;
for (unsigned j = 0; j < idvec.size() && !stop; j++) {
set<int> osds;
// wildcard?
if (j == 0 &&
(idvec[0] == "any" || idvec[0] == "all" || idvec[0] == "*")) {
osdmap.get_all_osds(osds);
stop = true;
} else {
// try traditional single osd way
long osd = parse_osd_id(idvec[j].c_str(), &ss);
if (osd < 0) {
// ss has reason for failure
ss << ", unable to parse osd id:\"" << idvec[j] << "\". ";
err = -EINVAL;
goto reply;
}
osds.insert(osd);
}
for (auto &osd : osds) {
if (!osdmap.exists(osd)) {
ss << "osd." << osd << " does not exist. ";
continue;
}
auto class_name = newcrush.get_item_class(osd);
if (!class_name) {
ss << "osd." << osd << " belongs to no class, ";
continue;
}
// note that we do not verify if class_is_in_use here
// in case the device is misclassified and user wants
// to overridely reset...
err = newcrush.remove_device_class(cct, osd, &ss);
if (err < 0) {
// ss has reason for failure
goto reply;
}
updated.insert(osd);
}
}
pending_inc.crush.clear();
newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
ss << "done removing class of osd(s): " << updated;
getline(ss, rs);
wait_for_finished_proposal(
op,
new Monitor::C_Command(mon,op, 0, rs, get_last_committed() + 1));
return true;
} else if (prefix == "osd crush class create") {
string device_class;
if (!cmd_getval(cmdmap, "class", device_class)) {
err = -EINVAL; // no value!
goto reply;
}
if (osdmap.require_osd_release < ceph_release_t::luminous) {
ss << "you must complete the upgrade and 'ceph osd require-osd-release "
<< "luminous' before using crush device classes";
err = -EPERM;
goto reply;
}
if (!_have_pending_crush() &&
_get_stable_crush().class_exists(device_class)) {
ss << "class '" << device_class << "' already exists";
goto reply;
}
CrushWrapper newcrush = _get_pending_crush();
if (newcrush.class_exists(device_class)) {
ss << "class '" << device_class << "' already exists";
goto update;
}
int class_id = newcrush.get_or_create_class_id(device_class);
pending_inc.crush.clear();
newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
ss << "created class " << device_class << " with id " << class_id
<< " to crush map";
goto update;
} else if (prefix == "osd crush class rm") {
string device_class;
if (!cmd_getval(cmdmap, "class", device_class)) {
err = -EINVAL; // no value!
goto reply;
}
if (osdmap.require_osd_release < ceph_release_t::luminous) {
ss << "you must complete the upgrade and 'ceph osd require-osd-release "
<< "luminous' before using crush device classes";
err = -EPERM;
goto reply;
}
if (!osdmap.crush->class_exists(device_class)) {
err = 0;
goto reply;
}
CrushWrapper newcrush = _get_pending_crush();
if (!newcrush.class_exists(device_class)) {
err = 0; // make command idempotent
goto wait;
}
int class_id = newcrush.get_class_id(device_class);
stringstream ts;
if (newcrush.class_is_in_use(class_id, &ts)) {
err = -EBUSY;
ss << "class '" << device_class << "' " << ts.str();
goto reply;
}
// check if class is used by any erasure-code-profiles
mempool::osdmap::map<string,map<string,string>> old_ec_profiles =
osdmap.get_erasure_code_profiles();
auto ec_profiles = pending_inc.get_erasure_code_profiles();
#ifdef HAVE_STDLIB_MAP_SPLICING
ec_profiles.merge(old_ec_profiles);
#else
ec_profiles.insert(make_move_iterator(begin(old_ec_profiles)),
make_move_iterator(end(old_ec_profiles)));
#endif
list<string> referenced_by;
for (auto &i: ec_profiles) {
for (auto &j: i.second) {
if ("crush-device-class" == j.first && device_class == j.second) {
referenced_by.push_back(i.first);
}
}
}
if (!referenced_by.empty()) {
err = -EBUSY;
ss << "class '" << device_class
<< "' is still referenced by erasure-code-profile(s): " << referenced_by;
goto reply;
}
set<int> osds;
newcrush.get_devices_by_class(device_class, &osds);
for (auto& p: osds) {
err = newcrush.remove_device_class(cct, p, &ss);
if (err < 0) {
// ss has reason for failure
goto reply;
}
}
if (osds.empty()) {
// empty class, remove directly
err = newcrush.remove_class_name(device_class);
if (err < 0) {
ss << "class '" << device_class << "' cannot be removed '"
<< cpp_strerror(err) << "'";
goto reply;
}
}
pending_inc.crush.clear();
newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
ss << "removed class " << device_class << " with id " << class_id
<< " from crush map";
goto update;
} else if (prefix == "osd crush class rename") {
string srcname, dstname;
if (!cmd_getval(cmdmap, "srcname", srcname)) {
err = -EINVAL;
goto reply;
}
if (!cmd_getval(cmdmap, "dstname", dstname)) {
err = -EINVAL;
goto reply;
}
CrushWrapper newcrush = _get_pending_crush();
if (!newcrush.class_exists(srcname) && newcrush.class_exists(dstname)) {
// suppose this is a replay and return success
// so command is idempotent
ss << "already renamed to '" << dstname << "'";
err = 0;
goto reply;
}
err = newcrush.rename_class(srcname, dstname);
if (err < 0) {
ss << "fail to rename '" << srcname << "' to '" << dstname << "' : "
<< cpp_strerror(err);
goto reply;
}
pending_inc.crush.clear();
newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
ss << "rename class '" << srcname << "' to '" << dstname << "'";
goto update;
} else if (prefix == "osd crush add-bucket") {
// os crush add-bucket <name> <type>
string name, typestr;
vector<string> argvec;
cmd_getval(cmdmap, "name", name);
cmd_getval(cmdmap, "type", typestr);
cmd_getval(cmdmap, "args", argvec);
map<string,string> loc;
if (!argvec.empty()) {
CrushWrapper::parse_loc_map(argvec, &loc);
dout(0) << "will create and move bucket '" << name
<< "' to location " << loc << dendl;
}
if (!_have_pending_crush() &&
_get_stable_crush().name_exists(name)) {
ss << "bucket '" << name << "' already exists";
goto reply;
}
CrushWrapper newcrush = _get_pending_crush();
if (newcrush.name_exists(name)) {
ss << "bucket '" << name << "' already exists";
goto update;
}
int type = newcrush.get_type_id(typestr);
if (type < 0) {
ss << "type '" << typestr << "' does not exist";
err = -EINVAL;
goto reply;
}
if (type == 0) {
ss << "type '" << typestr << "' is for devices, not buckets";
err = -EINVAL;
goto reply;
}
int bucketno;
err = newcrush.add_bucket(0, 0,
CRUSH_HASH_DEFAULT, type, 0, NULL,
NULL, &bucketno);
if (err < 0) {
ss << "add_bucket error: '" << cpp_strerror(err) << "'";
goto reply;
}
err = newcrush.set_item_name(bucketno, name);
if (err < 0) {
ss << "error setting bucket name to '" << name << "'";
goto reply;
}
if (!loc.empty()) {
if (!newcrush.check_item_loc(cct, bucketno, loc,
(int *)NULL)) {
err = newcrush.move_bucket(cct, bucketno, loc);
if (err < 0) {
ss << "error moving bucket '" << name << "' to location " << loc;
goto reply;
}
} else {
ss << "no need to move item id " << bucketno << " name '" << name
<< "' to location " << loc << " in crush map";
}
}
pending_inc.crush.clear();
newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
if (loc.empty()) {
ss << "added bucket " << name << " type " << typestr
<< " to crush map";
} else {
ss << "added bucket " << name << " type " << typestr
<< " to location " << loc;
}
goto update;
} else if (prefix == "osd crush rename-bucket") {
string srcname, dstname;
cmd_getval(cmdmap, "srcname", srcname);
cmd_getval(cmdmap, "dstname", dstname);
err = crush_rename_bucket(srcname, dstname, &ss);
if (err == -EALREADY) // equivalent to success for idempotency
err = 0;
if (err)
goto reply;
else
goto update;
} else if (prefix == "osd crush weight-set create" ||
prefix == "osd crush weight-set create-compat") {
if (_have_pending_crush()) {
dout(10) << " first waiting for pending crush changes to commit" << dendl;
goto wait;
}
CrushWrapper newcrush = _get_pending_crush();
int64_t pool;
int positions;
if (newcrush.has_non_straw2_buckets()) {
ss << "crush map contains one or more bucket(s) that are not straw2";
err = -EPERM;
goto reply;
}
if (prefix == "osd crush weight-set create") {
if (osdmap.require_min_compat_client != ceph_release_t::unknown &&
osdmap.require_min_compat_client < ceph_release_t::luminous) {
ss << "require_min_compat_client "
<< osdmap.require_min_compat_client
<< " < luminous, which is required for per-pool weight-sets. "
<< "Try 'ceph osd set-require-min-compat-client luminous' "
<< "before using the new interface";
err = -EPERM;
goto reply;
}
string poolname, mode;
cmd_getval(cmdmap, "pool", poolname);
pool = osdmap.lookup_pg_pool_name(poolname.c_str());
if (pool < 0) {
ss << "pool '" << poolname << "' not found";
err = -ENOENT;
goto reply;
}
cmd_getval(cmdmap, "mode", mode);
if (mode != "flat" && mode != "positional") {
ss << "unrecognized weight-set mode '" << mode << "'";
err = -EINVAL;
goto reply;
}
positions = mode == "flat" ? 1 : osdmap.get_pg_pool(pool)->get_size();
} else {
pool = CrushWrapper::DEFAULT_CHOOSE_ARGS;
positions = 1;
}
if (!newcrush.create_choose_args(pool, positions)) {
if (pool == CrushWrapper::DEFAULT_CHOOSE_ARGS) {
ss << "compat weight-set already created";
} else {
ss << "weight-set for pool '" << osdmap.get_pool_name(pool)
<< "' already created";
}
goto reply;
}
pending_inc.crush.clear();
newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
goto update;
} else if (prefix == "osd crush weight-set rm" ||
prefix == "osd crush weight-set rm-compat") {
CrushWrapper newcrush = _get_pending_crush();
int64_t pool;
if (prefix == "osd crush weight-set rm") {
string poolname;
cmd_getval(cmdmap, "pool", poolname);
pool = osdmap.lookup_pg_pool_name(poolname.c_str());
if (pool < 0) {
ss << "pool '" << poolname << "' not found";
err = -ENOENT;
goto reply;
}
} else {
pool = CrushWrapper::DEFAULT_CHOOSE_ARGS;
}
newcrush.rm_choose_args(pool);
pending_inc.crush.clear();
newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
goto update;
} else if (prefix == "osd crush weight-set reweight" ||
prefix == "osd crush weight-set reweight-compat") {
string poolname, item;
vector<double> weight;
cmd_getval(cmdmap, "pool", poolname);
cmd_getval(cmdmap, "item", item);
cmd_getval(cmdmap, "weight", weight);
CrushWrapper newcrush = _get_pending_crush();
int64_t pool;
if (prefix == "osd crush weight-set reweight") {
pool = osdmap.lookup_pg_pool_name(poolname.c_str());
if (pool < 0) {
ss << "pool '" << poolname << "' not found";
err = -ENOENT;
goto reply;
}
if (!newcrush.have_choose_args(pool)) {
ss << "no weight-set for pool '" << poolname << "'";
err = -ENOENT;
goto reply;
}
auto arg_map = newcrush.choose_args_get(pool);
int positions = newcrush.get_choose_args_positions(arg_map);
if (weight.size() != (size_t)positions) {
ss << "must specify exact " << positions << " weight values";
err = -EINVAL;
goto reply;
}
} else {
pool = CrushWrapper::DEFAULT_CHOOSE_ARGS;
if (!newcrush.have_choose_args(pool)) {
ss << "no backward-compatible weight-set";
err = -ENOENT;
goto reply;
}
}
if (!newcrush.name_exists(item)) {
ss << "item '" << item << "' does not exist";
err = -ENOENT;
goto reply;
}
err = newcrush.choose_args_adjust_item_weightf(
cct,
newcrush.choose_args_get(pool),
newcrush.get_item_id(item),
weight,
&ss);
if (err < 0) {
goto reply;
}
err = 0;
pending_inc.crush.clear();
newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
goto update;
} else if (osdid_present &&
(prefix == "osd crush set" || prefix == "osd crush add")) {
// <OsdName> is 'osd.<id>' or '<id>', passed as int64_t id
// osd crush set <OsdName> <weight> <loc1> [<loc2> ...]
// osd crush add <OsdName> <weight> <loc1> [<loc2> ...]
if (!osdmap.exists(osdid)) {
err = -ENOENT;
ss << osd_name
<< " does not exist. Create it before updating the crush map";
goto reply;
}
double weight;
if (!cmd_getval(cmdmap, "weight", weight)) {
ss << "unable to parse weight value '"
<< cmd_vartype_stringify(cmdmap.at("weight")) << "'";
err = -EINVAL;
goto reply;
}
string args;
vector<string> argvec;
cmd_getval(cmdmap, "args", argvec);
map<string,string> loc;
CrushWrapper::parse_loc_map(argvec, &loc);
if (prefix == "osd crush set"
&& !_get_stable_crush().item_exists(osdid)) {
err = -ENOENT;
ss << "unable to set item id " << osdid << " name '" << osd_name
<< "' weight " << weight << " at location " << loc
<< ": does not exist";
goto reply;
}
dout(5) << "adding/updating crush item id " << osdid << " name '"
<< osd_name << "' weight " << weight << " at location "
<< loc << dendl;
CrushWrapper newcrush = _get_pending_crush();
string action;
if (prefix == "osd crush set" ||
newcrush.check_item_loc(cct, osdid, loc, (int *)NULL)) {
action = "set";
err = newcrush.update_item(cct, osdid, weight, osd_name, loc);
} else {
action = "add";
err = newcrush.insert_item(cct, osdid, weight, osd_name, loc);
if (err == 0)
err = 1;
}
if (err < 0)
goto reply;
if (err == 0 && !_have_pending_crush()) {
ss << action << " item id " << osdid << " name '" << osd_name
<< "' weight " << weight << " at location " << loc << ": no change";
goto reply;
}
pending_inc.crush.clear();
newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
ss << action << " item id " << osdid << " name '" << osd_name << "' weight "
<< weight << " at location " << loc << " to crush map";
getline(ss, rs);
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
get_last_committed() + 1));
return true;
} else if (prefix == "osd crush create-or-move") {
do {
// osd crush create-or-move <OsdName> <initial_weight> <loc1> [<loc2> ...]
if (!osdmap.exists(osdid)) {
err = -ENOENT;
ss << osd_name
<< " does not exist. create it before updating the crush map";
goto reply;
}
double weight;
if (!cmd_getval(cmdmap, "weight", weight)) {
ss << "unable to parse weight value '"
<< cmd_vartype_stringify(cmdmap.at("weight")) << "'";
err = -EINVAL;
goto reply;
}
string args;
vector<string> argvec;
cmd_getval(cmdmap, "args", argvec);
map<string,string> loc;
CrushWrapper::parse_loc_map(argvec, &loc);
dout(0) << "create-or-move crush item name '" << osd_name
<< "' initial_weight " << weight << " at location " << loc
<< dendl;
CrushWrapper newcrush = _get_pending_crush();
err = newcrush.create_or_move_item(cct, osdid, weight, osd_name, loc,
g_conf()->osd_crush_update_weight_set);
if (err == 0) {
ss << "create-or-move updated item name '" << osd_name
<< "' weight " << weight
<< " at location " << loc << " to crush map";
break;
}
if (err > 0) {
pending_inc.crush.clear();
newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
ss << "create-or-move updating item name '" << osd_name
<< "' weight " << weight
<< " at location " << loc << " to crush map";
getline(ss, rs);
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
get_last_committed() + 1));
return true;
}
} while (false);
} else if (prefix == "osd crush move") {
do {
// osd crush move <name> <loc1> [<loc2> ...]
string name;
vector<string> argvec;
cmd_getval(cmdmap, "name", name);
cmd_getval(cmdmap, "args", argvec);
map<string,string> loc;
CrushWrapper::parse_loc_map(argvec, &loc);
dout(0) << "moving crush item name '" << name << "' to location " << loc << dendl;
CrushWrapper newcrush = _get_pending_crush();
if (!newcrush.name_exists(name)) {
err = -ENOENT;
ss << "item " << name << " does not exist";
break;
}
int id = newcrush.get_item_id(name);
if (!newcrush.check_item_loc(cct, id, loc, (int *)NULL)) {
if (id >= 0) {
err = newcrush.create_or_move_item(
cct, id, 0, name, loc,
g_conf()->osd_crush_update_weight_set);
} else {
err = newcrush.move_bucket(cct, id, loc);
}
if (err >= 0) {
ss << "moved item id " << id << " name '" << name << "' to location " << loc << " in crush map";
pending_inc.crush.clear();
newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
getline(ss, rs);
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
get_last_committed() + 1));
return true;
}
} else {
ss << "no need to move item id " << id << " name '" << name << "' to location " << loc << " in crush map";
err = 0;
}
} while (false);
} else if (prefix == "osd crush swap-bucket") {
string source, dest;
cmd_getval(cmdmap, "source", source);
cmd_getval(cmdmap, "dest", dest);
bool force = false;
cmd_getval(cmdmap, "yes_i_really_mean_it", force);
CrushWrapper newcrush = _get_pending_crush();
if (!newcrush.name_exists(source)) {
ss << "source item " << source << " does not exist";
err = -ENOENT;
goto reply;
}
if (!newcrush.name_exists(dest)) {
ss << "dest item " << dest << " does not exist";
err = -ENOENT;
goto reply;
}
int sid = newcrush.get_item_id(source);
int did = newcrush.get_item_id(dest);
int sparent;
if (newcrush.get_immediate_parent_id(sid, &sparent) == 0 && !force) {
ss << "source item " << source << " is not an orphan bucket; pass --yes-i-really-mean-it to proceed anyway";
err = -EPERM;
goto reply;
}
if (newcrush.get_bucket_alg(sid) != newcrush.get_bucket_alg(did) &&
!force) {
ss << "source bucket alg " << crush_alg_name(newcrush.get_bucket_alg(sid)) << " != "
<< "dest bucket alg " << crush_alg_name(newcrush.get_bucket_alg(did))
<< "; pass --yes-i-really-mean-it to proceed anyway";
err = -EPERM;
goto reply;
}
int r = newcrush.swap_bucket(cct, sid, did);
if (r < 0) {
ss << "failed to swap bucket contents: " << cpp_strerror(r);
err = r;
goto reply;
}
ss << "swapped bucket of " << source << " to " << dest;
pending_inc.crush.clear();
newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
wait_for_finished_proposal(op,
new Monitor::C_Command(mon, op, err, ss.str(),
get_last_committed() + 1));
return true;
} else if (prefix == "osd crush link") {
// osd crush link <name> <loc1> [<loc2> ...]
string name;
cmd_getval(cmdmap, "name", name);
vector<string> argvec;
cmd_getval(cmdmap, "args", argvec);
map<string,string> loc;
CrushWrapper::parse_loc_map(argvec, &loc);
// Need an explicit check for name_exists because get_item_id returns
// 0 on unfound.
int id = osdmap.crush->get_item_id(name);
if (!osdmap.crush->name_exists(name)) {
err = -ENOENT;
ss << "item " << name << " does not exist";
goto reply;
} else {
dout(5) << "resolved crush name '" << name << "' to id " << id << dendl;
}
if (osdmap.crush->check_item_loc(cct, id, loc, (int*) NULL)) {
ss << "no need to move item id " << id << " name '" << name
<< "' to location " << loc << " in crush map";
err = 0;
goto reply;
}
dout(5) << "linking crush item name '" << name << "' at location " << loc << dendl;
CrushWrapper newcrush = _get_pending_crush();
if (!newcrush.name_exists(name)) {
err = -ENOENT;
ss << "item " << name << " does not exist";
goto reply;
} else {
int id = newcrush.get_item_id(name);
if (!newcrush.check_item_loc(cct, id, loc, (int *)NULL)) {
err = newcrush.link_bucket(cct, id, loc);
if (err >= 0) {
ss << "linked item id " << id << " name '" << name
<< "' to location " << loc << " in crush map";
pending_inc.crush.clear();
newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
} else {
ss << "cannot link item id " << id << " name '" << name
<< "' to location " << loc;
goto reply;
}
} else {
ss << "no need to move item id " << id << " name '" << name
<< "' to location " << loc << " in crush map";
err = 0;
}
}
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, err, ss.str(),
get_last_committed() + 1));
return true;
} else if (prefix == "osd crush rm" ||
prefix == "osd crush remove" ||
prefix == "osd crush unlink") {
do {
// osd crush rm <id> [ancestor]
CrushWrapper newcrush = _get_pending_crush();
string name;
cmd_getval(cmdmap, "name", name);
if (!osdmap.crush->name_exists(name)) {
err = 0;
ss << "device '" << name << "' does not appear in the crush map";
break;
}
if (!newcrush.name_exists(name)) {
err = 0;
ss << "device '" << name << "' does not appear in the crush map";
getline(ss, rs);
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
get_last_committed() + 1));
return true;
}
int id = newcrush.get_item_id(name);
int ancestor = 0;
bool unlink_only = prefix == "osd crush unlink";
string ancestor_str;
if (cmd_getval(cmdmap, "ancestor", ancestor_str)) {
if (!newcrush.name_exists(ancestor_str)) {
err = -ENOENT;
ss << "ancestor item '" << ancestor_str
<< "' does not appear in the crush map";
break;
}
ancestor = newcrush.get_item_id(ancestor_str);
}
err = prepare_command_osd_crush_remove(
newcrush,
id, ancestor,
(ancestor < 0), unlink_only);
if (err == -ENOENT) {
ss << "item " << id << " does not appear in that position";
err = 0;
break;
}
if (err == 0) {
if (!unlink_only)
pending_inc.new_crush_node_flags[id] = 0;
ss << "removed item id " << id << " name '" << name << "' from crush map";
getline(ss, rs);
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
get_last_committed() + 1));
return true;
}
} while (false);
} else if (prefix == "osd crush reweight-all") {
CrushWrapper newcrush = _get_pending_crush();
newcrush.reweight(cct);
pending_inc.crush.clear();
newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
ss << "reweighted crush hierarchy";
getline(ss, rs);
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
get_last_committed() + 1));
return true;
  } else if (prefix == "osd crush reweight") {
    // osd crush reweight <name> <weight>
    // Set the crush weight of a single leaf item (a device/OSD) in the
    // pending crush map.  All checks run against a copy, so nothing is
    // staged unless every validation below passes.
    CrushWrapper newcrush = _get_pending_crush();
    string name;
    cmd_getval(cmdmap, "name", name);
    if (!newcrush.name_exists(name)) {
      err = -ENOENT;
      ss << "device '" << name << "' does not appear in the crush map";
      goto reply;
    }
    int id = newcrush.get_item_id(name);
    // Leaves (devices) have non-negative crush ids; buckets are negative.
    // Buckets must use "osd crush reweight-subtree" instead.
    if (id < 0) {
      ss << "device '" << name << "' is not a leaf in the crush map";
      err = -EINVAL;
      goto reply;
    }
    double w;
    if (!cmd_getval(cmdmap, "weight", w)) {
      ss << "unable to parse weight value '"
         << cmd_vartype_stringify(cmdmap.at("weight")) << "'";
      err = -EINVAL;
      goto reply;
    }
    err = newcrush.adjust_item_weightf(cct, id, w,
                                       g_conf()->osd_crush_update_weight_set);
    if (err < 0)
      goto reply;
    // Stage the updated crush map in the pending incremental and defer the
    // reply until the proposal commits.
    pending_inc.crush.clear();
    newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
    ss << "reweighted item id " << id << " name '" << name << "' to " << w
       << " in crush map";
    getline(ss, rs);
    wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
                                                          get_last_committed() + 1));
    return true;
  } else if (prefix == "osd crush reweight-subtree") {
    // osd crush reweight-subtree <name> <weight>
    // (the old comment said "osd crush reweight" — copy-paste leftover)
    // Mirrors the "osd crush reweight" branch above, but requires a bucket
    // (negative crush id) and adjusts the weight of its whole subtree.
    CrushWrapper newcrush = _get_pending_crush();
    string name;
    cmd_getval(cmdmap, "name", name);
    if (!newcrush.name_exists(name)) {
      err = -ENOENT;
      ss << "device '" << name << "' does not appear in the crush map";
      goto reply;
    }
    int id = newcrush.get_item_id(name);
    // Buckets (subtrees) have negative ids; a non-negative id is a leaf
    // device, which this command rejects.
    if (id >= 0) {
      ss << "device '" << name << "' is not a subtree in the crush map";
      err = -EINVAL;
      goto reply;
    }
    double w;
    if (!cmd_getval(cmdmap, "weight", w)) {
      ss << "unable to parse weight value '"
         << cmd_vartype_stringify(cmdmap.at("weight")) << "'";
      err = -EINVAL;
      goto reply;
    }
    err = newcrush.adjust_subtree_weightf(cct, id, w,
                                          g_conf()->osd_crush_update_weight_set);
    if (err < 0)
      goto reply;
    // Stage the updated crush map and reply once the proposal commits.
    pending_inc.crush.clear();
    newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
    ss << "reweighted subtree id " << id << " name '" << name << "' to " << w
       << " in crush map";
    getline(ss, rs);
    wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
                                                          get_last_committed() + 1));
    return true;
  } else if (prefix == "osd crush tunables") {
    // osd crush tunables <profile>
    // Switch the crush tunables to a named release profile.  "legacy" and
    // "argonaut" are synonyms; the rest map one-to-one onto the
    // CrushWrapper::set_tunables_* helpers.
    CrushWrapper newcrush = _get_pending_crush();
    err = 0;
    string profile;
    cmd_getval(cmdmap, "profile", profile);
    if (profile == "legacy" || profile == "argonaut") {
      newcrush.set_tunables_legacy();
    } else if (profile == "bobtail") {
      newcrush.set_tunables_bobtail();
    } else if (profile == "firefly") {
      newcrush.set_tunables_firefly();
    } else if (profile == "hammer") {
      newcrush.set_tunables_hammer();
    } else if (profile == "jewel") {
      newcrush.set_tunables_jewel();
    } else if (profile == "optimal") {
      newcrush.set_tunables_optimal();
    } else if (profile == "default") {
      newcrush.set_tunables_default();
    } else {
      ss << "unrecognized profile '" << profile << "'";
      err = -EINVAL;
      goto reply;
    }
    // Reject the new tunables if validate_crush_against_features() deems
    // them incompatible with the cluster (it writes the reason into ss).
    if (!validate_crush_against_features(&newcrush, ss)) {
      err = -EINVAL;
      goto reply;
    }
    // Stage the new crush map and defer the reply until the map commits.
    pending_inc.crush.clear();
    newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
    ss << "adjusted tunables profile to " << profile;
    getline(ss, rs);
    wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
                                                          get_last_committed() + 1));
    return true;
} else if (prefix == "osd crush set-tunable") {
CrushWrapper newcrush = _get_pending_crush();
err = 0;
string tunable;
cmd_getval(cmdmap, "tunable", tunable);
int64_t value = -1;
if (!cmd_getval(cmdmap, "value", value)) {
err = -EINVAL;
ss << "failed to parse integer value "
<< cmd_vartype_stringify(cmdmap.at("value"));
goto reply;
}
if (tunable == "straw_calc_version") {
if (value != 0 && value != 1) {
ss << "value must be 0 or 1; got " << value;
err = -EINVAL;
goto reply;
}
newcrush.set_straw_calc_version(value);
} else {
ss << "unrecognized tunable '" << tunable << "'";
err = -EINVAL;
goto reply;
}
if (!validate_crush_against_features(&newcrush, ss)) {
err = -EINVAL;
goto reply;
}
pending_inc.crush.clear();
newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
ss << "adjusted tunable " << tunable << " to " << value;
getline(ss, rs);
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
get_last_committed() + 1));
return true;
} else if (prefix == "osd crush rule create-simple") {
string name, root, type, mode;
cmd_getval(cmdmap, "name", name);
cmd_getval(cmdmap, "root", root);
cmd_getval(cmdmap, "type", type);
cmd_getval(cmdmap, "mode", mode);
if (mode == "")
mode = "firstn";
if (osdmap.crush->rule_exists(name)) {
// The name is uniquely associated to a ruleid and the rule it contains
// From the user point of view, the rule is more meaningfull.
ss << "rule " << name << " already exists";
err = 0;
goto reply;
}
CrushWrapper newcrush = _get_pending_crush();
if (newcrush.rule_exists(name)) {
// The name is uniquely associated to a ruleid and the rule it contains
// From the user point of view, the rule is more meaningfull.
ss << "rule " << name << " already exists";
err = 0;
} else {
int ruleno = newcrush.add_simple_rule(name, root, type, "", mode,
pg_pool_t::TYPE_REPLICATED, &ss);
if (ruleno < 0) {
err = ruleno;
goto reply;
}
pending_inc.crush.clear();
newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
}
getline(ss, rs);
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
get_last_committed() + 1));
return true;
} else if (prefix == "osd crush rule create-replicated") {
string name, root, type, device_class;
cmd_getval(cmdmap, "name", name);
cmd_getval(cmdmap, "root", root);
cmd_getval(cmdmap, "type", type);
cmd_getval(cmdmap, "class", device_class);
if (osdmap.crush->rule_exists(name)) {
// The name is uniquely associated to a ruleid and the rule it contains
// From the user point of view, the rule is more meaningfull.
ss << "rule " << name << " already exists";
err = 0;
goto reply;
}
CrushWrapper newcrush = _get_pending_crush();
if (newcrush.rule_exists(name)) {
// The name is uniquely associated to a ruleid and the rule it contains
// From the user point of view, the rule is more meaningfull.
ss << "rule " << name << " already exists";
err = 0;
} else {
int ruleno = newcrush.add_simple_rule(
name, root, type, device_class,
"firstn", pg_pool_t::TYPE_REPLICATED, &ss);
if (ruleno < 0) {
err = ruleno;
goto reply;
}
pending_inc.crush.clear();
newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
}
getline(ss, rs);
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
get_last_committed() + 1));
return true;
} else if (prefix == "osd erasure-code-profile rm") {
string name;
cmd_getval(cmdmap, "name", name);
if (erasure_code_profile_in_use(pending_inc.new_pools, name, &ss))
goto wait;
if (erasure_code_profile_in_use(osdmap.pools, name, &ss)) {
err = -EBUSY;
goto reply;
}
if (osdmap.has_erasure_code_profile(name) ||
pending_inc.new_erasure_code_profiles.count(name)) {
if (osdmap.has_erasure_code_profile(name)) {
pending_inc.old_erasure_code_profiles.push_back(name);
} else {
dout(20) << "erasure code profile rm " << name << ": creation canceled" << dendl;
pending_inc.new_erasure_code_profiles.erase(name);
}
getline(ss, rs);
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
get_last_committed() + 1));
return true;
} else {
ss << "erasure-code-profile " << name << " does not exist";
err = 0;
goto reply;
}
} else if (prefix == "osd erasure-code-profile set") {
string name;
cmd_getval(cmdmap, "name", name);
vector<string> profile;
cmd_getval(cmdmap, "profile", profile);
bool force = false;
cmd_getval(cmdmap, "force", force);
map<string,string> profile_map;
err = parse_erasure_code_profile(profile, &profile_map, &ss);
if (err)
goto reply;
if (auto found = profile_map.find("crush-failure-domain");
found != profile_map.end()) {
const auto& failure_domain = found->second;
int failure_domain_type = osdmap.crush->get_type_id(failure_domain);
if (failure_domain_type < 0) {
ss << "erasure-code-profile " << profile_map
<< " contains an invalid failure-domain " << std::quoted(failure_domain);
err = -EINVAL;
goto reply;
}
}
if (profile_map.find("plugin") == profile_map.end()) {
ss << "erasure-code-profile " << profile_map
<< " must contain a plugin entry" << std::endl;
err = -EINVAL;
goto reply;
}
string plugin = profile_map["plugin"];
if (pending_inc.has_erasure_code_profile(name)) {
dout(20) << "erasure code profile " << name << " try again" << dendl;
goto wait;
} else {
err = normalize_profile(name, profile_map, force, &ss);
if (err)
goto reply;
if (osdmap.has_erasure_code_profile(name)) {
ErasureCodeProfile existing_profile_map =
osdmap.get_erasure_code_profile(name);
err = normalize_profile(name, existing_profile_map, force, &ss);
if (err)
goto reply;
if (existing_profile_map == profile_map) {
err = 0;
goto reply;
}
if (!force) {
err = -EPERM;
ss << "will not override erasure code profile " << name
<< " because the existing profile "
<< existing_profile_map
<< " is different from the proposed profile "
<< profile_map;
goto reply;
}
}
dout(20) << "erasure code profile set " << name << "="
<< profile_map << dendl;
pending_inc.set_erasure_code_profile(name, profile_map);
}
getline(ss, rs);
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
get_last_committed() + 1));
return true;
} else if (prefix == "osd crush rule create-erasure") {
err = check_cluster_features(CEPH_FEATURE_CRUSH_V2, ss);
if (err == -EAGAIN)
goto wait;
if (err)
goto reply;
string name, poolstr;
cmd_getval(cmdmap, "name", name);
string profile;
cmd_getval(cmdmap, "profile", profile);
if (profile == "")
profile = "default";
if (profile == "default") {
if (!osdmap.has_erasure_code_profile(profile)) {
if (pending_inc.has_erasure_code_profile(profile)) {
dout(20) << "erasure code profile " << profile << " already pending" << dendl;
goto wait;
}
map<string,string> profile_map;
err = osdmap.get_erasure_code_profile_default(cct,
profile_map,
&ss);
if (err)
goto reply;
err = normalize_profile(name, profile_map, true, &ss);
if (err)
goto reply;
dout(20) << "erasure code profile set " << profile << "="
<< profile_map << dendl;
pending_inc.set_erasure_code_profile(profile, profile_map);
goto wait;
}
}
int rule;
err = crush_rule_create_erasure(name, profile, &rule, &ss);
if (err < 0) {
switch(err) {
case -EEXIST: // return immediately
ss << "rule " << name << " already exists";
err = 0;
goto reply;
break;
case -EALREADY: // wait for pending to be proposed
ss << "rule " << name << " already exists";
err = 0;
break;
default: // non recoverable error
goto reply;
break;
}
} else {
ss << "created rule " << name << " at " << rule;
}
getline(ss, rs);
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
get_last_committed() + 1));
return true;
  } else if (prefix == "osd crush rule rm") {
    // osd crush rule rm <name>
    string name;
    cmd_getval(cmdmap, "name", name);
    // Not present in the committed map: treat as already removed
    // (idempotent success, reply immediately).
    if (!osdmap.crush->rule_exists(name)) {
      ss << "rule " << name << " does not exist";
      err = 0;
      goto reply;
    }
    CrushWrapper newcrush = _get_pending_crush();
    if (!newcrush.rule_exists(name)) {
      // Gone from the pending map but still in the committed one: a removal
      // is already in flight, so fall through and wait for that proposal
      // instead of replying right away.
      ss << "rule " << name << " does not exist";
      err = 0;
    } else {
      int ruleno = newcrush.get_rule_id(name);
      ceph_assert(ruleno >= 0);
      // make sure it is not in use.
      // FIXME: this is ok in some situations, but let's not bother with that
      // complexity now.
      if (osdmap.crush_rule_in_use(ruleno)) {
        ss << "crush rule " << name << " (" << ruleno << ") is in use";
        err = -EBUSY;
        goto reply;
      }
      err = newcrush.remove_rule(ruleno);
      if (err < 0) {
        goto reply;
      }
      pending_inc.crush.clear();
      newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
    }
    getline(ss, rs);
    wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
                                                          get_last_committed() + 1));
    return true;
  } else if (prefix == "osd crush rule rename") {
    // osd crush rule rename <srcname> <dstname>
    string srcname;
    string dstname;
    cmd_getval(cmdmap, "srcname", srcname);
    cmd_getval(cmdmap, "dstname", dstname);
    if (srcname.empty() || dstname.empty()) {
      ss << "must specify both source rule name and destination rule name";
      err = -EINVAL;
      goto reply;
    }
    // Renaming a rule to its own name is a no-op, not an error.
    if (srcname == dstname) {
      ss << "destination rule name is equal to source rule name";
      err = 0;
      goto reply;
    }
    CrushWrapper newcrush = _get_pending_crush();
    if (!newcrush.rule_exists(srcname) && newcrush.rule_exists(dstname)) {
      // srcname does not exist and dstname already exists
      // suppose this is a replay and return success
      // (so this command is idempotent)
      ss << "already renamed to '" << dstname << "'";
      err = 0;
      goto reply;
    }
    err = newcrush.rename_rule(srcname, dstname, &ss);
    if (err < 0) {
      // ss has reason for failure
      goto reply;
    }
    // Stage the renamed-rule crush map; reply is deferred until commit.
    pending_inc.crush.clear();
    newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
    getline(ss, rs);
    wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
                                                          get_last_committed() + 1));
    return true;
  } else if (prefix == "osd setmaxosd") {
    // osd setmaxosd <newmax>
    // Set max_osd, the size of the osd id space — NOT the number of OSDs.
    int64_t newmax;
    if (!cmd_getval(cmdmap, "newmax", newmax)) {
      ss << "unable to parse 'newmax' value '"
         << cmd_vartype_stringify(cmdmap.at("newmax")) << "'";
      err = -EINVAL;
      goto reply;
    }
    // Upper bound: the configured mon_max_osd cap.
    if (newmax > g_conf()->mon_max_osd) {
      err = -ERANGE;
      ss << "cannot set max_osd to " << newmax << " which is > conf.mon_max_osd ("
         << g_conf()->mon_max_osd << ")";
      goto reply;
    }
    // Don't allow shrinking OSD number as this will cause data loss
    // and may cause kernel crashes.
    // Note: setmaxosd sets the maximum OSD number and not the number of OSDs
    if (newmax < osdmap.get_max_osd()) {
      // Check if the OSDs exist between current max and new value.
      // If there are any OSDs exist, then don't allow shrinking number
      // of OSDs.
      for (int i = newmax; i < osdmap.get_max_osd(); i++) {
        if (osdmap.exists(i)) {
          err = -EBUSY;
          ss << "cannot shrink max_osd to " << newmax
             << " because osd." << i << " (and possibly others) still in use";
          goto reply;
        }
      }
    }
    // Stage the new bound and reply once the osdmap update commits.
    pending_inc.new_max_osd = newmax;
    ss << "set new max_osd = " << pending_inc.new_max_osd;
    getline(ss, rs);
    wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
                                                          get_last_committed() + 1));
    return true;
  } else if (prefix == "osd set-full-ratio" ||
             prefix == "osd set-backfillfull-ratio" ||
             prefix == "osd set-nearfull-ratio") {
    // Set one of the cluster-wide fullness thresholds carried in the osdmap
    // (full / backfillfull / nearfull).
    // NOTE(review): no range check on the ratio is visible here (e.g. 0..1,
    // or nearfull <= backfillfull <= full); presumably enforced or surfaced
    // elsewhere — confirm before relying on it.
    double n;
    if (!cmd_getval(cmdmap, "ratio", n)) {
      ss << "unable to parse 'ratio' value '"
         << cmd_vartype_stringify(cmdmap.at("ratio")) << "'";
      err = -EINVAL;
      goto reply;
    }
    // Route the value to the matching field of the pending incremental.
    if (prefix == "osd set-full-ratio")
      pending_inc.new_full_ratio = n;
    else if (prefix == "osd set-backfillfull-ratio")
      pending_inc.new_backfillfull_ratio = n;
    else if (prefix == "osd set-nearfull-ratio")
      pending_inc.new_nearfull_ratio = n;
    ss << prefix << " " << n;
    getline(ss, rs);
    wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
                                                          get_last_committed() + 1));
    return true;
} else if (prefix == "osd set-require-min-compat-client") {
string v;
cmd_getval(cmdmap, "version", v);
ceph_release_t vno = ceph_release_from_name(v);
if (!vno) {
ss << "version " << v << " is not recognized";
err = -EINVAL;
goto reply;
}
OSDMap newmap;
newmap.deepish_copy_from(osdmap);
newmap.apply_incremental(pending_inc);
newmap.require_min_compat_client = vno;
auto mvno = newmap.get_min_compat_client();
if (vno < mvno) {
ss << "osdmap current utilizes features that require " << mvno
<< "; cannot set require_min_compat_client below that to " << vno;
err = -EPERM;
goto reply;
}
bool sure = false;
cmd_getval(cmdmap, "yes_i_really_mean_it", sure);
if (!sure) {
FeatureMap m;
mon.get_combined_feature_map(&m);
uint64_t features = ceph_release_features(to_integer<int>(vno));
bool first = true;
bool ok = true;
for (int type : {
CEPH_ENTITY_TYPE_CLIENT,
CEPH_ENTITY_TYPE_MDS,
CEPH_ENTITY_TYPE_MGR }) {
auto p = m.m.find(type);
if (p == m.m.end()) {
continue;
}
for (auto& q : p->second) {
uint64_t missing = ~q.first & features;
if (missing) {
if (first) {
ss << "cannot set require_min_compat_client to " << v << ": ";
} else {
ss << "; ";
}
first = false;
ss << q.second << " connected " << ceph_entity_type_name(type)
<< "(s) look like " << ceph_release_name(
ceph_release_from_features(q.first))
<< " (missing 0x" << std::hex << missing << std::dec << ")";
ok = false;
}
}
}
if (!ok) {
ss << "; add --yes-i-really-mean-it to do it anyway";
err = -EPERM;
goto reply;
}
}
ss << "set require_min_compat_client to " << vno;
pending_inc.new_require_min_compat_client = vno;
getline(ss, rs);
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
get_last_committed() + 1));
return true;
} else if (prefix == "osd pause") {
return prepare_set_flag(op, CEPH_OSDMAP_PAUSERD | CEPH_OSDMAP_PAUSEWR);
} else if (prefix == "osd unpause") {
return prepare_unset_flag(op, CEPH_OSDMAP_PAUSERD | CEPH_OSDMAP_PAUSEWR);
} else if (prefix == "osd set") {
bool sure = false;
cmd_getval(cmdmap, "yes_i_really_mean_it", sure);
string key;
cmd_getval(cmdmap, "key", key);
if (key == "pause")
return prepare_set_flag(op, CEPH_OSDMAP_PAUSERD | CEPH_OSDMAP_PAUSEWR);
else if (key == "noup")
return prepare_set_flag(op, CEPH_OSDMAP_NOUP);
else if (key == "nodown")
return prepare_set_flag(op, CEPH_OSDMAP_NODOWN);
else if (key == "noout")
return prepare_set_flag(op, CEPH_OSDMAP_NOOUT);
else if (key == "noin")
return prepare_set_flag(op, CEPH_OSDMAP_NOIN);
else if (key == "nobackfill")
return prepare_set_flag(op, CEPH_OSDMAP_NOBACKFILL);
else if (key == "norebalance")
return prepare_set_flag(op, CEPH_OSDMAP_NOREBALANCE);
else if (key == "norecover")
return prepare_set_flag(op, CEPH_OSDMAP_NORECOVER);
else if (key == "noscrub")
return prepare_set_flag(op, CEPH_OSDMAP_NOSCRUB);
else if (key == "nodeep-scrub")
return prepare_set_flag(op, CEPH_OSDMAP_NODEEP_SCRUB);
else if (key == "notieragent")
return prepare_set_flag(op, CEPH_OSDMAP_NOTIERAGENT);
else if (key == "nosnaptrim")
return prepare_set_flag(op, CEPH_OSDMAP_NOSNAPTRIM);
else if (key == "pglog_hardlimit") {
if (!osdmap.get_num_up_osds() && !sure) {
ss << "Not advisable to continue since no OSDs are up. Pass "
<< "--yes-i-really-mean-it if you really wish to continue.";
err = -EPERM;
goto reply;
}
// The release check here is required because for OSD_PGLOG_HARDLIMIT,
// we are reusing a jewel feature bit that was retired in luminous.
if (osdmap.require_osd_release >= ceph_release_t::luminous &&
(HAVE_FEATURE(osdmap.get_up_osd_features(), OSD_PGLOG_HARDLIMIT)
|| sure)) {
return prepare_set_flag(op, CEPH_OSDMAP_PGLOG_HARDLIMIT);
} else {
ss << "not all up OSDs have OSD_PGLOG_HARDLIMIT feature";
err = -EPERM;
goto reply;
}
} else {
ss << "unrecognized flag '" << key << "'";
err = -EINVAL;
}
} else if (prefix == "osd unset") {
string key;
cmd_getval(cmdmap, "key", key);
if (key == "pause")
return prepare_unset_flag(op, CEPH_OSDMAP_PAUSERD | CEPH_OSDMAP_PAUSEWR);
else if (key == "noup")
return prepare_unset_flag(op, CEPH_OSDMAP_NOUP);
else if (key == "nodown")
return prepare_unset_flag(op, CEPH_OSDMAP_NODOWN);
else if (key == "noout")
return prepare_unset_flag(op, CEPH_OSDMAP_NOOUT);
else if (key == "noin")
return prepare_unset_flag(op, CEPH_OSDMAP_NOIN);
else if (key == "nobackfill")
return prepare_unset_flag(op, CEPH_OSDMAP_NOBACKFILL);
else if (key == "norebalance")
return prepare_unset_flag(op, CEPH_OSDMAP_NOREBALANCE);
else if (key == "norecover")
return prepare_unset_flag(op, CEPH_OSDMAP_NORECOVER);
else if (key == "noscrub")
return prepare_unset_flag(op, CEPH_OSDMAP_NOSCRUB);
else if (key == "nodeep-scrub")
return prepare_unset_flag(op, CEPH_OSDMAP_NODEEP_SCRUB);
else if (key == "notieragent")
return prepare_unset_flag(op, CEPH_OSDMAP_NOTIERAGENT);
else if (key == "nosnaptrim")
return prepare_unset_flag(op, CEPH_OSDMAP_NOSNAPTRIM);
else {
ss << "unrecognized flag '" << key << "'";
err = -EINVAL;
}
} else if (prefix == "osd require-osd-release") {
string release;
cmd_getval(cmdmap, "release", release);
bool sure = false;
cmd_getval(cmdmap, "yes_i_really_mean_it", sure);
ceph_release_t rel = ceph_release_from_name(release.c_str());
if (!rel) {
ss << "unrecognized release " << release;
err = -EINVAL;
goto reply;
}
if (rel == osdmap.require_osd_release) {
// idempotent
err = 0;
goto reply;
}
if (osdmap.require_osd_release < ceph_release_t::pacific && !sure) {
ss << "Not advisable to continue since current 'require_osd_release' "
<< "refers to a very old Ceph release. Pass "
<< "--yes-i-really-mean-it if you really wish to continue.";
err = -EPERM;
goto reply;
}
if (!osdmap.get_num_up_osds() && !sure) {
ss << "Not advisable to continue since no OSDs are up. Pass "
<< "--yes-i-really-mean-it if you really wish to continue.";
err = -EPERM;
goto reply;
}
if (rel == ceph_release_t::pacific) {
if (!mon.monmap->get_required_features().contains_all(
ceph::features::mon::FEATURE_PACIFIC)) {
ss << "not all mons are pacific";
err = -EPERM;
goto reply;
}
if ((!HAVE_FEATURE(osdmap.get_up_osd_features(), SERVER_PACIFIC))
&& !sure) {
ss << "not all up OSDs have CEPH_FEATURE_SERVER_PACIFIC feature";
err = -EPERM;
goto reply;
}
} else if (rel == ceph_release_t::quincy) {
if (!mon.monmap->get_required_features().contains_all(
ceph::features::mon::FEATURE_QUINCY)) {
ss << "not all mons are quincy";
err = -EPERM;
goto reply;
}
if ((!HAVE_FEATURE(osdmap.get_up_osd_features(), SERVER_QUINCY))
&& !sure) {
ss << "not all up OSDs have CEPH_FEATURE_SERVER_QUINCY feature";
err = -EPERM;
goto reply;
}
} else if (rel == ceph_release_t::reef) {
if (!mon.monmap->get_required_features().contains_all(
ceph::features::mon::FEATURE_REEF)) {
ss << "not all mons are reef";
err = -EPERM;
goto reply;
}
if ((!HAVE_FEATURE(osdmap.get_up_osd_features(), SERVER_REEF))
&& !sure) {
ss << "not all up OSDs have CEPH_FEATURE_SERVER_REEF feature";
err = -EPERM;
goto reply;
}
} else {
ss << "not supported for this release";
err = -EPERM;
goto reply;
}
if (rel < osdmap.require_osd_release) {
ss << "require_osd_release cannot be lowered once it has been set";
err = -EPERM;
goto reply;
}
pending_inc.new_require_osd_release = rel;
goto update;
} else if (prefix == "osd down" ||
prefix == "osd out" ||
prefix == "osd in" ||
prefix == "osd rm" ||
prefix == "osd stop") {
    // Stage up/down/in/out/rm/stop state changes for a list of osd ids
    // (or the "any"/"all"/"*" wildcard).  Per-osd feedback accumulates in
    // 'ss'; if at least one change was staged we propose the pending map.
    bool any = false;            // did we stage at least one change?
    bool stop = false;           // a wildcard consumed the whole id list
    bool verbose = true;         // suppressed for wildcards to reduce noise
    bool definitely_dead = false;
    vector<string> idvec;
    cmd_getval(cmdmap, "ids", idvec);
    cmd_getval(cmdmap, "definitely_dead", definitely_dead);
    // NOTE(review): this logs at error level on every invocation; dout(10)
    // looks more appropriate than derr here -- confirm intent.
    derr << "definitely_dead " << (int)definitely_dead << dendl;
    for (unsigned j = 0; j < idvec.size() && !stop; j++) {
      set<int> osds;
      // wildcard?
      if (j == 0 &&
          (idvec[0] == "any" || idvec[0] == "all" || idvec[0] == "*")) {
        if (prefix == "osd in") {
          // touch out osds only
          osdmap.get_out_existing_osds(osds);
        } else {
          osdmap.get_all_osds(osds);
        }
        stop = true;
        verbose = false; // so the output is less noisy.
      } else {
        long osd = parse_osd_id(idvec[j].c_str(), &ss);
        if (osd < 0) {
          // NOTE(review): missing space -- renders e.g. "invalid osd id-1".
          ss << "invalid osd id" << osd;
          err = -EINVAL;
          continue;
        } else if (!osdmap.exists(osd)) {
          ss << "osd." << osd << " does not exist. ";
          continue;
        }
        osds.insert(osd);
      }
      for (auto &osd : osds) {
        if (prefix == "osd down") {
          if (osdmap.is_down(osd)) {
            if (verbose)
              ss << "osd." << osd << " is already down. ";
          } else {
            // state deltas are xor masks: setting CEPH_OSD_UP on an up osd
            // marks it down
            pending_inc.pending_osd_state_set(osd, CEPH_OSD_UP);
            ss << "marked down osd." << osd << ". ";
            any = true;
          }
          if (definitely_dead) {
            // record the epoch at which the osd is known dead so peering
            // can skip waiting for it
            if (!pending_inc.new_xinfo.count(osd)) {
              pending_inc.new_xinfo[osd] = osdmap.osd_xinfo[osd];
            }
            if (pending_inc.new_xinfo[osd].dead_epoch < pending_inc.epoch) {
              any = true;
            }
            pending_inc.new_xinfo[osd].dead_epoch = pending_inc.epoch;
          }
        } else if (prefix == "osd out") {
          if (osdmap.is_out(osd)) {
            if (verbose)
              ss << "osd." << osd << " is already out. ";
          } else {
            pending_inc.new_weight[osd] = CEPH_OSD_OUT;
            if (osdmap.osd_weight[osd]) {
              // remember the previous weight so a later "osd in" can
              // restore it instead of jumping to full weight
              if (pending_inc.new_xinfo.count(osd) == 0) {
                pending_inc.new_xinfo[osd] = osdmap.osd_xinfo[osd];
              }
              pending_inc.new_xinfo[osd].old_weight = osdmap.osd_weight[osd];
            }
            ss << "marked out osd." << osd << ". ";
            std::ostringstream msg;
            msg << "Client " << op->get_session()->entity_name
                << " marked osd." << osd << " out";
            if (osdmap.is_up(osd)) {
              msg << ", while it was still marked up";
            } else {
              auto period = ceph_clock_now() - down_pending_out[osd];
              msg << ", after it was down for " << int(period.sec())
                  << " seconds";
            }
            mon.clog->info() << msg.str();
            any = true;
          }
        } else if (prefix == "osd in") {
          if (osdmap.is_in(osd)) {
            if (verbose)
              ss << "osd." << osd << " is already in. ";
          } else {
            // restore the weight saved by "osd out", if any
            if (osdmap.osd_xinfo[osd].old_weight > 0) {
              pending_inc.new_weight[osd] = osdmap.osd_xinfo[osd].old_weight;
              if (pending_inc.new_xinfo.count(osd) == 0) {
                pending_inc.new_xinfo[osd] = osdmap.osd_xinfo[osd];
              }
              pending_inc.new_xinfo[osd].old_weight = 0;
            } else {
              pending_inc.new_weight[osd] = CEPH_OSD_IN;
            }
            ss << "marked in osd." << osd << ". ";
            any = true;
          }
        } else if (prefix == "osd rm") {
          err = prepare_command_osd_remove(osd);
          if (err == -EBUSY) {
            if (any)
              ss << ", ";
            ss << "osd." << osd << " is still up; must be down before removal. ";
          } else {
            ceph_assert(err == 0);
            if (any) {
              ss << ", osd." << osd;
            } else {
              ss << "removed osd." << osd;
            }
            any = true;
          }
        } else if (prefix == "osd stop") {
          if (osdmap.is_stop(osd)) {
            if (verbose)
              ss << "osd." << osd << " is already stopped. ";
          } else if (osdmap.is_down(osd)) {
            pending_inc.pending_osd_state_set(osd, CEPH_OSD_STOP);
            ss << "stop down osd." << osd << ". ";
            any = true;
          } else {
            pending_inc.pending_osd_state_set(osd, CEPH_OSD_UP | CEPH_OSD_STOP);
            ss << "stop osd." << osd << ". ";
            any = true;
          }
        }
      }
    }
    if (any) {
      getline(ss, rs);
      wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, err, rs,
                                 get_last_committed() + 1));
      return true;
    }
  } else if (prefix == "osd set-group" ||
             prefix == "osd unset-group" ||
             prefix == "osd add-noup" ||
             prefix == "osd add-nodown" ||
             prefix == "osd add-noin" ||
             prefix == "osd add-noout" ||
             prefix == "osd rm-noup" ||
             prefix == "osd rm-nodown" ||
             prefix == "osd rm-noin" ||
             prefix == "osd rm-noout") {
    // Set or clear the noup/nodown/noin/noout flags on individual osds,
    // crush subtrees, or device classes.  "set-group"/"unset-group" take a
    // comma-separated flag list; the add-*/rm-* forms imply a single flag.
    bool do_set = prefix == "osd set-group" ||
                  prefix.find("add") != string::npos;
    string flag_str;
    unsigned flags = 0;
    vector<string> who;
    if (prefix == "osd set-group" || prefix == "osd unset-group") {
      cmd_getval(cmdmap, "flags", flag_str);
      cmd_getval(cmdmap, "who", who);
      vector<string> raw_flags;
      boost::split(raw_flags, flag_str, boost::is_any_of(","));
      for (auto& f : raw_flags) {
        if (f == "noup")
          flags |= CEPH_OSD_NOUP;
        else if (f == "nodown")
          flags |= CEPH_OSD_NODOWN;
        else if (f == "noin")
          flags |= CEPH_OSD_NOIN;
        else if (f == "noout")
          flags |= CEPH_OSD_NOOUT;
        else {
          ss << "unrecognized flag '" << f << "', must be one of "
             << "{noup,nodown,noin,noout}";
          err = -EINVAL;
          goto reply;
        }
      }
    } else {
      // add-*/rm-* variants: derive the single flag from the prefix itself
      cmd_getval(cmdmap, "ids", who);
      if (prefix.find("noup") != string::npos)
        flags = CEPH_OSD_NOUP;
      else if (prefix.find("nodown") != string::npos)
        flags = CEPH_OSD_NODOWN;
      else if (prefix.find("noin") != string::npos)
        flags = CEPH_OSD_NOIN;
      else if (prefix.find("noout") != string::npos)
        flags = CEPH_OSD_NOOUT;
      else
        ceph_assert(0 == "Unreachable!");
    }
    if (flags == 0) {
      // NOTE(review): "nodwon" is a typo for "nodown" in this user-facing
      // message (left byte-identical here; fix in a code change).
      ss << "must specify flag(s) {noup,nodwon,noin,noout} to set/unset";
      err = -EINVAL;
      goto reply;
    }
    if (who.empty()) {
      ss << "must specify at least one or more targets to set/unset";
      err = -EINVAL;
      goto reply;
    }
    set<int> osds;
    set<int> crush_nodes;
    set<int> device_classes;
    for (auto& w : who) {
      if (w == "any" || w == "all" || w == "*") {
        osdmap.get_all_osds(osds);
        break;
      }
      std::stringstream ts;
      // each target may be an osd id, a crush bucket name, or a device class
      if (auto osd = parse_osd_id(w.c_str(), &ts); osd >= 0) {
        osds.insert(osd);
      } else if (osdmap.crush->name_exists(w)) {
        crush_nodes.insert(osdmap.crush->get_item_id(w));
      } else if (osdmap.crush->class_exists(w)) {
        device_classes.insert(osdmap.crush->get_class_id(w));
      } else {
        ss << "unable to parse osd id or crush node or device class: "
           << "\"" << w << "\". ";
      }
    }
    if (osds.empty() && crush_nodes.empty() && device_classes.empty()) {
      // ss has reason for failure
      err = -EINVAL;
      goto reply;
    }
    bool any = false;
    for (auto osd : osds) {
      if (!osdmap.exists(osd)) {
        ss << "osd." << osd << " does not exist. ";
        continue;
      }
      // Per-osd state deltas are xor-style toggles: if the committed map
      // already has the flag, "set" cancels any pending toggle, and vice
      // versa -- this keeps repeated commands idempotent.  (confirm against
      // OSDMap::Incremental::new_state semantics)
      if (do_set) {
        if (flags & CEPH_OSD_NOUP) {
          any |= osdmap.is_noup_by_osd(osd) ?
            pending_inc.pending_osd_state_clear(osd, CEPH_OSD_NOUP) :
            pending_inc.pending_osd_state_set(osd, CEPH_OSD_NOUP);
        }
        if (flags & CEPH_OSD_NODOWN) {
          any |= osdmap.is_nodown_by_osd(osd) ?
            pending_inc.pending_osd_state_clear(osd, CEPH_OSD_NODOWN) :
            pending_inc.pending_osd_state_set(osd, CEPH_OSD_NODOWN);
        }
        if (flags & CEPH_OSD_NOIN) {
          any |= osdmap.is_noin_by_osd(osd) ?
            pending_inc.pending_osd_state_clear(osd, CEPH_OSD_NOIN) :
            pending_inc.pending_osd_state_set(osd, CEPH_OSD_NOIN);
        }
        if (flags & CEPH_OSD_NOOUT) {
          any |= osdmap.is_noout_by_osd(osd) ?
            pending_inc.pending_osd_state_clear(osd, CEPH_OSD_NOOUT) :
            pending_inc.pending_osd_state_set(osd, CEPH_OSD_NOOUT);
        }
      } else {
        if (flags & CEPH_OSD_NOUP) {
          any |= osdmap.is_noup_by_osd(osd) ?
            pending_inc.pending_osd_state_set(osd, CEPH_OSD_NOUP) :
            pending_inc.pending_osd_state_clear(osd, CEPH_OSD_NOUP);
        }
        if (flags & CEPH_OSD_NODOWN) {
          any |= osdmap.is_nodown_by_osd(osd) ?
            pending_inc.pending_osd_state_set(osd, CEPH_OSD_NODOWN) :
            pending_inc.pending_osd_state_clear(osd, CEPH_OSD_NODOWN);
        }
        if (flags & CEPH_OSD_NOIN) {
          any |= osdmap.is_noin_by_osd(osd) ?
            pending_inc.pending_osd_state_set(osd, CEPH_OSD_NOIN) :
            pending_inc.pending_osd_state_clear(osd, CEPH_OSD_NOIN);
        }
        if (flags & CEPH_OSD_NOOUT) {
          any |= osdmap.is_noout_by_osd(osd) ?
            pending_inc.pending_osd_state_set(osd, CEPH_OSD_NOOUT) :
            pending_inc.pending_osd_state_clear(osd, CEPH_OSD_NOOUT);
        }
      }
    }
    // Crush-node and device-class flags are plain bitmasks, not xor deltas.
    for (auto& id : crush_nodes) {
      auto old_flags = osdmap.get_crush_node_flags(id);
      auto& pending_flags = pending_inc.new_crush_node_flags[id];
      pending_flags |= old_flags; // adopt existing flags first!
      if (do_set) {
        pending_flags |= flags;
      } else {
        pending_flags &= ~flags;
      }
      any = true;
    }
    for (auto& id : device_classes) {
      auto old_flags = osdmap.get_device_class_flags(id);
      auto& pending_flags = pending_inc.new_device_class_flags[id];
      pending_flags |= old_flags;
      if (do_set) {
        pending_flags |= flags;
      } else {
        pending_flags &= ~flags;
      }
      any = true;
    }
    if (any) {
      getline(ss, rs);
      wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, err, rs,
                                 get_last_committed() + 1));
      return true;
    }
  } else if (prefix == "osd pg-temp") {
    // Set (or, with an empty id list, clear) an explicit pg_temp mapping
    // for one pg.  The osd count must respect the pool's min_size/size.
    pg_t pgid;
    err = parse_pgid(cmdmap, ss, pgid);
    if (err < 0)
      goto reply;
    if (pending_inc.new_pg_temp.count(pgid)) {
      // an update for this pg is already staged; retry after it commits
      dout(10) << __func__ << " waiting for pending update on " << pgid << dendl;
      wait_for_finished_proposal(op, new C_RetryMessage(this, op));
      return true;
    }
    vector<int64_t> id_vec;
    vector<int32_t> new_pg_temp;
    cmd_getval(cmdmap, "id", id_vec);
    if (id_vec.empty()) {
      // empty osd list means: remove the pg_temp entry
      pending_inc.new_pg_temp[pgid] = mempool::osdmap::vector<int>();
      ss << "done cleaning up pg_temp of " << pgid;
      goto update;
    }
    for (auto osd : id_vec) {
      if (!osdmap.exists(osd)) {
        ss << "osd." << osd << " does not exist";
        err = -ENOENT;
        goto reply;
      }
      new_pg_temp.push_back(osd);
    }
    int pool_min_size = osdmap.get_pg_pool_min_size(pgid);
    if ((int)new_pg_temp.size() < pool_min_size) {
      ss << "num of osds (" << new_pg_temp.size() <<") < pool min size ("
         << pool_min_size << ")";
      err = -EINVAL;
      goto reply;
    }
    int pool_size = osdmap.get_pg_pool_size(pgid);
    if ((int)new_pg_temp.size() > pool_size) {
      ss << "num of osds (" << new_pg_temp.size() <<") > pool size ("
         << pool_size << ")";
      err = -EINVAL;
      goto reply;
    }
    pending_inc.new_pg_temp[pgid] = mempool::osdmap::vector<int>(
      new_pg_temp.begin(), new_pg_temp.end());
    ss << "set " << pgid << " pg_temp mapping to " << new_pg_temp;
    goto update;
  } else if (prefix == "osd primary-temp" ||
             prefix == "osd rm-primary-temp") {
    // Set or clear a primary_temp override for one pg.
    pg_t pgid;
    err = parse_pgid(cmdmap, ss, pgid);
    if (err < 0)
      goto reply;
    int64_t osd;
    if (prefix == "osd primary-temp") {
      if (!cmd_getval(cmdmap, "id", osd)) {
        ss << "unable to parse 'id' value '"
           << cmd_vartype_stringify(cmdmap.at("id")) << "'";
        err = -EINVAL;
        goto reply;
      }
      if (!osdmap.exists(osd)) {
        ss << "osd." << osd << " does not exist";
        err = -ENOENT;
        goto reply;
      }
    }
    else if (prefix == "osd rm-primary-temp") {
      osd = -1;  // -1 removes the override
    }
    else {
      ceph_assert(0 == "Unreachable!");
    }
    // clients must be new enough (firefly+) to understand primary_temp
    if (osdmap.require_min_compat_client != ceph_release_t::unknown &&
        osdmap.require_min_compat_client < ceph_release_t::firefly) {
      ss << "require_min_compat_client "
         << osdmap.require_min_compat_client
         << " < firefly, which is required for primary-temp";
      err = -EPERM;
      goto reply;
    }
    pending_inc.new_primary_temp[pgid] = osd;
    ss << "set " << pgid << " primary_temp mapping to " << osd;
    goto update;
  } else if (prefix == "pg repeer") {
    // Force a pg to repeer by briefly perturbing its pg_temp mapping; the
    // pg will then map back to its desired acting set.
    pg_t pgid;
    err = parse_pgid(cmdmap, ss, pgid);
    if (err < 0)
      goto reply;
    vector<int> acting;
    int primary;
    osdmap.pg_to_acting_osds(pgid, &acting, &primary);
    if (primary < 0) {
      err = -EAGAIN;
      ss << "pg currently has no primary";
      goto reply;
    }
    if (acting.size() > 1) {
      // map to just primary; it will map back to what it wants
      pending_inc.new_pg_temp[pgid] = { primary };
    } else {
      // hmm, pick another arbitrary osd to induce a change.  Note
      // that this won't work if there is only one suitable OSD in the cluster.
      int i;
      bool done = false;
      for (i = 0; i < osdmap.get_max_osd(); ++i) {
        if (i == primary || !osdmap.is_up(i) || !osdmap.exists(i)) {
          continue;
        }
        pending_inc.new_pg_temp[pgid] = { primary, i };
        done = true;
        break;
      }
      if (!done) {
        err = -EAGAIN;
        ss << "not enough up OSDs in the cluster to force repeer";
        goto reply;
      }
    }
    goto update;
  } else if (prefix == "osd pg-upmap" ||
             prefix == "osd rm-pg-upmap" ||
             prefix == "osd pg-upmap-items" ||
             prefix == "osd rm-pg-upmap-items" ||
             prefix == "osd pg-upmap-primary" ||
             prefix == "osd rm-pg-upmap-primary") {
    // Manage explicit pg-upmap overrides: full acting-set remaps
    // (pg-upmap), per-osd substitutions (pg-upmap-items), and primary
    // selection (pg-upmap-primary), plus their removal variants.
    enum {
      OP_PG_UPMAP,
      OP_RM_PG_UPMAP,
      OP_PG_UPMAP_ITEMS,
      OP_RM_PG_UPMAP_ITEMS,
      OP_PG_UPMAP_PRIMARY,
      OP_RM_PG_UPMAP_PRIMARY,
    } upmap_option;
    if (prefix == "osd pg-upmap") {
      upmap_option = OP_PG_UPMAP;
    } else if (prefix == "osd rm-pg-upmap") {
      upmap_option = OP_RM_PG_UPMAP;
    } else if (prefix == "osd pg-upmap-items") {
      upmap_option = OP_PG_UPMAP_ITEMS;
    } else if (prefix == "osd rm-pg-upmap-items") {
      upmap_option = OP_RM_PG_UPMAP_ITEMS;
    } else if (prefix == "osd pg-upmap-primary") {
      upmap_option = OP_PG_UPMAP_PRIMARY;
    } else if (prefix == "osd rm-pg-upmap-primary") {
      upmap_option = OP_RM_PG_UPMAP_PRIMARY;
    } else {
      ceph_abort_msg("invalid upmap option");
    }
    // each sub-command has a minimum client release requirement
    ceph_release_t min_release = ceph_release_t::unknown;
    string feature_name = "unknown";
    switch (upmap_option) {
    case OP_PG_UPMAP: // fall through
    case OP_RM_PG_UPMAP: // fall through
    case OP_PG_UPMAP_ITEMS: // fall through
    case OP_RM_PG_UPMAP_ITEMS:
      min_release = ceph_release_t::luminous;
      feature_name = "pg-upmap";
      break;
    case OP_PG_UPMAP_PRIMARY: // fall through
    case OP_RM_PG_UPMAP_PRIMARY:
      min_release = ceph_release_t::reef;
      feature_name = "pg-upmap-primary";
      break;
    default:
      ceph_abort_msg("invalid upmap option");
    }
    uint64_t min_feature = CEPH_FEATUREMASK_OSDMAP_PG_UPMAP;
    string min_release_name = ceph_release_name(static_cast<int>(min_release));
    if (osdmap.require_min_compat_client < min_release) {
      ss << "min_compat_client "
         << osdmap.require_min_compat_client
         << " < " << min_release_name << ", which is required for " << feature_name << ". "
         << "Try 'ceph osd set-require-min-compat-client " << min_release_name << "' "
         << "before using the new interface";
      err = -EPERM;
      goto reply;
    }
    //TODO: Should I add feature and test for upmap-primary?
    err = check_cluster_features(min_feature, ss);
    if (err == -EAGAIN)
      goto wait;
    if (err < 0)
      goto reply;
    pg_t pgid;
    err = parse_pgid(cmdmap, ss, pgid);
    if (err < 0)
      goto reply;
    if (pending_inc.old_pools.count(pgid.pool())) {
      ss << "pool of " << pgid << " is pending removal";
      err = -ENOENT;
      getline(ss, rs);
      wait_for_finished_proposal(op,
        new Monitor::C_Command(mon, op, err, rs, get_last_committed() + 1));
      return true;
    }
    // check pending upmap changes
    switch (upmap_option) {
    case OP_PG_UPMAP: // fall through
    case OP_RM_PG_UPMAP:
      if (pending_inc.new_pg_upmap.count(pgid) ||
          pending_inc.old_pg_upmap.count(pgid)) {
        dout(10) << __func__ << " waiting for pending update on "
                 << pgid << dendl;
        wait_for_finished_proposal(op, new C_RetryMessage(this, op));
        return true;
      }
      break;
    case OP_PG_UPMAP_PRIMARY: // fall through
    case OP_RM_PG_UPMAP_PRIMARY:
      {
        const pg_pool_t *pt = osdmap.get_pg_pool(pgid.pool());
        if (! pt->is_replicated()) {
          ss << "pg-upmap-primary is only supported for replicated pools";
          err = -EINVAL;
          goto reply;
        }
      }
      // fall through
    case OP_PG_UPMAP_ITEMS: // fall through
    case OP_RM_PG_UPMAP_ITEMS: // fall through
      if (pending_inc.new_pg_upmap_items.count(pgid) ||
          pending_inc.old_pg_upmap_items.count(pgid)) {
        dout(10) << __func__ << " waiting for pending update on "
                 << pgid << dendl;
        wait_for_finished_proposal(op, new C_RetryMessage(this, op));
        return true;
      }
      break;
    default:
      ceph_abort_msg("invalid upmap option");
    }
    switch (upmap_option) {
    case OP_PG_UPMAP:
      {
        // full replacement acting set for the pg
        vector<int64_t> id_vec;
        if (!cmd_getval(cmdmap, "id", id_vec)) {
          ss << "unable to parse 'id' value(s) '"
             << cmd_vartype_stringify(cmdmap.at("id")) << "'";
          err = -EINVAL;
          goto reply;
        }
        int pool_min_size = osdmap.get_pg_pool_min_size(pgid);
        if ((int)id_vec.size() < pool_min_size) {
          ss << "num of osds (" << id_vec.size() <<") < pool min size ("
             << pool_min_size << ")";
          err = -EINVAL;
          goto reply;
        }
        int pool_size = osdmap.get_pg_pool_size(pgid);
        if ((int)id_vec.size() > pool_size) {
          ss << "num of osds (" << id_vec.size() <<") > pool size ("
             << pool_size << ")";
          err = -EINVAL;
          goto reply;
        }
        vector<int32_t> new_pg_upmap;
        for (auto osd : id_vec) {
          if (osd != CRUSH_ITEM_NONE && !osdmap.exists(osd)) {
            ss << "osd." << osd << " does not exist";
            err = -ENOENT;
            goto reply;
          }
          // silently drop duplicate osd ids
          auto it = std::find(new_pg_upmap.begin(), new_pg_upmap.end(), osd);
          if (it != new_pg_upmap.end()) {
            ss << "osd." << osd << " already exists, ";
            continue;
          }
          new_pg_upmap.push_back(osd);
        }
        if (new_pg_upmap.empty()) {
          ss << "no valid upmap items(pairs) is specified";
          err = -EINVAL;
          goto reply;
        }
        pending_inc.new_pg_upmap[pgid] = mempool::osdmap::vector<int32_t>(
          new_pg_upmap.begin(), new_pg_upmap.end());
        ss << "set " << pgid << " pg_upmap mapping to " << new_pg_upmap;
      }
      break;
    case OP_RM_PG_UPMAP:
      {
        pending_inc.old_pg_upmap.insert(pgid);
        ss << "clear " << pgid << " pg_upmap mapping";
      }
      break;
    case OP_PG_UPMAP_ITEMS:
      {
        // flat list of from->to osd pairs
        vector<int64_t> id_vec;
        if (!cmd_getval(cmdmap, "id", id_vec)) {
          ss << "unable to parse 'id' value(s) '"
             << cmd_vartype_stringify(cmdmap.at("id")) << "'";
          err = -EINVAL;
          goto reply;
        }
        if (id_vec.size() % 2) {
          ss << "you must specify pairs of osd ids to be remapped";
          err = -EINVAL;
          goto reply;
        }
        int pool_size = osdmap.get_pg_pool_size(pgid);
        if ((int)(id_vec.size() / 2) > pool_size) {
          ss << "num of osd pairs (" << id_vec.size() / 2 <<") > pool size ("
             << pool_size << ")";
          err = -EINVAL;
          goto reply;
        }
        vector<pair<int32_t,int32_t>> new_pg_upmap_items;
        ostringstream items;
        items << "[";
        for (auto p = id_vec.begin(); p != id_vec.end(); ++p) {
          // consume two ids per iteration: 'from' then 'to'
          int from = *p++;
          int to = *p;
          if (from == to) {
            ss << "from osd." << from << " == to osd." << to << ", ";
            continue;
          }
          if (!osdmap.exists(from)) {
            ss << "osd." << from << " does not exist";
            err = -ENOENT;
            goto reply;
          }
          if (to != CRUSH_ITEM_NONE && !osdmap.exists(to)) {
            ss << "osd." << to << " does not exist";
            err = -ENOENT;
            goto reply;
          }
          pair<int32_t,int32_t> entry = make_pair(from, to);
          auto it = std::find(new_pg_upmap_items.begin(),
            new_pg_upmap_items.end(), entry);
          if (it != new_pg_upmap_items.end()) {
            ss << "osd." << from << " -> osd." << to << " already exists, ";
            continue;
          }
          new_pg_upmap_items.push_back(entry);
          items << from << "->" << to << ",";
        }
        // NOTE(review): if every pair was skipped, items holds just "[" and
        // this resize still drops a character; harmless only because the
        // empty check below replies before 'out' is used -- confirm.
        string out(items.str());
        out.resize(out.size() - 1); // drop last ','
        out += "]";
        if (new_pg_upmap_items.empty()) {
          ss << "no valid upmap items(pairs) is specified";
          err = -EINVAL;
          goto reply;
        }
        pending_inc.new_pg_upmap_items[pgid] =
          mempool::osdmap::vector<pair<int32_t,int32_t>>(
          new_pg_upmap_items.begin(), new_pg_upmap_items.end());
        ss << "set " << pgid << " pg_upmap_items mapping to " << out;
      }
      break;
    case OP_RM_PG_UPMAP_ITEMS:
      {
        pending_inc.old_pg_upmap_items.insert(pgid);
        ss << "clear " << pgid << " pg_upmap_items mapping";
      }
      break;
    case OP_PG_UPMAP_PRIMARY:
      {
        int64_t id;
        if (!cmd_getval(cmdmap, "id", id)) {
          ss << "invalid osd id value '"
             << cmd_vartype_stringify(cmdmap.at("id")) << "'";
          err = -EINVAL;
          goto reply;
        }
        if (id != CRUSH_ITEM_NONE && !osdmap.exists(id)) {
          ss << "osd." << id << " does not exist";
          err = -ENOENT;
          goto reply;
        }
        vector<int> acting;
        int primary;
        osdmap.pg_to_acting_osds(pgid, &acting, &primary);
        if (id == primary) {
          ss << "osd." << id << " is already primary for pg " << pgid;
          err = -EINVAL;
          goto reply;
        }
        // the requested osd must already be a (non-primary) member of the
        // acting set; we only reorder, never add
        int found_idx = 0;
        for (int i = 1 ; i < (int)acting.size(); i++) { // skip 0 on purpose
          if (acting[i] == id) {
            found_idx = i;
            break;
          }
        }
        if (found_idx == 0) {
          ss << "osd." << id << " is not in acting set for pg " << pgid;
          err = -EINVAL;
          goto reply;
        }
        vector<int> new_acting(acting);
        new_acting[found_idx] = new_acting[0];
        new_acting[0] = id;
        int pool_size = osdmap.get_pg_pool_size(pgid);
        if (osdmap.crush->verify_upmap(cct, osdmap.get_pg_pool_crush_rule(pgid),
            pool_size, new_acting) >= 0) {
          ss << "change primary for pg " << pgid << " to osd." << id;
        }
        else {
          ss << "can't change primary for pg " << pgid << " to osd." << id
             << " - illegal pg after the change";
          err = -EINVAL;
          goto reply;
        }
        pending_inc.new_pg_upmap_primary[pgid] = id;
        //TO-REMOVE:
        ldout(cct, 20) << "pg " << pgid << ": set pg_upmap_primary to " << id << dendl;
      }
      break;
    case OP_RM_PG_UPMAP_PRIMARY:
      {
        pending_inc.old_pg_upmap_primary.insert(pgid);
        ss << "clear " << pgid << " pg_upmap_primary mapping";
      }
      break;
    default:
      ceph_abort_msg("invalid upmap option");
    }
    goto update;
  } else if (prefix == "osd primary-affinity") {
    // Set an osd's primary-affinity; the [0,1] weight is scaled to the
    // integer CEPH_OSD_MAX_PRIMARY_AFFINITY range.
    int64_t id;
    if (!cmd_getval(cmdmap, "id", id)) {
      ss << "invalid osd id value '"
         << cmd_vartype_stringify(cmdmap.at("id")) << "'";
      err = -EINVAL;
      goto reply;
    }
    double w;
    if (!cmd_getval(cmdmap, "weight", w)) {
      ss << "unable to parse 'weight' value '"
         << cmd_vartype_stringify(cmdmap.at("weight")) << "'";
      err = -EINVAL;
      goto reply;
    }
    long ww = (int)((double)CEPH_OSD_MAX_PRIMARY_AFFINITY*w);
    if (ww < 0L) {
      ss << "weight must be >= 0";
      err = -EINVAL;
      goto reply;
    }
    // primary-affinity needs clients that understand it (firefly+)
    if (osdmap.require_min_compat_client != ceph_release_t::unknown &&
        osdmap.require_min_compat_client < ceph_release_t::firefly) {
      ss << "require_min_compat_client "
         << osdmap.require_min_compat_client
         << " < firefly, which is required for primary-affinity";
      err = -EPERM;
      goto reply;
    }
    if (osdmap.exists(id)) {
      pending_inc.new_primary_affinity[id] = ww;
      // BUG(review): std::ios::hex / std::ios::dec are fmtflags *values*,
      // not manipulators -- streaming them prints their integer value
      // instead of switching the base.  Compare "osd reweight" below,
      // which correctly uses std::hex / std::dec.
      ss << "set osd." << id << " primary-affinity to " << w << " (" << std::ios::hex << ww << std::ios::dec << ")";
      getline(ss, rs);
      wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
                                 get_last_committed() + 1));
      return true;
    } else {
      ss << "osd." << id << " does not exist";
      err = -ENOENT;
      goto reply;
    }
  } else if (prefix == "osd reweight") {
    // Set an osd's override ("in") weight; the [0,1] weight is scaled to
    // the integer CEPH_OSD_IN range.
    int64_t id;
    if (!cmd_getval(cmdmap, "id", id)) {
      ss << "unable to parse osd id value '"
         << cmd_vartype_stringify(cmdmap.at("id")) << "'";
      err = -EINVAL;
      goto reply;
    }
    double w;
    if (!cmd_getval(cmdmap, "weight", w)) {
      ss << "unable to parse weight value '"
         << cmd_vartype_stringify(cmdmap.at("weight")) << "'";
      err = -EINVAL;
      goto reply;
    }
    long ww = (int)((double)CEPH_OSD_IN*w);
    if (ww < 0L) {
      ss << "weight must be >= 0";
      err = -EINVAL;
      goto reply;
    }
    if (osdmap.exists(id)) {
      pending_inc.new_weight[id] = ww;
      ss << "reweighted osd." << id << " to " << w << " (" << std::hex << ww << std::dec << ")";
      getline(ss, rs);
      wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
                                 get_last_committed() + 1));
      return true;
    } else {
      ss << "osd." << id << " does not exist";
      err = -ENOENT;
      goto reply;
    }
  } else if (prefix == "osd reweightn") {
    // Bulk reweight: parse an {id: weight} map and stage all entries.
    map<int32_t, uint32_t> weights;
    err = parse_reweights(cct, cmdmap, osdmap, &weights);
    if (err) {
      ss << "unable to parse 'weights' value '"
         << cmd_vartype_stringify(cmdmap.at("weights")) << "'";
      goto reply;
    }
    pending_inc.new_weight.insert(weights.begin(), weights.end());
    // NOTE(review): 'rs' is passed through without being filled from 'ss'
    // (no getline here), so the reply string is whatever rs already holds
    // -- confirm this is intentional.
    wait_for_finished_proposal(
      op,
      new Monitor::C_Command(mon, op, 0, rs, rdata, get_last_committed() + 1));
    return true;
  } else if (prefix == "osd lost") {
    // Mark a down osd as permanently lost (its data is unrecoverable) so
    // peering can proceed without it.  Requires explicit confirmation.
    int64_t id;
    if (!cmd_getval(cmdmap, "id", id)) {
      ss << "unable to parse osd id value '"
         << cmd_vartype_stringify(cmdmap.at("id")) << "'";
      err = -EINVAL;
      goto reply;
    }
    bool sure = false;
    cmd_getval(cmdmap, "yes_i_really_mean_it", sure);
    if (!sure) {
      ss << "are you SURE? this might mean real, permanent data loss. pass "
            "--yes-i-really-mean-it if you really do.";
      err = -EPERM;
      goto reply;
    } else if (!osdmap.exists(id)) {
      ss << "osd." << id << " does not exist";
      err = -ENOENT;
      goto reply;
    } else if (!osdmap.is_down(id)) {
      ss << "osd." << id << " is not down";
      err = -EBUSY;
      goto reply;
    } else {
      // record the epoch at which the osd went down as its "lost" epoch
      epoch_t e = osdmap.get_info(id).down_at;
      pending_inc.new_lost[id] = e;
      ss << "marked osd lost in epoch " << e;
      getline(ss, rs);
      wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
                                 get_last_committed() + 1));
      return true;
    }
  } else if (prefix == "osd destroy-actual" ||
             prefix == "osd purge-actual" ||
             prefix == "osd purge-new") {
    /* Destroying an OSD means that we don't expect to further make use of
     * the OSDs data (which may even become unreadable after this operation),
     * and that we are okay with scrubbing all its cephx keys and config-key
     * data (which may include lockbox keys, thus rendering the osd's data
     * unreadable).
     *
     * The OSD will not be removed. Instead, we will mark it as destroyed,
     * such that a subsequent call to `create` will not reuse the osd id.
     * This will play into being able to recreate the OSD, at the same
     * crush location, with minimal data movement.
     */
    // make sure authmon is writeable.
    if (!mon.authmon()->is_writeable()) {
      dout(10) << __func__ << " waiting for auth mon to be writeable for "
               << "osd destroy" << dendl;
      mon.authmon()->wait_for_writeable(op, new C_RetryMessage(this, op));
      return false;
    }
    int64_t id;
    if (!cmd_getval(cmdmap, "id", id)) {
      auto p = cmdmap.find("id");
      if (p == cmdmap.end()) {
        ss << "no osd id specified";
      } else {
        // NOTE(review): the closing quote is missing from this message
        // (ends with "" instead of "'").
        ss << "unable to parse osd id value '"
           << cmd_vartype_stringify(cmdmap.at("id")) << "";
      }
      err = -EINVAL;
      goto reply;
    }
    bool is_destroy = (prefix == "osd destroy-actual");
    if (!is_destroy) {
      ceph_assert("osd purge-actual" == prefix ||
                  "osd purge-new" == prefix);
    }
    bool sure = false;
    cmd_getval(cmdmap, "yes_i_really_mean_it", sure);
    if (!sure) {
      ss << "Are you SURE? Did you verify with 'ceph osd safe-to-destroy'? "
         << "This will mean real, permanent data loss, as well "
         << "as deletion of cephx and lockbox keys. "
         << "Pass --yes-i-really-mean-it if you really do.";
      err = -EPERM;
      goto reply;
    } else if (!osdmap.exists(id)) {
      ss << "osd." << id << " does not exist";
      err = 0; // idempotent
      goto reply;
    } else if (osdmap.is_up(id)) {
      ss << "osd." << id << " is not `down`.";
      err = -EBUSY;
      goto reply;
    } else if (is_destroy && osdmap.is_destroyed(id)) {
      ss << "destroyed osd." << id;
      err = 0;
      goto reply;
    }
    if (prefix == "osd purge-new" &&
        (osdmap.get_state(id) & CEPH_OSD_NEW) == 0) {
      ss << "osd." << id << " is not new";
      err = -EPERM;
      goto reply;
    }
    bool goto_reply = false;
    // plug paxos so the osdmap and auth/kv updates staged by the helpers
    // are proposed together -- presumably to keep them atomic; confirm.
    paxos.plug();
    if (is_destroy) {
      err = prepare_command_osd_destroy(id, ss);
      // we checked above that it should exist.
      ceph_assert(err != -ENOENT);
    } else {
      err = prepare_command_osd_purge(id, ss);
      if (err == -ENOENT) {
        err = 0;
        ss << "osd." << id << " does not exist.";
        goto_reply = true;
      }
    }
    paxos.unplug();
    if (err < 0 || goto_reply) {
      goto reply;
    }
    if (is_destroy) {
      ss << "destroyed osd." << id;
    } else {
      ss << "purged osd." << id;
    }
    getline(ss, rs);
    wait_for_finished_proposal(op,
      new Monitor::C_Command(mon, op, 0, rs, get_last_committed() + 1));
    force_immediate_propose();
    return true;
  } else if (prefix == "osd new") {
    // Create (or recreate) an osd from a JSON parameter blob carried in
    // the message payload; also stages cephx/config-key state, so both the
    // auth and kv monitors must be writeable first.
    // make sure authmon is writeable.
    if (!mon.authmon()->is_writeable()) {
      dout(10) << __func__ << " waiting for auth mon to be writeable for "
               << "osd new" << dendl;
      mon.authmon()->wait_for_writeable(op, new C_RetryMessage(this, op));
      return false;
    }
    // make sure kvmon is writeable.
    if (!mon.kvmon()->is_writeable()) {
      dout(10) << __func__ << " waiting for kv mon to be writeable for "
               << "osd new" << dendl;
      mon.kvmon()->wait_for_writeable(op, new C_RetryMessage(this, op));
      return false;
    }
    map<string,string> param_map;
    bufferlist bl = m->get_data();
    string param_json = bl.to_str();
    dout(20) << __func__ << " osd new json = " << param_json << dendl;
    err = get_json_str_map(param_json, ss, &param_map);
    if (err < 0)
      goto reply;
    dout(20) << __func__ << " osd new params " << param_map << dendl;
    paxos.plug();
    err = prepare_command_osd_new(op, cmdmap, param_map, ss, f.get());
    paxos.unplug();
    if (err < 0) {
      goto reply;
    }
    if (f) {
      f->flush(rdata);
    } else {
      rdata.append(ss);
    }
    // positive EEXIST (not -EEXIST) signals "already exists": treat as
    // success for idempotency
    if (err == EEXIST) {
      // idempotent operation
      err = 0;
      goto reply;
    }
    wait_for_finished_proposal(op,
      new Monitor::C_Command(mon, op, 0, rs, rdata,
                             get_last_committed() + 1));
    force_immediate_propose();
    return true;
  } else if (prefix == "osd create") {
    // Allocate a new osd id, optionally pinned to a uuid (and, with a
    // uuid, a specific id) so repeated invocations are idempotent.
    // optional id provided?
    int64_t id = -1, cmd_id = -1;
    if (cmd_getval(cmdmap, "id", cmd_id)) {
      if (cmd_id < 0) {
        ss << "invalid osd id value '" << cmd_id << "'";
        err = -EINVAL;
        goto reply;
      }
      dout(10) << " osd create got id " << cmd_id << dendl;
    }
    uuid_d uuid;
    string uuidstr;
    if (cmd_getval(cmdmap, "uuid", uuidstr)) {
      if (!uuid.parse(uuidstr.c_str())) {
        ss << "invalid uuid value '" << uuidstr << "'";
        err = -EINVAL;
        goto reply;
      }
      // we only care about the id if we also have the uuid, to
      // ensure the operation's idempotency.
      id = cmd_id;
    }
    int32_t new_id = -1;
    err = prepare_command_osd_create(id, uuid, &new_id, ss);
    if (err < 0) {
      if (err == -EAGAIN) {
        wait_for_finished_proposal(op, new C_RetryMessage(this, op));
        return true;
      }
      // a check has failed; reply to the user.
      goto reply;
    } else if (err == EEXIST) {
      // this is an idempotent operation; we can go ahead and reply.
      if (f) {
        f->open_object_section("created_osd");
        f->dump_int("osdid", new_id);
        f->close_section();
        f->flush(rdata);
      } else {
        ss << new_id;
        rdata.append(ss);
      }
      err = 0;
      goto reply;
    }
    string empty_device_class;
    do_osd_create(id, uuid, empty_device_class, &new_id);
    if (f) {
      f->open_object_section("created_osd");
      f->dump_int("osdid", new_id);
      f->close_section();
      f->flush(rdata);
    } else {
      ss << new_id;
      rdata.append(ss);
    }
    wait_for_finished_proposal(op,
      new Monitor::C_Command(mon, op, 0, rs, rdata,
                             get_last_committed() + 1));
    return true;
  } else if (prefix == "osd blocklist clear" ||
             prefix == "osd blacklist clear") {
    // Remove every plain and range blocklist entry ("blacklist" is the
    // legacy command name).  Any staged additions are dropped first.
    pending_inc.new_blocklist.clear();
    std::list<std::pair<entity_addr_t,utime_t > > blocklist;
    std::list<std::pair<entity_addr_t,utime_t > > range_b;
    osdmap.get_blocklist(&blocklist, &range_b);
    for (const auto &entry : blocklist) {
      pending_inc.old_blocklist.push_back(entry.first);
    }
    for (const auto &entry : range_b) {
      pending_inc.old_range_blocklist.push_back(entry.first);
    }
    ss << " removed all blocklist entries";
    getline(ss, rs);
    wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
                               get_last_committed() + 1));
    return true;
  } else if (prefix == "osd blocklist" ||
             prefix == "osd blacklist") {
    // Add or remove a blocklist entry for a single address or (with the
    // "range" keyword) a CIDR range.  "blacklist" is the legacy alias.
    string addrstr, rangestr;
    bool range = false;
    cmd_getval(cmdmap, "addr", addrstr);
    if (cmd_getval(cmdmap, "range", rangestr)) {
      if (rangestr == "range") {
        range = true;
      } else {
        ss << "Did you mean to specify \"osd blocklist range\"?";
        err = -EINVAL;
        goto reply;
      }
    }
    entity_addr_t addr;
    if (!addr.parse(addrstr)) {
      ss << "unable to parse address " << addrstr;
      err = -EINVAL;
      goto reply;
    }
    else {
      if (range) {
        if (!addr.maybe_cidr()) {
          ss << "You specified a range command, but " << addr
             << " does not parse as a CIDR range";
          err = -EINVAL;
          goto reply;
        }
        addr.type = entity_addr_t::TYPE_CIDR;
        // range blocklisting needs cluster-wide support
        err = check_cluster_features(CEPH_FEATUREMASK_RANGE_BLOCKLIST, ss);
        if (err) {
          goto reply;
        }
        // the nonce carries the prefix length for CIDR addresses
        if ((addr.is_ipv4() && addr.get_nonce() > 32) ||
            (addr.is_ipv6() && addr.get_nonce() > 128)) {
          ss << "Too many bits in range for that protocol!";
          err = -EINVAL;
          goto reply;
        }
      } else {
        if (osdmap.require_osd_release >= ceph_release_t::nautilus) {
          // always blocklist type ANY
          addr.set_type(entity_addr_t::TYPE_ANY);
        } else {
          addr.set_type(entity_addr_t::TYPE_LEGACY);
        }
      }
      string blocklistop;
      // fall back to the legacy "blacklistop" arg name if needed
      if (!cmd_getval(cmdmap, "blocklistop", blocklistop)) {
        cmd_getval(cmdmap, "blacklistop", blocklistop);
      }
      if (blocklistop == "add") {
        utime_t expires = ceph_clock_now();
        // default one hour
        double d = cmd_getval_or<double>(cmdmap, "expire",
          g_conf()->mon_osd_blocklist_default_expire);
        expires += d;
        auto add_to_pending_blocklists = [](auto& nb, auto& ob,
                                            const auto& addr,
                                            const auto& expires) {
          nb[addr] = expires;
          // cancel any pending un-blocklisting request too
          auto it = std::find(ob.begin(),
            ob.end(), addr);
          if (it != ob.end()) {
            ob.erase(it);
          }
        };
        if (range) {
          add_to_pending_blocklists(pending_inc.new_range_blocklist,
                                    pending_inc.old_range_blocklist,
                                    addr, expires);
        } else {
          add_to_pending_blocklists(pending_inc.new_blocklist,
                                    pending_inc.old_blocklist,
                                    addr, expires);
        }
        ss << "blocklisting " << addr << " until " << expires << " (" << d << " sec)";
        getline(ss, rs);
        wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
                                   get_last_committed() + 1));
        return true;
      } else if (blocklistop == "rm") {
        // remove from the committed map (stage an "old" entry) or cancel a
        // not-yet-committed addition
        auto rm_from_pending_blocklists = [](const auto& addr,
                                             auto& blocklist,
                                             auto& ob, auto& pb) {
          if (blocklist.count(addr)) {
            ob.push_back(addr);
            return true;
          } else if (pb.count(addr)) {
            pb.erase(addr);
            return true;
          }
          return false;
        };
        if ((!range && rm_from_pending_blocklists(addr, osdmap.blocklist,
                                                  pending_inc.old_blocklist,
                                                  pending_inc.new_blocklist)) ||
            (range && rm_from_pending_blocklists(addr, osdmap.range_blocklist,
                                                 pending_inc.old_range_blocklist,
                                                 pending_inc.new_range_blocklist))) {
          ss << "un-blocklisting " << addr;
          getline(ss, rs);
          wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
                                     get_last_committed() + 1));
          return true;
        }
        ss << addr << " isn't blocklisted";
        err = 0;
        goto reply;
      }
      // NOTE(review): an unrecognized blocklistop falls through here with
      // no explicit reply; presumably handled by the common reply path at
      // the end of the command chain -- confirm.
    }
  } else if (prefix == "osd pool mksnap") {
    // Create a pool-level (self-managed-snap-less) snapshot.  Rejected for
    // unmanaged-snap pools, cache tiers, and pools attached to a cephfs.
    string poolstr;
    cmd_getval(cmdmap, "pool", poolstr);
    int64_t pool = osdmap.lookup_pg_pool_name(poolstr.c_str());
    if (pool < 0) {
      ss << "unrecognized pool '" << poolstr << "'";
      err = -ENOENT;
      goto reply;
    }
    string snapname;
    cmd_getval(cmdmap, "snap", snapname);
    const pg_pool_t *p = osdmap.get_pg_pool(pool);
    if (p->is_unmanaged_snaps_mode()) {
      ss << "pool " << poolstr << " is in unmanaged snaps mode";
      err = -EINVAL;
      goto reply;
    } else if (p->snap_exists(snapname.c_str())) {
      ss << "pool " << poolstr << " snap " << snapname << " already exists";
      err = 0;  // idempotent
      goto reply;
    } else if (p->is_tier()) {
      ss << "pool " << poolstr << " is a cache tier";
      err = -EINVAL;
      goto reply;
    }
    // work on the pending copy of the pool, creating it if necessary
    pg_pool_t *pp = 0;
    if (pending_inc.new_pools.count(pool))
      pp = &pending_inc.new_pools[pool];
    if (!pp) {
      pp = &pending_inc.new_pools[pool];
      *pp = *p;
    }
    // re-check against the pending copy: the snap may already be staged
    if (pp->snap_exists(snapname.c_str())) {
      ss << "pool " << poolstr << " snap " << snapname << " already exists";
    } else {
      if (const auto& fsmap = mon.mdsmon()->get_fsmap(); fsmap.pool_in_use(pool)) {
        dout(20) << "pool-level snapshots have been disabled for pools "
                    "attached to an fs - poolid:" << pool << dendl;
        err = -EOPNOTSUPP;
        goto reply;
      }
      pp->add_snap(snapname.c_str(), ceph_clock_now());
      pp->set_snap_epoch(pending_inc.epoch);
      ss << "created pool " << poolstr << " snap " << snapname;
    }
    getline(ss, rs);
    wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
                               get_last_committed() + 1));
    return true;
  } else if (prefix == "osd pool rmsnap") {
    // Remove a pool-level snapshot by name; idempotent if it is already
    // gone (including already removed in the pending map).
    string poolstr;
    cmd_getval(cmdmap, "pool", poolstr);
    int64_t pool = osdmap.lookup_pg_pool_name(poolstr.c_str());
    if (pool < 0) {
      ss << "unrecognized pool '" << poolstr << "'";
      err = -ENOENT;
      goto reply;
    }
    string snapname;
    cmd_getval(cmdmap, "snap", snapname);
    const pg_pool_t *p = osdmap.get_pg_pool(pool);
    if (p->is_unmanaged_snaps_mode()) {
      ss << "pool " << poolstr << " is in unmanaged snaps mode";
      err = -EINVAL;
      goto reply;
    } else if (!p->snap_exists(snapname.c_str())) {
      ss << "pool " << poolstr << " snap " << snapname << " does not exist";
      err = 0;  // idempotent
      goto reply;
    }
    // work on the pending copy of the pool, creating it if necessary
    pg_pool_t *pp = 0;
    if (pending_inc.new_pools.count(pool))
      pp = &pending_inc.new_pools[pool];
    if (!pp) {
      pp = &pending_inc.new_pools[pool];
      *pp = *p;
    }
    snapid_t sn = pp->snap_exists(snapname.c_str());
    if (sn) {
      pp->remove_snap(sn);
      pp->set_snap_epoch(pending_inc.epoch);
      ss << "removed pool " << poolstr << " snap " << snapname;
    } else {
      ss << "already removed pool " << poolstr << " snap " << snapname;
    }
    getline(ss, rs);
    wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
                               get_last_committed() + 1));
    return true;
} else if (prefix == "osd pool create") {
int64_t pg_num = cmd_getval_or<int64_t>(cmdmap, "pg_num", 0);
int64_t pg_num_min = cmd_getval_or<int64_t>(cmdmap, "pg_num_min", 0);
int64_t pg_num_max = cmd_getval_or<int64_t>(cmdmap, "pg_num_max", 0);
int64_t pgp_num = cmd_getval_or<int64_t>(cmdmap, "pgp_num", pg_num);
string pool_type_str;
cmd_getval(cmdmap, "pool_type", pool_type_str);
if (pool_type_str.empty())
pool_type_str = g_conf().get_val<string>("osd_pool_default_type");
string poolstr;
cmd_getval(cmdmap, "pool", poolstr);
bool confirm = false;
//confirmation may be set to true only by internal operations.
cmd_getval(cmdmap, "yes_i_really_mean_it", confirm);
if (poolstr[0] == '.' && !confirm) {
ss << "pool names beginning with . are not allowed";
err = 0;
goto reply;
}
int64_t pool_id = osdmap.lookup_pg_pool_name(poolstr);
if (pool_id >= 0) {
const pg_pool_t *p = osdmap.get_pg_pool(pool_id);
if (pool_type_str != p->get_type_name()) {
ss << "pool '" << poolstr << "' cannot change to type " << pool_type_str;
err = -EINVAL;
} else {
ss << "pool '" << poolstr << "' already exists";
err = 0;
}
goto reply;
}
int pool_type;
if (pool_type_str == "replicated") {
pool_type = pg_pool_t::TYPE_REPLICATED;
} else if (pool_type_str == "erasure") {
pool_type = pg_pool_t::TYPE_ERASURE;
} else {
ss << "unknown pool type '" << pool_type_str << "'";
err = -EINVAL;
goto reply;
}
bool implicit_rule_creation = false;
int64_t expected_num_objects = 0;
string rule_name;
cmd_getval(cmdmap, "rule", rule_name);
string erasure_code_profile;
cmd_getval(cmdmap, "erasure_code_profile", erasure_code_profile);
if (pool_type == pg_pool_t::TYPE_ERASURE) {
if (erasure_code_profile == "")
erasure_code_profile = "default";
//handle the erasure code profile
if (erasure_code_profile == "default") {
if (!osdmap.has_erasure_code_profile(erasure_code_profile)) {
if (pending_inc.has_erasure_code_profile(erasure_code_profile)) {
dout(20) << "erasure code profile " << erasure_code_profile << " already pending" << dendl;
goto wait;
}
map<string,string> profile_map;
err = osdmap.get_erasure_code_profile_default(cct,
profile_map,
&ss);
if (err)
goto reply;
dout(20) << "erasure code profile " << erasure_code_profile << " set" << dendl;
pending_inc.set_erasure_code_profile(erasure_code_profile, profile_map);
goto wait;
}
}
if (rule_name == "") {
implicit_rule_creation = true;
if (erasure_code_profile == "default") {
rule_name = "erasure-code";
} else {
dout(1) << "implicitly use rule named after the pool: "
<< poolstr << dendl;
rule_name = poolstr;
}
}
expected_num_objects =
cmd_getval_or<int64_t>(cmdmap, "expected_num_objects", 0);
} else {
      //NOTE: for replicated pools, cmd_map puts the rule name into the
      // erasure_code_profile field and expected_num_objects into the rule field
if (erasure_code_profile != "") { // cmd is from CLI
if (rule_name != "") {
string interr;
expected_num_objects = strict_strtoll(rule_name.c_str(), 10, &interr);
if (interr.length()) {
ss << "error parsing integer value '" << rule_name << "': " << interr;
err = -EINVAL;
goto reply;
}
}
rule_name = erasure_code_profile;
} else { // cmd is well-formed
expected_num_objects =
cmd_getval_or<int64_t>(cmdmap, "expected_num_objects", 0);
}
}
if (!implicit_rule_creation && rule_name != "") {
int rule;
err = get_crush_rule(rule_name, &rule, &ss);
if (err == -EAGAIN) {
wait_for_finished_proposal(op, new C_RetryMessage(this, op));
return true;
}
if (err)
goto reply;
}
if (expected_num_objects < 0) {
ss << "'expected_num_objects' must be non-negative";
err = -EINVAL;
goto reply;
}
int64_t fast_read_param = cmd_getval_or<int64_t>(cmdmap, "fast_read", -1);
FastReadType fast_read = FAST_READ_DEFAULT;
if (fast_read_param == 0)
fast_read = FAST_READ_OFF;
else if (fast_read_param > 0)
fast_read = FAST_READ_ON;
int64_t repl_size = 0;
cmd_getval(cmdmap, "size", repl_size);
int64_t target_size_bytes = 0;
double target_size_ratio = 0.0;
cmd_getval(cmdmap, "target_size_bytes", target_size_bytes);
cmd_getval(cmdmap, "target_size_ratio", target_size_ratio);
string pg_autoscale_mode;
cmd_getval(cmdmap, "autoscale_mode", pg_autoscale_mode);
bool bulk = cmd_getval_or<bool>(cmdmap, "bulk", 0);
bool crimson = cmd_getval_or<bool>(cmdmap, "crimson", false) ||
cct->_conf.get_val<bool>("osd_pool_default_crimson");
err = prepare_new_pool(poolstr,
-1, // default crush rule
rule_name,
pg_num, pgp_num, pg_num_min, pg_num_max,
repl_size, target_size_bytes, target_size_ratio,
erasure_code_profile, pool_type,
(uint64_t)expected_num_objects,
fast_read,
pg_autoscale_mode,
bulk,
crimson,
&ss);
if (err < 0) {
switch(err) {
case -EEXIST:
ss << "pool '" << poolstr << "' already exists";
break;
case -EAGAIN:
wait_for_finished_proposal(op, new C_RetryMessage(this, op));
return true;
case -ERANGE:
goto reply;
default:
goto reply;
break;
}
} else {
ss << "pool '" << poolstr << "' created";
}
getline(ss, rs);
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
get_last_committed() + 1));
return true;
} else if (prefix == "osd pool delete" ||
prefix == "osd pool rm") {
// osd pool delete/rm <poolname> <poolname again> --yes-i-really-really-mean-it
string poolstr, poolstr2, sure;
cmd_getval(cmdmap, "pool", poolstr);
cmd_getval(cmdmap, "pool2", poolstr2);
int64_t pool = osdmap.lookup_pg_pool_name(poolstr.c_str());
if (pool < 0) {
ss << "pool '" << poolstr << "' does not exist";
err = 0;
goto reply;
}
bool force_no_fake = false;
cmd_getval(cmdmap, "yes_i_really_really_mean_it", force_no_fake);
bool force = false;
cmd_getval(cmdmap, "yes_i_really_really_mean_it_not_faking", force);
if (poolstr2 != poolstr ||
(!force && !force_no_fake)) {
ss << "WARNING: this will *PERMANENTLY DESTROY* all data stored in pool " << poolstr
<< ". If you are *ABSOLUTELY CERTAIN* that is what you want, pass the pool name *twice*, "
<< "followed by --yes-i-really-really-mean-it.";
err = -EPERM;
goto reply;
}
err = _prepare_remove_pool(pool, &ss, force_no_fake);
if (err == -EAGAIN) {
wait_for_finished_proposal(op, new C_RetryMessage(this, op));
return true;
}
if (err < 0)
goto reply;
goto update;
} else if (prefix == "osd pool rename") {
string srcpoolstr, destpoolstr;
cmd_getval(cmdmap, "srcpool", srcpoolstr);
cmd_getval(cmdmap, "destpool", destpoolstr);
int64_t pool_src = osdmap.lookup_pg_pool_name(srcpoolstr.c_str());
int64_t pool_dst = osdmap.lookup_pg_pool_name(destpoolstr.c_str());
bool confirm = false;
//confirmation may be set to true only by internal operations.
cmd_getval(cmdmap, "yes_i_really_mean_it", confirm);
if (destpoolstr[0] == '.' && !confirm) {
ss << "pool names beginning with . are not allowed";
err = 0;
goto reply;
}
if (pool_src < 0) {
if (pool_dst >= 0) {
// src pool doesn't exist, dst pool does exist: to ensure idempotency
// of operations, assume this rename succeeded, as it is not changing
// the current state. Make sure we output something understandable
// for whoever is issuing the command, if they are paying attention,
// in case it was not intentional; or to avoid a "wtf?" and a bug
// report in case it was intentional, while expecting a failure.
ss << "pool '" << srcpoolstr << "' does not exist; pool '"
<< destpoolstr << "' does -- assuming successful rename";
err = 0;
} else {
ss << "unrecognized pool '" << srcpoolstr << "'";
err = -ENOENT;
}
goto reply;
} else if (pool_dst >= 0) {
// source pool exists and so does the destination pool
ss << "pool '" << destpoolstr << "' already exists";
err = -EEXIST;
goto reply;
}
int ret = _prepare_rename_pool(pool_src, destpoolstr);
if (ret == 0) {
ss << "pool '" << srcpoolstr << "' renamed to '" << destpoolstr << "'";
} else {
ss << "failed to rename pool '" << srcpoolstr << "' to '" << destpoolstr << "': "
<< cpp_strerror(ret);
}
getline(ss, rs);
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, ret, rs,
get_last_committed() + 1));
return true;
} else if (prefix == "osd pool set") {
err = prepare_command_pool_set(cmdmap, ss);
if (err == -EAGAIN)
goto wait;
if (err < 0)
goto reply;
getline(ss, rs);
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
get_last_committed() + 1));
return true;
} else if (prefix == "osd tier add") {
err = check_cluster_features(CEPH_FEATURE_OSD_CACHEPOOL, ss);
if (err == -EAGAIN)
goto wait;
if (err)
goto reply;
string poolstr;
cmd_getval(cmdmap, "pool", poolstr);
int64_t pool_id = osdmap.lookup_pg_pool_name(poolstr);
if (pool_id < 0) {
ss << "unrecognized pool '" << poolstr << "'";
err = -ENOENT;
goto reply;
}
string tierpoolstr;
cmd_getval(cmdmap, "tierpool", tierpoolstr);
int64_t tierpool_id = osdmap.lookup_pg_pool_name(tierpoolstr);
if (tierpool_id < 0) {
ss << "unrecognized pool '" << tierpoolstr << "'";
err = -ENOENT;
goto reply;
}
const pg_pool_t *p = osdmap.get_pg_pool(pool_id);
ceph_assert(p);
const pg_pool_t *tp = osdmap.get_pg_pool(tierpool_id);
ceph_assert(tp);
if (!_check_become_tier(tierpool_id, tp, pool_id, p, &err, &ss)) {
goto reply;
}
// make sure new tier is empty
bool force_nonempty = false;
cmd_getval_compat_cephbool(cmdmap, "force_nonempty", force_nonempty);
const pool_stat_t *pstats = mon.mgrstatmon()->get_pool_stat(tierpool_id);
if (pstats && pstats->stats.sum.num_objects != 0 &&
!force_nonempty) {
ss << "tier pool '" << tierpoolstr << "' is not empty; --force-nonempty to force";
err = -ENOTEMPTY;
goto reply;
}
if (tp->is_erasure()) {
ss << "tier pool '" << tierpoolstr
<< "' is an ec pool, which cannot be a tier";
err = -ENOTSUP;
goto reply;
}
if ((!tp->removed_snaps.empty() || !tp->snaps.empty()) &&
(!force_nonempty ||
!g_conf()->mon_debug_unsafe_allow_tier_with_nonempty_snaps)) {
ss << "tier pool '" << tierpoolstr << "' has snapshot state; it cannot be added as a tier without breaking the pool";
err = -ENOTEMPTY;
goto reply;
}
// go
pg_pool_t *np = pending_inc.get_new_pool(pool_id, p);
pg_pool_t *ntp = pending_inc.get_new_pool(tierpool_id, tp);
if (np->tiers.count(tierpool_id) || ntp->is_tier()) {
wait_for_finished_proposal(op, new C_RetryMessage(this, op));
return true;
}
np->tiers.insert(tierpool_id);
np->set_snap_epoch(pending_inc.epoch); // tier will update to our snap info
ntp->tier_of = pool_id;
ss << "pool '" << tierpoolstr << "' is now (or already was) a tier of '" << poolstr << "'";
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, ss.str(),
get_last_committed() + 1));
return true;
} else if (prefix == "osd tier remove" ||
prefix == "osd tier rm") {
string poolstr;
cmd_getval(cmdmap, "pool", poolstr);
int64_t pool_id = osdmap.lookup_pg_pool_name(poolstr);
if (pool_id < 0) {
ss << "unrecognized pool '" << poolstr << "'";
err = -ENOENT;
goto reply;
}
string tierpoolstr;
cmd_getval(cmdmap, "tierpool", tierpoolstr);
int64_t tierpool_id = osdmap.lookup_pg_pool_name(tierpoolstr);
if (tierpool_id < 0) {
ss << "unrecognized pool '" << tierpoolstr << "'";
err = -ENOENT;
goto reply;
}
const pg_pool_t *p = osdmap.get_pg_pool(pool_id);
ceph_assert(p);
const pg_pool_t *tp = osdmap.get_pg_pool(tierpool_id);
ceph_assert(tp);
if (!_check_remove_tier(pool_id, p, tp, &err, &ss)) {
goto reply;
}
if (p->tiers.count(tierpool_id) == 0) {
ss << "pool '" << tierpoolstr << "' is now (or already was) not a tier of '" << poolstr << "'";
err = 0;
goto reply;
}
if (tp->tier_of != pool_id) {
ss << "tier pool '" << tierpoolstr << "' is a tier of '"
<< osdmap.get_pool_name(tp->tier_of) << "': "
// be scary about it; this is an inconsistency and bells must go off
<< "THIS SHOULD NOT HAVE HAPPENED AT ALL";
err = -EINVAL;
goto reply;
}
if (p->read_tier == tierpool_id) {
ss << "tier pool '" << tierpoolstr << "' is the overlay for '" << poolstr << "'; please remove-overlay first";
err = -EBUSY;
goto reply;
}
// go
pg_pool_t *np = pending_inc.get_new_pool(pool_id, p);
pg_pool_t *ntp = pending_inc.get_new_pool(tierpool_id, tp);
if (np->tiers.count(tierpool_id) == 0 ||
ntp->tier_of != pool_id ||
np->read_tier == tierpool_id) {
wait_for_finished_proposal(op, new C_RetryMessage(this, op));
return true;
}
np->tiers.erase(tierpool_id);
ntp->clear_tier();
ss << "pool '" << tierpoolstr << "' is now (or already was) not a tier of '" << poolstr << "'";
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, ss.str(),
get_last_committed() + 1));
return true;
} else if (prefix == "osd tier set-overlay") {
err = check_cluster_features(CEPH_FEATURE_OSD_CACHEPOOL, ss);
if (err == -EAGAIN)
goto wait;
if (err)
goto reply;
string poolstr;
cmd_getval(cmdmap, "pool", poolstr);
int64_t pool_id = osdmap.lookup_pg_pool_name(poolstr);
if (pool_id < 0) {
ss << "unrecognized pool '" << poolstr << "'";
err = -ENOENT;
goto reply;
}
string overlaypoolstr;
cmd_getval(cmdmap, "overlaypool", overlaypoolstr);
int64_t overlaypool_id = osdmap.lookup_pg_pool_name(overlaypoolstr);
if (overlaypool_id < 0) {
ss << "unrecognized pool '" << overlaypoolstr << "'";
err = -ENOENT;
goto reply;
}
const pg_pool_t *p = osdmap.get_pg_pool(pool_id);
ceph_assert(p);
const pg_pool_t *overlay_p = osdmap.get_pg_pool(overlaypool_id);
ceph_assert(overlay_p);
if (p->tiers.count(overlaypool_id) == 0) {
ss << "tier pool '" << overlaypoolstr << "' is not a tier of '" << poolstr << "'";
err = -EINVAL;
goto reply;
}
if (p->read_tier == overlaypool_id) {
err = 0;
ss << "overlay for '" << poolstr << "' is now (or already was) '" << overlaypoolstr << "'";
goto reply;
}
if (p->has_read_tier()) {
ss << "pool '" << poolstr << "' has overlay '"
<< osdmap.get_pool_name(p->read_tier)
<< "'; please remove-overlay first";
err = -EINVAL;
goto reply;
}
// go
pg_pool_t *np = pending_inc.get_new_pool(pool_id, p);
np->read_tier = overlaypool_id;
np->write_tier = overlaypool_id;
np->set_last_force_op_resend(pending_inc.epoch);
pg_pool_t *noverlay_p = pending_inc.get_new_pool(overlaypool_id, overlay_p);
noverlay_p->set_last_force_op_resend(pending_inc.epoch);
ss << "overlay for '" << poolstr << "' is now (or already was) '" << overlaypoolstr << "'";
if (overlay_p->cache_mode == pg_pool_t::CACHEMODE_NONE)
ss <<" (WARNING: overlay pool cache_mode is still NONE)";
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, ss.str(),
get_last_committed() + 1));
return true;
} else if (prefix == "osd tier remove-overlay" ||
prefix == "osd tier rm-overlay") {
string poolstr;
cmd_getval(cmdmap, "pool", poolstr);
int64_t pool_id = osdmap.lookup_pg_pool_name(poolstr);
if (pool_id < 0) {
ss << "unrecognized pool '" << poolstr << "'";
err = -ENOENT;
goto reply;
}
const pg_pool_t *p = osdmap.get_pg_pool(pool_id);
ceph_assert(p);
if (!p->has_read_tier()) {
err = 0;
ss << "there is now (or already was) no overlay for '" << poolstr << "'";
goto reply;
}
if (!_check_remove_tier(pool_id, p, NULL, &err, &ss)) {
goto reply;
}
// go
pg_pool_t *np = pending_inc.get_new_pool(pool_id, p);
if (np->has_read_tier()) {
const pg_pool_t *op = osdmap.get_pg_pool(np->read_tier);
pg_pool_t *nop = pending_inc.get_new_pool(np->read_tier,op);
nop->set_last_force_op_resend(pending_inc.epoch);
}
if (np->has_write_tier()) {
const pg_pool_t *op = osdmap.get_pg_pool(np->write_tier);
pg_pool_t *nop = pending_inc.get_new_pool(np->write_tier, op);
nop->set_last_force_op_resend(pending_inc.epoch);
}
np->clear_read_tier();
np->clear_write_tier();
np->set_last_force_op_resend(pending_inc.epoch);
ss << "there is now (or already was) no overlay for '" << poolstr << "'";
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, ss.str(),
get_last_committed() + 1));
return true;
} else if (prefix == "osd tier cache-mode") {
err = check_cluster_features(CEPH_FEATURE_OSD_CACHEPOOL, ss);
if (err == -EAGAIN)
goto wait;
if (err)
goto reply;
string poolstr;
cmd_getval(cmdmap, "pool", poolstr);
int64_t pool_id = osdmap.lookup_pg_pool_name(poolstr);
if (pool_id < 0) {
ss << "unrecognized pool '" << poolstr << "'";
err = -ENOENT;
goto reply;
}
const pg_pool_t *p = osdmap.get_pg_pool(pool_id);
ceph_assert(p);
if (!p->is_tier()) {
ss << "pool '" << poolstr << "' is not a tier";
err = -EINVAL;
goto reply;
}
string modestr;
cmd_getval(cmdmap, "mode", modestr);
pg_pool_t::cache_mode_t mode = pg_pool_t::get_cache_mode_from_str(modestr);
if (int(mode) < 0) {
ss << "'" << modestr << "' is not a valid cache mode";
err = -EINVAL;
goto reply;
}
bool sure = false;
cmd_getval(cmdmap, "yes_i_really_mean_it", sure);
if (mode == pg_pool_t::CACHEMODE_FORWARD ||
mode == pg_pool_t::CACHEMODE_READFORWARD) {
ss << "'" << modestr << "' is no longer a supported cache mode";
err = -EPERM;
goto reply;
}
if ((mode != pg_pool_t::CACHEMODE_WRITEBACK &&
mode != pg_pool_t::CACHEMODE_NONE &&
mode != pg_pool_t::CACHEMODE_PROXY &&
mode != pg_pool_t::CACHEMODE_READPROXY) &&
!sure) {
ss << "'" << modestr << "' is not a well-supported cache mode and may "
<< "corrupt your data. pass --yes-i-really-mean-it to force.";
err = -EPERM;
goto reply;
}
// pool already has this cache-mode set and there are no pending changes
if (p->cache_mode == mode &&
(pending_inc.new_pools.count(pool_id) == 0 ||
pending_inc.new_pools[pool_id].cache_mode == p->cache_mode)) {
ss << "set cache-mode for pool '" << poolstr << "'"
<< " to " << pg_pool_t::get_cache_mode_name(mode);
err = 0;
goto reply;
}
/* Mode description:
*
* none: No cache-mode defined
* forward: Forward all reads and writes to base pool [removed]
* writeback: Cache writes, promote reads from base pool
* readonly: Forward writes to base pool
* readforward: Writes are in writeback mode, Reads are in forward mode [removed]
* proxy: Proxy all reads and writes to base pool
* readproxy: Writes are in writeback mode, Reads are in proxy mode
*
* Hence, these are the allowed transitions:
*
* none -> any
* forward -> proxy || readforward || readproxy || writeback || any IF num_objects_dirty == 0
* proxy -> readproxy || writeback || any IF num_objects_dirty == 0
* readforward -> forward || proxy || readproxy || writeback || any IF num_objects_dirty == 0
* readproxy -> proxy || writeback || any IF num_objects_dirty == 0
* writeback -> readproxy || proxy
* readonly -> any
*/
    // We check if the transition is valid against the current pool mode, as
    // it is the only committed state thus far. We will blatantly squash
    // whatever mode is on the pending state.
if (p->cache_mode == pg_pool_t::CACHEMODE_WRITEBACK &&
(mode != pg_pool_t::CACHEMODE_PROXY &&
mode != pg_pool_t::CACHEMODE_READPROXY)) {
ss << "unable to set cache-mode '" << pg_pool_t::get_cache_mode_name(mode)
<< "' on a '" << pg_pool_t::get_cache_mode_name(p->cache_mode)
<< "' pool; only '"
<< pg_pool_t::get_cache_mode_name(pg_pool_t::CACHEMODE_PROXY)
<< "','"
<< pg_pool_t::get_cache_mode_name(pg_pool_t::CACHEMODE_READPROXY)
<< "' allowed.";
err = -EINVAL;
goto reply;
}
if ((p->cache_mode == pg_pool_t::CACHEMODE_READFORWARD &&
(mode != pg_pool_t::CACHEMODE_WRITEBACK &&
mode != pg_pool_t::CACHEMODE_PROXY &&
mode != pg_pool_t::CACHEMODE_READPROXY)) ||
(p->cache_mode == pg_pool_t::CACHEMODE_READPROXY &&
(mode != pg_pool_t::CACHEMODE_WRITEBACK &&
mode != pg_pool_t::CACHEMODE_PROXY)) ||
(p->cache_mode == pg_pool_t::CACHEMODE_PROXY &&
(mode != pg_pool_t::CACHEMODE_WRITEBACK &&
mode != pg_pool_t::CACHEMODE_READPROXY)) ||
(p->cache_mode == pg_pool_t::CACHEMODE_FORWARD &&
(mode != pg_pool_t::CACHEMODE_WRITEBACK &&
mode != pg_pool_t::CACHEMODE_PROXY &&
mode != pg_pool_t::CACHEMODE_READPROXY))) {
const pool_stat_t* pstats =
mon.mgrstatmon()->get_pool_stat(pool_id);
if (pstats && pstats->stats.sum.num_objects_dirty > 0) {
ss << "unable to set cache-mode '"
<< pg_pool_t::get_cache_mode_name(mode) << "' on pool '" << poolstr
<< "': dirty objects found";
err = -EBUSY;
goto reply;
}
}
// go
pg_pool_t *np = pending_inc.get_new_pool(pool_id, p);
np->cache_mode = mode;
// set this both when moving to and from cache_mode NONE. this is to
// capture legacy pools that were set up before this flag existed.
np->flags |= pg_pool_t::FLAG_INCOMPLETE_CLONES;
ss << "set cache-mode for pool '" << poolstr
<< "' to " << pg_pool_t::get_cache_mode_name(mode);
if (mode == pg_pool_t::CACHEMODE_NONE) {
const pg_pool_t *base_pool = osdmap.get_pg_pool(np->tier_of);
ceph_assert(base_pool);
if (base_pool->read_tier == pool_id ||
base_pool->write_tier == pool_id)
ss <<" (WARNING: pool is still configured as read or write tier)";
}
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, ss.str(),
get_last_committed() + 1));
return true;
} else if (prefix == "osd tier add-cache") {
err = check_cluster_features(CEPH_FEATURE_OSD_CACHEPOOL, ss);
if (err == -EAGAIN)
goto wait;
if (err)
goto reply;
string poolstr;
cmd_getval(cmdmap, "pool", poolstr);
int64_t pool_id = osdmap.lookup_pg_pool_name(poolstr);
if (pool_id < 0) {
ss << "unrecognized pool '" << poolstr << "'";
err = -ENOENT;
goto reply;
}
string tierpoolstr;
cmd_getval(cmdmap, "tierpool", tierpoolstr);
int64_t tierpool_id = osdmap.lookup_pg_pool_name(tierpoolstr);
if (tierpool_id < 0) {
ss << "unrecognized pool '" << tierpoolstr << "'";
err = -ENOENT;
goto reply;
}
const pg_pool_t *p = osdmap.get_pg_pool(pool_id);
ceph_assert(p);
const pg_pool_t *tp = osdmap.get_pg_pool(tierpool_id);
ceph_assert(tp);
if (!_check_become_tier(tierpool_id, tp, pool_id, p, &err, &ss)) {
goto reply;
}
int64_t size = 0;
if (!cmd_getval(cmdmap, "size", size)) {
ss << "unable to parse 'size' value '"
<< cmd_vartype_stringify(cmdmap.at("size")) << "'";
err = -EINVAL;
goto reply;
}
// make sure new tier is empty
const pool_stat_t *pstats =
mon.mgrstatmon()->get_pool_stat(tierpool_id);
if (pstats && pstats->stats.sum.num_objects != 0) {
ss << "tier pool '" << tierpoolstr << "' is not empty";
err = -ENOTEMPTY;
goto reply;
}
auto& modestr = g_conf().get_val<string>("osd_tier_default_cache_mode");
pg_pool_t::cache_mode_t mode = pg_pool_t::get_cache_mode_from_str(modestr);
if (int(mode) < 0) {
ss << "osd tier cache default mode '" << modestr << "' is not a valid cache mode";
err = -EINVAL;
goto reply;
}
HitSet::Params hsp;
auto& cache_hit_set_type =
g_conf().get_val<string>("osd_tier_default_cache_hit_set_type");
if (cache_hit_set_type == "bloom") {
BloomHitSet::Params *bsp = new BloomHitSet::Params;
bsp->set_fpp(g_conf().get_val<double>("osd_pool_default_hit_set_bloom_fpp"));
hsp = HitSet::Params(bsp);
} else if (cache_hit_set_type == "explicit_hash") {
hsp = HitSet::Params(new ExplicitHashHitSet::Params);
} else if (cache_hit_set_type == "explicit_object") {
hsp = HitSet::Params(new ExplicitObjectHitSet::Params);
} else {
ss << "osd tier cache default hit set type '"
<< cache_hit_set_type << "' is not a known type";
err = -EINVAL;
goto reply;
}
// go
pg_pool_t *np = pending_inc.get_new_pool(pool_id, p);
pg_pool_t *ntp = pending_inc.get_new_pool(tierpool_id, tp);
if (np->tiers.count(tierpool_id) || ntp->is_tier()) {
wait_for_finished_proposal(op, new C_RetryMessage(this, op));
return true;
}
np->tiers.insert(tierpool_id);
np->read_tier = np->write_tier = tierpool_id;
np->set_snap_epoch(pending_inc.epoch); // tier will update to our snap info
np->set_last_force_op_resend(pending_inc.epoch);
ntp->set_last_force_op_resend(pending_inc.epoch);
ntp->tier_of = pool_id;
ntp->cache_mode = mode;
ntp->hit_set_count = g_conf().get_val<uint64_t>("osd_tier_default_cache_hit_set_count");
ntp->hit_set_period = g_conf().get_val<uint64_t>("osd_tier_default_cache_hit_set_period");
ntp->min_read_recency_for_promote = g_conf().get_val<uint64_t>("osd_tier_default_cache_min_read_recency_for_promote");
ntp->min_write_recency_for_promote = g_conf().get_val<uint64_t>("osd_tier_default_cache_min_write_recency_for_promote");
ntp->hit_set_grade_decay_rate = g_conf().get_val<uint64_t>("osd_tier_default_cache_hit_set_grade_decay_rate");
ntp->hit_set_search_last_n = g_conf().get_val<uint64_t>("osd_tier_default_cache_hit_set_search_last_n");
ntp->hit_set_params = hsp;
ntp->target_max_bytes = size;
ss << "pool '" << tierpoolstr << "' is now (or already was) a cache tier of '" << poolstr << "'";
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, ss.str(),
get_last_committed() + 1));
return true;
} else if (prefix == "osd pool set-quota") {
string poolstr;
cmd_getval(cmdmap, "pool", poolstr);
int64_t pool_id = osdmap.lookup_pg_pool_name(poolstr);
if (pool_id < 0) {
ss << "unrecognized pool '" << poolstr << "'";
err = -ENOENT;
goto reply;
}
string field;
cmd_getval(cmdmap, "field", field);
if (field != "max_objects" && field != "max_bytes") {
ss << "unrecognized field '" << field << "'; should be 'max_bytes' or 'max_objects'";
err = -EINVAL;
goto reply;
}
// val could contain unit designations, so we treat as a string
string val;
cmd_getval(cmdmap, "val", val);
string tss;
int64_t value;
if (field == "max_objects") {
value = strict_si_cast<uint64_t>(val, &tss);
} else if (field == "max_bytes") {
value = strict_iecstrtoll(val, &tss);
} else {
ceph_abort_msg("unrecognized option");
}
if (!tss.empty()) {
ss << "error parsing value '" << val << "': " << tss;
err = -EINVAL;
goto reply;
}
pg_pool_t *pi = pending_inc.get_new_pool(pool_id, osdmap.get_pg_pool(pool_id));
if (field == "max_objects") {
pi->quota_max_objects = value;
} else if (field == "max_bytes") {
pi->quota_max_bytes = value;
} else {
ceph_abort_msg("unrecognized option");
}
ss << "set-quota " << field << " = " << value << " for pool " << poolstr;
rs = ss.str();
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
get_last_committed() + 1));
return true;
} else if (prefix == "osd pool application enable" ||
prefix == "osd pool application disable" ||
prefix == "osd pool application set" ||
prefix == "osd pool application rm") {
err = prepare_command_pool_application(prefix, cmdmap, ss);
if (err == -EAGAIN) {
goto wait;
} else if (err < 0) {
goto reply;
} else {
goto update;
}
} else if (prefix == "osd force-create-pg") {
pg_t pgid;
string pgidstr;
err = parse_pgid(cmdmap, ss, pgid, pgidstr);
if (err < 0)
goto reply;
bool sure = false;
cmd_getval(cmdmap, "yes_i_really_mean_it", sure);
if (!sure) {
ss << "This command will recreate a lost (as in data lost) PG with data in it, such "
<< "that the cluster will give up ever trying to recover the lost data. Do this "
<< "only if you are certain that all copies of the PG are in fact lost and you are "
<< "willing to accept that the data is permanently destroyed. Pass "
<< "--yes-i-really-mean-it to proceed.";
err = -EPERM;
goto reply;
}
bool creating_now;
{
std::lock_guard<std::mutex> l(creating_pgs_lock);
auto emplaced = creating_pgs.pgs.emplace(
pgid,
creating_pgs_t::pg_create_info(osdmap.get_epoch(),
ceph_clock_now()));
creating_now = emplaced.second;
}
if (creating_now) {
ss << "pg " << pgidstr << " now creating, ok";
// set the pool's CREATING flag so that (1) the osd won't ignore our
// create message and (2) we won't propose any future pg_num changes
// until after the PG has been instantiated.
if (pending_inc.new_pools.count(pgid.pool()) == 0) {
pending_inc.new_pools[pgid.pool()] = *osdmap.get_pg_pool(pgid.pool());
}
pending_inc.new_pools[pgid.pool()].flags |= pg_pool_t::FLAG_CREATING;
err = 0;
goto update;
} else {
ss << "pg " << pgid << " already creating";
err = 0;
goto reply;
}
} else if (prefix == "osd force_healthy_stretch_mode") {
bool sure = false;
cmd_getval(cmdmap, "yes_i_really_mean_it", sure);
if (!sure) {
ss << "This command will require peering across multiple CRUSH buckets "
"(probably two data centers or availability zones?) and may result in PGs "
"going inactive until backfilling is complete. Pass --yes-i-really-mean-it to proceed.";
err = -EPERM;
goto reply;
}
try_end_recovery_stretch_mode(true);
ss << "Triggering healthy stretch mode";
err = 0;
goto reply;
} else if (prefix == "osd force_recovery_stretch_mode") {
bool sure = false;
cmd_getval(cmdmap, "yes_i_really_mean_it", sure);
if (!sure) {
ss << "This command will increase pool sizes to try and spread them "
"across multiple CRUSH buckets (probably two data centers or "
"availability zones?) and should have happened automatically"
"Pass --yes-i-really-mean-it to proceed.";
err = -EPERM;
goto reply;
}
mon.go_recovery_stretch_mode();
ss << "Triggering recovery stretch mode";
err = 0;
goto reply;
} else if (prefix == "osd set-allow-crimson") {
bool sure = false;
cmd_getval(cmdmap, "yes_i_really_mean_it", sure);
bool experimental_enabled =
g_ceph_context->check_experimental_feature_enabled("crimson");
if (!sure || !experimental_enabled) {
ss << "This command will allow usage of crimson-osd osd daemons. "
<< "crimson-osd is not considered stable and will likely cause "
<< "crashes or data corruption. At this time, crimson-osd is mainly "
<< "useful for performance evaluation, testing, and development. "
<< "If you are sure, add --yes-i-really-mean-it and add 'crimson' to "
<< "the experimental features config. This setting is irrevocable.";
err = -EPERM;
goto reply;
}
err = 0;
if (osdmap.get_allow_crimson()) {
goto reply;
} else {
pending_inc.set_allow_crimson();
goto update;
}
} else {
err = -EINVAL;
}
reply:
getline(ss, rs);
if (err < 0 && rs.length() == 0)
rs = cpp_strerror(err);
mon.reply_command(op, err, rs, rdata, get_last_committed());
return ret;
update:
getline(ss, rs);
wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs,
get_last_committed() + 1));
return true;
wait:
wait_for_finished_proposal(op, new C_RetryMessage(this, op));
return true;
}
// Authorize a pool op against the requesting session's caps.
// Returns true when the request was rejected (an -EPERM reply has already
// been sent); returns false when the caller should keep processing the op.
bool OSDMonitor::enforce_pool_op_caps(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto req = op->get_req<MPoolOp>();
  MonSession *s = op->get_session();
  if (s == nullptr) {
    // no session to authorize against; reject outright
    _pool_op_reply(op, -EPERM, osdmap.get_epoch());
    return true;
  }

  const bool unmanaged_snap_op =
    (req->op == POOL_OP_CREATE_UNMANAGED_SNAP ||
     req->op == POOL_OP_DELETE_UNMANAGED_SNAP);
  if (unmanaged_snap_op) {
    // unmanaged-snap ops get a finer-grained, per-pool permission check
    const std::string* pool_name = nullptr;
    const pg_pool_t *pg_pool = osdmap.get_pg_pool(req->pool);
    if (pg_pool != nullptr) {
      pool_name = &osdmap.get_pool_name(req->pool);
    }
    if (!is_unmanaged_snap_op_permitted(cct, mon.key_server,
                                        s->entity_name, s->caps,
                                        s->get_peer_socket_addr(),
                                        pool_name)) {
      dout(0) << "got unmanaged-snap pool op from entity with insufficient "
              << "privileges. message: " << *req << std::endl
              << "caps: " << s->caps << dendl;
      _pool_op_reply(op, -EPERM, osdmap.get_epoch());
      return true;
    }
  } else {
    // every other pool op requires write caps on the osd service
    if (!s->is_capable("osd", MON_CAP_W)) {
      dout(0) << "got pool op from entity with insufficient privileges. "
              << "message: " << *req << std::endl
              << "caps: " << s->caps << dendl;
      _pool_op_reply(op, -EPERM, osdmap.get_epoch());
      return true;
    }
  }
  return false;
}
// Fast path for pool ops that can be answered from the committed osdmap
// without proposing a map change.  Returns true if the op was fully handled
// here (a reply has been sent); false if it must proceed to prepare_pool_op().
bool OSDMonitor::preprocess_pool_op(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MPoolOp>();
  // capability check first; if it fails a reply was already sent
  if (enforce_pool_op_caps(op)) {
    return true;
  }

  // ignore requests addressed to a different cluster
  if (m->fsid != mon.monmap->fsid) {
    dout(0) << __func__ << " drop message on fsid " << m->fsid
            << " != " << mon.monmap->fsid << " for " << *m << dendl;
    _pool_op_reply(op, -EINVAL, osdmap.get_epoch());
    return true;
  }

  if (m->op == POOL_OP_CREATE)
    return preprocess_pool_op_create(op);

  const pg_pool_t *p = osdmap.get_pg_pool(m->pool);
  if (p == nullptr) {
    dout(10) << "attempt to operate on non-existent pool id " << m->pool << dendl;
    // deleting an already-absent pool is an idempotent success;
    // any other op on a missing pool is an error
    if (m->op == POOL_OP_DELETE) {
      _pool_op_reply(op, 0, osdmap.get_epoch());
    } else {
      _pool_op_reply(op, -ENOENT, osdmap.get_epoch());
    }
    return true;
  }

  // check if the snap and snapname exist
  bool snap_exists = false;
  if (p->snap_exists(m->name.c_str()))
    snap_exists = true;

  // Per-op fast-path checks: reply immediately (return true) when the
  // request is invalid for the pool's snapshot mode or is a no-op against
  // the committed map; otherwise return false to let prepare_pool_op()
  // propose the change.
  switch (m->op) {
  case POOL_OP_CREATE_SNAP:
    // pool (self-managed) snaps are incompatible with unmanaged-snaps mode
    // and with tier pools
    if (p->is_unmanaged_snaps_mode() || p->is_tier()) {
      _pool_op_reply(op, -EINVAL, osdmap.get_epoch());
      return true;
    }
    if (snap_exists) {
      // snap already present -- idempotent success
      _pool_op_reply(op, 0, osdmap.get_epoch());
      return true;
    }
    return false;
  case POOL_OP_CREATE_UNMANAGED_SNAP:
    // the two snapshot modes are mutually exclusive per pool
    if (p->is_pool_snaps_mode()) {
      _pool_op_reply(op, -EINVAL, osdmap.get_epoch());
      return true;
    }
    return false;
  case POOL_OP_DELETE_SNAP:
    if (p->is_unmanaged_snaps_mode()) {
      _pool_op_reply(op, -EINVAL, osdmap.get_epoch());
      return true;
    }
    if (!snap_exists) {
      // nothing to delete -- idempotent success
      _pool_op_reply(op, 0, osdmap.get_epoch());
      return true;
    }
    return false;
  case POOL_OP_DELETE_UNMANAGED_SNAP:
    if (p->is_pool_snaps_mode()) {
      _pool_op_reply(op, -EINVAL, osdmap.get_epoch());
      return true;
    }
    if (_is_removed_snap(m->pool, m->snapid)) {
      // snap already removed or purged -- idempotent success
      _pool_op_reply(op, 0, osdmap.get_epoch());
      return true;
    }
    return false;
  case POOL_OP_DELETE:
    // NOTE(review): replies success when a pool with m->name exists in the
    // committed map -- presumably an idempotency shortcut; confirm against
    // the MPoolOp DELETE caller's expectations.
    if (osdmap.lookup_pg_pool_name(m->name.c_str()) >= 0) {
      _pool_op_reply(op, 0, osdmap.get_epoch());
      return true;
    }
    return false;
  case POOL_OP_AUID_CHANGE:
    // always forwarded to prepare_pool_op()
    return false;
  default:
    ceph_abort();
    break;
  }

  return false;
}
// A snap counts as removed when its pool no longer exists, when it sits in
// the committed osdmap's removed_snaps_queue, or when it has already been
// purged.
bool OSDMonitor::_is_removed_snap(int64_t pool, snapid_t snap)
{
  // a deleted pool implies all of its snaps are gone
  if (!osdmap.have_pg_pool(pool)) {
    dout(10) << __func__ << " pool " << pool << " snap " << snap
	     << " - pool dne" << dendl;
    return true;
  }
  // queued for removal in the committed map?
  if (osdmap.in_removed_snaps_queue(pool, snap)) {
    dout(10) << __func__ << " pool " << pool << " snap " << snap
	     << " - in osdmap removed_snaps_queue" << dendl;
    return true;
  }
  // already purged? lookup_purged_snap() reports the containing interval.
  snapid_t first, last;
  if (lookup_purged_snap(pool, snap, &first, &last) == 0) {
    dout(10) << __func__ << " pool " << pool << " snap " << snap
	     << " - purged, [" << first << "," << last << ")" << dendl;
    return true;
  }
  return false;
}
// Return true if (pool, snap) is slated for removal in the not-yet-committed
// pending incremental: either the whole pool is pending deletion, or the
// snap is in the pending new_removed_snaps set.
bool OSDMonitor::_is_pending_removed_snap(int64_t pool, snapid_t snap)
{
  const bool pool_going_away = pending_inc.old_pools.count(pool) > 0;
  if (pool_going_away) {
    dout(10) << __func__ << " pool " << pool << " snap " << snap
	     << " - pool pending deletion" << dendl;
    return true;
  }
  if (pending_inc.in_new_removed_snaps(pool, snap)) {
    dout(10) << __func__ << " pool " << pool << " snap " << snap
	     << " - in pending new_removed_snaps" << dendl;
    return true;
  }
  return false;
}
// Read-only fast path for pool creation: if a pool with the requested name
// already exists, reply success immediately (create is idempotent) and
// consume the op.  Otherwise return false so the op proceeds to
// prepare_pool_op_create().
bool OSDMonitor::preprocess_pool_op_create(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MPoolOp>();
  if (osdmap.lookup_pg_pool_name(m->name.c_str()) >= 0) {
    // pool already exists; nothing to do
    _pool_op_reply(op, 0, osdmap.get_epoch());
    return true;
  }
  return false;
}
// Write path for MPoolOp operations: dispatches pool create/delete to their
// dedicated handlers, then handles the four snapshot ops (pool-snaps and
// unmanaged-snaps create/delete) against a *projected* copy of the pool.
//
// Returns true when an update was staged in pending_inc (the reply is sent
// after the paxos proposal commits); false when a reply was already sent
// and no map change is needed.
bool OSDMonitor::prepare_pool_op(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MPoolOp>();
  dout(10) << "prepare_pool_op " << *m << dendl;
  if (m->op == POOL_OP_CREATE) {
    return prepare_pool_op_create(op);
  } else if (m->op == POOL_OP_DELETE) {
    return prepare_pool_op_delete(op);
  }

  int ret = 0;
  bool changed = false;

  if (!osdmap.have_pg_pool(m->pool)) {
    _pool_op_reply(op, -ENOENT, osdmap.get_epoch());
    return false;
  }

  const pg_pool_t *pool = osdmap.get_pg_pool(m->pool);

  if (m->op == POOL_OP_CREATE_SNAP ||
      m->op == POOL_OP_CREATE_UNMANAGED_SNAP) {
    // pools attached to a CephFS file system manage snapshots through the
    // MDS; refuse monitor-managed snapshots for them
    if (const auto& fsmap = mon.mdsmon()->get_fsmap(); fsmap.pool_in_use(m->pool)) {
      dout(20) << "monitor-managed snapshots have been disabled for pools "
		" attached to an fs - pool:" << m->pool << dendl;
      _pool_op_reply(op, -EOPNOTSUPP, osdmap.get_epoch());
      return false;
    }
  }

  // First pass: validate against the *committed* pool state and reply
  // immediately for idempotent no-ops and invalid combinations.  Note the
  // deliberate fall-throughs between related cases.
  switch (m->op) {
  case POOL_OP_CREATE_SNAP:
    if (pool->is_tier()) {
      ret = -EINVAL;
      _pool_op_reply(op, ret, osdmap.get_epoch());
      return false;
    } // else, fall through
  case POOL_OP_DELETE_SNAP:
    if (!pool->is_unmanaged_snaps_mode()) {
      // creating an existing snap / deleting a missing snap is a no-op
      bool snap_exists = pool->snap_exists(m->name.c_str());
      if ((m->op == POOL_OP_CREATE_SNAP && snap_exists)
	  || (m->op == POOL_OP_DELETE_SNAP && !snap_exists)) {
	ret = 0;
      } else {
	break;  // real work to do below
      }
    } else {
      // pool-snaps ops are invalid on an unmanaged-snaps pool
      ret = -EINVAL;
    }
    _pool_op_reply(op, ret, osdmap.get_epoch());
    return false;

  case POOL_OP_DELETE_UNMANAGED_SNAP:
    // we won't allow removal of an unmanaged snapshot from a pool
    // not in unmanaged snaps mode.
    if (!pool->is_unmanaged_snaps_mode()) {
      _pool_op_reply(op, -ENOTSUP, osdmap.get_epoch());
      return false;
    }
    /* fall-thru */
  case POOL_OP_CREATE_UNMANAGED_SNAP:
    // but we will allow creating an unmanaged snapshot on any pool
    // as long as it is not in 'pool' snaps mode.
    if (pool->is_pool_snaps_mode()) {
      _pool_op_reply(op, -EINVAL, osdmap.get_epoch());
      return false;
    }
  }

  // projected pool info: start from any already-pending update so multiple
  // ops in one paxos round compose correctly
  pg_pool_t pp;
  if (pending_inc.new_pools.count(m->pool))
    pp = pending_inc.new_pools[m->pool];
  else
    pp = *osdmap.get_pg_pool(m->pool);

  bufferlist reply_data;

  // pool snaps vs unmanaged snaps are mutually exclusive
  switch (m->op) {
  case POOL_OP_CREATE_SNAP:
  case POOL_OP_DELETE_SNAP:
    if (pp.is_unmanaged_snaps_mode()) {
      ret = -EINVAL;
      goto out;
    }
    break;
  case POOL_OP_CREATE_UNMANAGED_SNAP:
  case POOL_OP_DELETE_UNMANAGED_SNAP:
    if (pp.is_pool_snaps_mode()) {
      ret = -EINVAL;
      goto out;
    }
  }

  // Second pass: apply the op to the projected pool
  switch (m->op) {
  case POOL_OP_CREATE_SNAP:
    if (!pp.snap_exists(m->name.c_str())) {
      pp.add_snap(m->name.c_str(), ceph_clock_now());
      dout(10) << "create snap in pool " << m->pool << " " << m->name
	       << " seq " << pp.get_snap_epoch() << dendl;
      changed = true;
    }
    break;

  case POOL_OP_DELETE_SNAP:
    {
      snapid_t s = pp.snap_exists(m->name.c_str());
      if (s) {
	pp.remove_snap(s);
	pending_inc.new_removed_snaps[m->pool].insert(s);
	changed = true;
      }
    }
    break;

  case POOL_OP_CREATE_UNMANAGED_SNAP:
    {
      // legacy (pre-octopus) encoding is selected by the cluster's
      // require_osd_release
      uint64_t snapid = pp.add_unmanaged_snap(
	osdmap.require_osd_release < ceph_release_t::octopus);
      encode(snapid, reply_data);  // new snapid is returned in the reply
      changed = true;
    }
    break;

  case POOL_OP_DELETE_UNMANAGED_SNAP:
    if (!_is_removed_snap(m->pool, m->snapid) &&
	!_is_pending_removed_snap(m->pool, m->snapid)) {
      if (m->snapid > pp.get_snap_seq()) {
	// refuse to remove a snapid that was never allocated
	_pool_op_reply(op, -ENOENT, osdmap.get_epoch());
	return false;
      }
      pp.remove_unmanaged_snap(
	m->snapid,
	osdmap.require_osd_release < ceph_release_t::octopus);
      pending_inc.new_removed_snaps[m->pool].insert(m->snapid);
      // also record the new seq as purged: this avoids a discontinuity
      // after all of the snaps have been purged, since the seq assigned
      // during removal lives in the same namespace as the actual snaps.
      pending_pseudo_purged_snaps[m->pool].insert(pp.get_snap_seq());
      changed = true;
    }
    break;

  case POOL_OP_AUID_CHANGE:
    // auid support was removed; always unsupported
    _pool_op_reply(op, -EOPNOTSUPP, osdmap.get_epoch());
    return false;

  default:
    ceph_abort();
    break;
  }

  if (changed) {
    pp.set_snap_epoch(pending_inc.epoch);
    pending_inc.new_pools[m->pool] = pp;
  }

 out:
  wait_for_finished_proposal(op, new OSDMonitor::C_PoolOp(this, op, ret, pending_inc.epoch, &reply_data));
  return true;
}
// Write path for pool creation: stage the new pool in pending_inc and
// send the reply once the paxos proposal commits.
bool OSDMonitor::prepare_pool_op_create(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  const int err = prepare_new_pool(op);
  wait_for_finished_proposal(
    op, new OSDMonitor::C_PoolOp(this, op, err, pending_inc.epoch));
  return true;
}
int OSDMonitor::_check_remove_pool(int64_t pool_id, const pg_pool_t& pool,
ostream *ss)
{
const string& poolstr = osdmap.get_pool_name(pool_id);
// If the Pool is in use by CephFS, refuse to delete it
FSMap const &pending_fsmap = mon.mdsmon()->get_pending_fsmap();
if (pending_fsmap.pool_in_use(pool_id)) {
*ss << "pool '" << poolstr << "' is in use by CephFS";
return -EBUSY;
}
if (pool.tier_of >= 0) {
*ss << "pool '" << poolstr << "' is a tier of '"
<< osdmap.get_pool_name(pool.tier_of) << "'";
return -EBUSY;
}
if (!pool.tiers.empty()) {
*ss << "pool '" << poolstr << "' has tiers";
for(auto tier : pool.tiers) {
*ss << " " << osdmap.get_pool_name(tier);
}
return -EBUSY;
}
if (!g_conf()->mon_allow_pool_delete) {
*ss << "pool deletion is disabled; you must first set the mon_allow_pool_delete config option to true before you can destroy a pool";
return -EPERM;
}
if (pool.has_flag(pg_pool_t::FLAG_NODELETE)) {
*ss << "pool deletion is disabled; you must unset nodelete flag for the pool first";
return -EPERM;
}
*ss << "pool '" << poolstr << "' removed";
return 0;
}
/**
* Check if it is safe to add a tier to a base pool
*
* @return
* True if the operation should proceed, false if we should abort here
* (abort doesn't necessarily mean error, could be idempotency)
*/
bool OSDMonitor::_check_become_tier(
const int64_t tier_pool_id, const pg_pool_t *tier_pool,
const int64_t base_pool_id, const pg_pool_t *base_pool,
int *err,
ostream *ss) const
{
const std::string &tier_pool_name = osdmap.get_pool_name(tier_pool_id);
const std::string &base_pool_name = osdmap.get_pool_name(base_pool_id);
if (tier_pool->is_crimson()) {
*ss << "pool '" << tier_pool_name << "' is a crimson pool, tiering "
<< "features are not supported";
*err = -EINVAL;
return false;
}
if (base_pool->is_crimson()) {
*ss << "pool '" << base_pool_name << "' is a crimson pool, tiering "
<< "features are not supported";
*err = -EINVAL;
return false;
}
const FSMap &pending_fsmap = mon.mdsmon()->get_pending_fsmap();
if (pending_fsmap.pool_in_use(tier_pool_id)) {
*ss << "pool '" << tier_pool_name << "' is in use by CephFS";
*err = -EBUSY;
return false;
}
if (base_pool->tiers.count(tier_pool_id)) {
ceph_assert(tier_pool->tier_of == base_pool_id);
*err = 0;
*ss << "pool '" << tier_pool_name << "' is now (or already was) a tier of '"
<< base_pool_name << "'";
return false;
}
if (base_pool->is_tier()) {
*ss << "pool '" << base_pool_name << "' is already a tier of '"
<< osdmap.get_pool_name(base_pool->tier_of) << "', "
<< "multiple tiers are not yet supported.";
*err = -EINVAL;
return false;
}
if (tier_pool->has_tiers()) {
*ss << "pool '" << tier_pool_name << "' has following tier(s) already:";
for (set<uint64_t>::iterator it = tier_pool->tiers.begin();
it != tier_pool->tiers.end(); ++it)
*ss << "'" << osdmap.get_pool_name(*it) << "',";
*ss << " multiple tiers are not yet supported.";
*err = -EINVAL;
return false;
}
if (tier_pool->is_tier()) {
*ss << "tier pool '" << tier_pool_name << "' is already a tier of '"
<< osdmap.get_pool_name(tier_pool->tier_of) << "'";
*err = -EINVAL;
return false;
}
*err = 0;
return true;
}
/**
 * Check if it is safe to remove a tier from this base pool
 *
 * Only CephFS imposes constraints here; pools not used by a file system
 * may always drop a tier.
 *
 * @return
 * True if the operation should proceed, false if we should abort here
 * (abort doesn't necessarily mean error, could be idempotency)
 */
bool OSDMonitor::_check_remove_tier(
  const int64_t base_pool_id, const pg_pool_t *base_pool,
  const pg_pool_t *tier_pool,
  int *err, ostream *ss) const
{
  const std::string &base_pool_name = osdmap.get_pool_name(base_pool_id);

  // Apply CephFS-specific checks
  const FSMap &pending_fsmap = mon.mdsmon()->get_pending_fsmap();
  if (!pending_fsmap.pool_in_use(base_pool_id)) {
    *err = 0;
    return true;
  }

  if (base_pool->is_erasure() && !base_pool->allows_ecoverwrites()) {
    // If the underlying pool is erasure coded and does not allow EC
    // overwrites, we can't permit the removal of the replicated tier that
    // CephFS relies on to access it
    *ss << "pool '" << base_pool_name <<
      "' does not allow EC overwrites and is in use by CephFS"
      " via its tier";
    *err = -EBUSY;
    return false;
  }

  if (tier_pool && tier_pool->cache_mode == pg_pool_t::CACHEMODE_WRITEBACK) {
    *ss << "pool '" << base_pool_name << "' is in use by CephFS, and this "
      "tier is still in use as a writeback cache. Change the cache "
      "mode and flush the cache before removing it";
    *err = -EBUSY;
    return false;
  }

  *err = 0;
  return true;
}
// Stage removal of a pool in pending_inc, after validating with
// _check_remove_pool() against both the committed and any pending pool
// state.  Also scrubs every derived per-pg mapping (pg_temp, primary_temp,
// pg_upmap[_items] both committed and pending) and any crush choose_args
// for the pool, so no stale references to the deleted pool survive.
//
// If mon_fake_pool_delete is set (and no_fake is false), the pool is only
// renamed to "<name>.<id>.DELETED" instead of being removed.
//
// Returns 0 on success, a negative errno on refusal, or -EAGAIN if the
// pending pool state is inconsistent and the caller should retry.
//
// NOTE(review): *p is dereferenced without a null check, so callers are
// presumably required to pass an existing pool id — confirm at call sites.
int OSDMonitor::_prepare_remove_pool(
  int64_t pool, ostream *ss, bool no_fake)
{
  dout(10) << __func__ << " " << pool << dendl;
  const pg_pool_t *p = osdmap.get_pg_pool(pool);
  int r = _check_remove_pool(pool, *p, ss);
  if (r < 0)
    return r;

  auto new_pool = pending_inc.new_pools.find(pool);
  if (new_pool != pending_inc.new_pools.end()) {
    // if there is a problem with the pending info, wait and retry
    // this op.
    const auto& p = new_pool->second;
    int r = _check_remove_pool(pool, p, ss);
    if (r < 0)
      return -EAGAIN;
  }

  if (pending_inc.old_pools.count(pool)) {
    // already staged; deletion is idempotent
    dout(10) << __func__ << " " << pool << " already pending removal"
	     << dendl;
    return 0;
  }

  if (g_conf()->mon_fake_pool_delete && !no_fake) {
    // debugging aid: rename instead of deleting so data is recoverable
    string old_name = osdmap.get_pool_name(pool);
    string new_name = old_name + "." + stringify(pool) + ".DELETED";
    dout(1) << __func__ << " faking pool deletion: renaming " << pool << " "
	    << old_name << " -> " << new_name << dendl;
    pending_inc.new_pool_names[pool] = new_name;
    return 0;
  }

  // remove
  pending_inc.old_pools.insert(pool);

  // remove any pg_temp mappings for this pool
  for (auto p = osdmap.pg_temp->begin();
       p != osdmap.pg_temp->end();
       ++p) {
    if (p->first.pool() == pool) {
      dout(10) << __func__ << " " << pool << " removing obsolete pg_temp "
	       << p->first << dendl;
      pending_inc.new_pg_temp[p->first].clear();
    }
  }
  // remove any primary_temp mappings for this pool
  for (auto p = osdmap.primary_temp->begin();
       p != osdmap.primary_temp->end();
       ++p) {
    if (p->first.pool() == pool) {
      dout(10) << __func__ << " " << pool
	       << " removing obsolete primary_temp" << p->first << dendl;
      pending_inc.new_primary_temp[p->first] = -1;
    }
  }
  // remove any pg_upmap mappings for this pool
  for (auto& p : osdmap.pg_upmap) {
    if (p.first.pool() == pool) {
      dout(10) << __func__ << " " << pool
	       << " removing obsolete pg_upmap "
	       << p.first << dendl;
      pending_inc.old_pg_upmap.insert(p.first);
    }
  }
  // remove any pending pg_upmap mappings for this pool
  {
    auto it = pending_inc.new_pg_upmap.begin();
    while (it != pending_inc.new_pg_upmap.end()) {
      if (it->first.pool() == pool) {
	dout(10) << __func__ << " " << pool
		 << " removing pending pg_upmap "
		 << it->first << dendl;
	it = pending_inc.new_pg_upmap.erase(it);
      } else {
	it++;
      }
    }
  }
  // remove any pg_upmap_items mappings for this pool
  for (auto& p : osdmap.pg_upmap_items) {
    if (p.first.pool() == pool) {
      dout(10) << __func__ << " " << pool
	       << " removing obsolete pg_upmap_items " << p.first
	       << dendl;
      pending_inc.old_pg_upmap_items.insert(p.first);
    }
  }
  // remove any pending pg_upmap mappings for this pool
  {
    auto it = pending_inc.new_pg_upmap_items.begin();
    while (it != pending_inc.new_pg_upmap_items.end()) {
      if (it->first.pool() == pool) {
	dout(10) << __func__ << " " << pool
		 << " removing pending pg_upmap_items "
		 << it->first << dendl;
	it = pending_inc.new_pg_upmap_items.erase(it);
      } else {
	it++;
      }
    }
  }

  // remove any choose_args for this pool
  CrushWrapper newcrush = _get_pending_crush();
  if (newcrush.have_choose_args(pool)) {
    dout(10) << __func__ << " removing choose_args for pool " << pool << dendl;
    newcrush.rm_choose_args(pool);
    pending_inc.crush.clear();
    newcrush.encode(pending_inc.crush, mon.get_quorum_con_features());
  }
  return 0;
}
// Stage a pool rename in pending_inc.  Fails with -ENOENT if the pool is
// already pending deletion, or -EEXIST if another pool has a pending
// rename to the same name.
int OSDMonitor::_prepare_rename_pool(int64_t pool, string newname)
{
  dout(10) << "_prepare_rename_pool " << pool << dendl;
  if (pending_inc.old_pools.count(pool)) {
    dout(10) << "_prepare_rename_pool " << pool << " pending removal" << dendl;
    return -ENOENT;
  }
  // refuse if some *other* pool is already being renamed to this name
  for (const auto& [other_pool, other_name] : pending_inc.new_pool_names) {
    if (other_name == newname && other_pool != pool) {
      return -EEXIST;
    }
  }
  pending_inc.new_pool_names[pool] = newname;
  return 0;
}
// Write path for pool deletion: stage removal in pending_inc, or requeue
// the op if the pending state is not yet consistent (-EAGAIN).  The reply
// is deferred until the proposal commits.
bool OSDMonitor::prepare_pool_op_delete(MonOpRequestRef op)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MPoolOp>();
  ostringstream ss;
  const int ret = _prepare_remove_pool(m->pool, &ss, false);
  if (ret == -EAGAIN) {
    // pending map state is in flux; retry after the next commit
    wait_for_finished_proposal(op, new C_RetryMessage(this, op));
    return true;
  }
  if (ret < 0) {
    dout(10) << __func__ << " got " << ret << " " << ss.str() << dendl;
  }
  wait_for_finished_proposal(
    op, new OSDMonitor::C_PoolOp(this, op, ret, pending_inc.epoch));
  return true;
}
// Build and send an MPoolOpReply for the given op, carrying the result
// code, the relevant osdmap epoch, and optional reply payload (blp).
void OSDMonitor::_pool_op_reply(MonOpRequestRef op,
				int ret, epoch_t epoch, bufferlist *blp)
{
  op->mark_osdmon_event(__func__);
  auto m = op->get_req<MPoolOp>();
  dout(20) << "_pool_op_reply " << ret << dendl;
  auto *reply = new MPoolOpReply(m->fsid, m->get_tid(), ret, epoch,
				 get_last_committed(), blp);
  mon.send_reply(op, reply);
}
// Rescale every pool's "recovery_priority" option so all values fit within
// [OSD_POOL_PRIORITY_MIN, OSD_POOL_PRIORITY_MAX], preserving the relative
// ordering proportionally.  A no-op when every priority already fits.
void OSDMonitor::convert_pool_priorities(void)
{
  pool_opts_t::key_t key = pool_opts_t::get_opt_desc("recovery_priority").key;
  int64_t max_prio = 0;
  int64_t min_prio = 0;
  // first pass: find the extreme priorities actually in use
  for (const auto &i : osdmap.get_pools()) {
    const auto &pool = i.second;

    if (pool.opts.is_set(key)) {
      int64_t prio = 0;
      pool.opts.get(key, &prio);
      if (prio > max_prio)
	max_prio = prio;
      if (prio < min_prio)
	min_prio = prio;
    }
  }
  if (max_prio <= OSD_POOL_PRIORITY_MAX && min_prio >= OSD_POOL_PRIORITY_MIN) {
    dout(20) << __func__ << " nothing to fix" << dendl;
    return;
  }
  // Current pool priorities exceeds new maximum
  // second pass: proportionally rescale each out-of-range priority
  for (const auto &i : osdmap.get_pools()) {
    const auto pool_id = i.first;
    pg_pool_t pool = i.second;

    int64_t prio = 0;
    pool.opts.get(key, &prio);  // pools without the option keep prio == 0
    int64_t n;

    if (prio > 0 && max_prio > OSD_POOL_PRIORITY_MAX) { // Likely scenario
      // Scaled priority range 0 to OSD_POOL_PRIORITY_MAX
      n = (float)prio / max_prio * OSD_POOL_PRIORITY_MAX;
    } else if (prio < 0 && min_prio < OSD_POOL_PRIORITY_MIN) {
      // Scaled priority range OSD_POOL_PRIORITY_MIN to 0
      n = (float)prio / min_prio * OSD_POOL_PRIORITY_MIN;
    } else {
      continue;  // unset or already in-range priorities are left alone
    }
    if (n == 0) {
      // a priority that scales to zero is equivalent to the default
      pool.opts.unset(key);
    } else {
      pool.opts.set(key, static_cast<int64_t>(n));
    }
    dout(10) << __func__ << " pool " << pool_id
	     << " recovery_priority adjusted "
	     << prio << " to " << n << dendl;
    pool.last_change = pending_inc.epoch;
    pending_inc.new_pools[pool_id] = pool;
  }
}
// Validate every pool for stretch mode and collect pointers to the
// (pending) pool entries that will be updated.  On success *okay is true
// and *pools holds a pending copy of each pool; on failure *okay stays
// false and *errcode / ss describe the problem.
//
// Requirements checked per pool: must be replicated, and must either still
// have default size/min_size or already use the new stretch crush rule.
void OSDMonitor::try_enable_stretch_mode_pools(stringstream& ss, bool *okay,
					       int *errcode,
					       set<pg_pool_t*>* pools,
					       const string& new_crush_rule)
{
  dout(20) << __func__ << dendl;
  *okay = false;
  int new_crush_rule_result = osdmap.crush->get_rule_id(new_crush_rule);
  if (new_crush_rule_result < 0) {
    // report the rule *name* the user asked for, not the numeric error
    // code (consistent with the message in try_enable_stretch_mode())
    ss << "unrecognized crush rule " << new_crush_rule;
    *errcode = new_crush_rule_result;
    return;
  }
  __u8 new_rule = static_cast<__u8>(new_crush_rule_result);
  for (const auto& pooli : osdmap.pools) {
    int64_t poolid = pooli.first;
    const pg_pool_t *p = &pooli.second;
    if (!p->is_replicated()) {
      ss << "stretched pools must be replicated; '" << osdmap.pool_name[poolid] << "' is erasure-coded";
      *errcode = -EINVAL;
      return;
    }
    uint8_t default_size = g_conf().get_val<uint64_t>("osd_pool_default_size");
    if ((p->get_size() != default_size ||
	 (p->get_min_size() != g_conf().get_osd_pool_default_min_size(default_size))) &&
	(p->get_crush_rule() != new_rule)) {
      ss << "we currently require stretch mode pools start out with the"
	" default size/min_size, which '" << osdmap.pool_name[poolid] << "' does not";
      *errcode = -EINVAL;
      return;
    }
    pg_pool_t *pp = pending_inc.get_new_pool(poolid, p);
    // TODO: The part where we unconditionally copy the pools into pending_inc is bad
    // the attempt may fail and then we have these pool updates...but they won't do anything
    // if there is a failure, so if it's hard to change the interface, no need to bother
    pools->insert(pp);
  }
  *okay = true;
  return;
}
// Validate the cluster topology for stretch mode and, when commit is true,
// apply it: set each pool's crush rule and peering constraints and mark
// the pending incremental as enabling stretch mode.
//
// When commit is true the caller has already validated once with
// commit=false, so every failure path asserts — hitting one then means the
// precheck and the commit disagreed.
void OSDMonitor::try_enable_stretch_mode(stringstream& ss, bool *okay,
					 int *errcode, bool commit,
					 const string& dividing_bucket,
					 uint32_t bucket_count,
					 const set<pg_pool_t*>& pools,
					 const string& new_crush_rule)
{
  dout(20) << __func__ << dendl;
  *okay = false;
  CrushWrapper crush = _get_pending_crush();
  // resolve the bucket type (e.g. "datacenter") that divides the sites
  int dividing_id = -1;
  if (auto type_id = crush.get_validated_type_id(dividing_bucket);
      !type_id.has_value()) {
    ss << dividing_bucket << " is not a valid crush bucket type";
    *errcode = -ENOENT;
    ceph_assert(!commit);
    return;
  } else {
    dividing_id = *type_id;
  }
  // stretch mode currently requires exactly two buckets of that type
  vector<int> subtrees;
  crush.get_subtree_of_type(dividing_id, &subtrees);
  if (subtrees.size() != 2) {
    ss << "there are " << subtrees.size() << dividing_bucket
       << "'s in the cluster but stretch mode currently only works with 2!";
    *errcode = -EINVAL;
    ceph_assert(!commit || subtrees.size() == 2);
    return;
  }

  int new_crush_rule_result = crush.get_rule_id(new_crush_rule);
  if (new_crush_rule_result < 0) {
    ss << "unrecognized crush rule " << new_crush_rule;
    *errcode = new_crush_rule_result;
    ceph_assert(!commit || (new_crush_rule_result > 0));
    return;
  }
  __u8 new_rule = static_cast<__u8>(new_crush_rule_result);

  // both sites must carry equal weight
  int weight1 = crush.get_item_weight(subtrees[0]);
  int weight2 = crush.get_item_weight(subtrees[1]);
  if (weight1 != weight2) {
    // TODO: I'm really not sure this is a good idea?
    ss << "the 2 " << dividing_bucket
       << "instances in the cluster have differing weights "
       << weight1 << " and " << weight2
       <<" but stretch mode currently requires they be the same!";
    *errcode = -EINVAL;
    ceph_assert(!commit || (weight1 == weight2));
    return;
  }
  if (bucket_count != 2) {
    ss << "currently we only support 2-site stretch clusters!";
    *errcode = -EINVAL;
    ceph_assert(!commit || bucket_count == 2);
    return;
  }
  // TODO: check CRUSH rules for pools so that we are appropriately divided
  if (commit) {
    // apply the stretch peering constraints to every collected pool
    for (auto pool : pools) {
      pool->crush_rule = new_rule;
      pool->peering_crush_bucket_count = bucket_count;
      pool->peering_crush_bucket_target = bucket_count;
      pool->peering_crush_bucket_barrier = dividing_id;
      pool->peering_crush_mandatory_member = CRUSH_ITEM_NONE;
      pool->size = g_conf().get_val<uint64_t>("mon_stretch_pool_size");
      pool->min_size = g_conf().get_val<uint64_t>("mon_stretch_pool_min_size");
    }
    pending_inc.change_stretch_mode = true;
    pending_inc.stretch_mode_enabled = true;
    pending_inc.new_stretch_bucket_count = bucket_count;
    pending_inc.new_degraded_stretch_mode = 0;
    pending_inc.new_stretch_mode_bucket = dividing_id;
  }
  *okay = true;
  return;
}
// Given a map of crush bucket name -> monitor names believed dead, check
// whether the corresponding OSD subtrees are fully down too.  Buckets whose
// subtree is down are added to *really_down_buckets (by crush id) and their
// monitors to *really_down_mons.  Returns true if any bucket qualified.
bool OSDMonitor::check_for_dead_crush_zones(const map<string,set<string>>& dead_buckets,
					    set<int> *really_down_buckets,
					    set<string> *really_down_mons)
{
  dout(20) << __func__ << " with dead mon zones " << dead_buckets << dendl;
  ceph_assert(is_readable());
  if (dead_buckets.empty()) return false;
  set<int> down_cache;  // memoizes subtree_is_down() lookups across buckets
  bool really_down = false;
  // iterate by const reference: each map entry carries a std::set<string>
  // of mon names, and the old by-value loop copied it every iteration
  for (const auto& [bucket_name, mons] : dead_buckets) {
    ceph_assert(osdmap.crush->name_exists(bucket_name));
    int bucket_id = osdmap.crush->get_item_id(bucket_name);
    dout(20) << "Checking " << bucket_name << " id " << bucket_id
	     << " to see if OSDs are also down" << dendl;
    bool subtree_down = osdmap.subtree_is_down(bucket_id, &down_cache);
    if (subtree_down) {
      dout(20) << "subtree is down!" << dendl;
      really_down = true;
      really_down_buckets->insert(bucket_id);
      really_down_mons->insert(mons.begin(), mons.end());
    }
  }
  dout(10) << "We determined CRUSH buckets " << *really_down_buckets
	   << " and mons " << *really_down_mons << " are really down" << dendl;
  return really_down;
}
// Enter degraded stretch mode after one site has been declared dead:
// record the reduced site count in the pending incremental and relax each
// stretch pool's peering constraints (halved min_size, surviving site as
// the mandatory peering member), then propose the change.
void OSDMonitor::trigger_degraded_stretch_mode(const set<int>& dead_buckets,
					       const set<string>& live_zones)
{
  dout(20) << __func__ << dendl;
  stretch_recovery_triggered.set_from_double(0); // reset this; we can't go clean now!
  // update the general OSDMap changes
  pending_inc.change_stretch_mode = true;
  pending_inc.stretch_mode_enabled = osdmap.stretch_mode_enabled;
  pending_inc.new_stretch_bucket_count = osdmap.stretch_bucket_count;
  int new_site_count = osdmap.stretch_bucket_count - dead_buckets.size();
  ceph_assert(new_site_count == 1); // stretch count 2!
  pending_inc.new_degraded_stretch_mode = new_site_count;
  pending_inc.new_recovering_stretch_mode = 0;
  pending_inc.new_stretch_mode_bucket = osdmap.stretch_mode_bucket;

  // and then apply them to all the pg_pool_ts
  ceph_assert(live_zones.size() == 1); // only support 2 zones now
  const string& remaining_site_name = *(live_zones.begin());
  ceph_assert(osdmap.crush->name_exists(remaining_site_name));
  int remaining_site = osdmap.crush->get_item_id(remaining_site_name);
  for (auto pgi : osdmap.pools) {
    // only stretch pools (nonzero peering bucket count) are affected
    if (pgi.second.peering_crush_bucket_count) {
      pg_pool_t& newp = *pending_inc.get_new_pool(pgi.first, &pgi.second);
      newp.peering_crush_bucket_count = new_site_count;
      newp.peering_crush_mandatory_member = remaining_site;
      newp.min_size = pgi.second.min_size / 2; // only support 2 zones now
      // force clients to resend ops so they pick up the new constraints
      newp.set_last_force_op_resend(pending_inc.epoch);
    }
  }
  propose_pending();
}
// Transition from degraded stretch mode into recovery mode once the dead
// site returns: flag recovering in the pending incremental and force an op
// resend on every stretch pool, then propose the change.
void OSDMonitor::trigger_recovery_stretch_mode()
{
  dout(20) << __func__ << dendl;
  stretch_recovery_triggered.set_from_double(0); // reset this so we don't go full-active prematurely
  pending_inc.change_stretch_mode = true;
  pending_inc.stretch_mode_enabled = osdmap.stretch_mode_enabled;
  pending_inc.new_stretch_bucket_count = osdmap.stretch_bucket_count;
  pending_inc.new_degraded_stretch_mode = osdmap.degraded_stretch_mode;
  pending_inc.new_recovering_stretch_mode = 1;
  pending_inc.new_stretch_mode_bucket = osdmap.stretch_mode_bucket;
  for (auto pgi : osdmap.pools) {
    // only stretch pools (nonzero peering bucket count) are affected
    if (pgi.second.peering_crush_bucket_count) {
      pg_pool_t& newp = *pending_inc.get_new_pool(pgi.first, &pgi.second);
      newp.set_last_force_op_resend(pending_inc.epoch);
    }
  }
  propose_pending();
}
// Note entry into degraded stretch mode: clear the recovery-start
// timestamp so no recovery-exit timer is considered running.
void OSDMonitor::set_degraded_stretch_mode()
{
  stretch_recovery_triggered.set_from_double(0);
}
// Note entry into recovery stretch mode: start the recovery clock, but
// only once (keep the earliest timestamp if already set).
void OSDMonitor::set_recovery_stretch_mode()
{
  if (stretch_recovery_triggered.is_zero()) {
    stretch_recovery_triggered = ceph_clock_now();
  }
}
// Note return to healthy stretch mode: clear the recovery-start timestamp.
void OSDMonitor::set_healthy_stretch_mode()
{
  stretch_recovery_triggered.set_from_double(0);
}
// Called when a new PG stats digest arrives; if a stretch recovery is in
// progress (nonzero trigger timestamp), re-evaluate whether we can exit
// recovery mode.
void OSDMonitor::notify_new_pg_digest()
{
  dout(20) << __func__ << dendl;
  if (!stretch_recovery_triggered.is_zero()) {
    try_end_recovery_stretch_mode(false);
  }
}
struct CMonExitRecovery : public Context {
OSDMonitor *m;
bool force;
CMonExitRecovery(OSDMonitor *mon, bool f) : m(mon), force(f) {}
void finish(int r) {
m->try_end_recovery_stretch_mode(force);
}
};
// Attempt to leave recovery stretch mode and return to healthy mode.
// Only the leader acts, and only while in degraded+recovering stretch
// mode.  Exits when forced, or when the minimum recovery wait has elapsed
// AND PG stats show no degraded/inactive/unknown PGs.  If the monitor or
// the mgr stat service is not readable yet, requeues itself via
// CMonExitRecovery.
void OSDMonitor::try_end_recovery_stretch_mode(bool force)
{
  dout(20) << __func__ << dendl;
  if (!mon.is_leader()) return;
  if (!mon.is_degraded_stretch_mode()) return;
  if (!mon.is_recovering_stretch_mode()) return;
  if (!is_readable()) {
    // retry once our paxos state is readable
    wait_for_readable_ctx(new CMonExitRecovery(this, force));
    return;
  }

  // require the configured minimum wait since recovery started (unless
  // forced) before even consulting PG stats
  if (osdmap.recovering_stretch_mode &&
      ((!stretch_recovery_triggered.is_zero() &&
	ceph_clock_now() - g_conf().get_val<double>("mon_stretch_recovery_min_wait") >
	stretch_recovery_triggered) ||
       force)) {
    if (!mon.mgrstatmon()->is_readable()) {
      // retry once the PG stats service is readable
      mon.mgrstatmon()->wait_for_readable_ctx(new CMonExitRecovery(this, force));
      return;
    }
    const PGMapDigest& pgd = mon.mgrstatmon()->get_digest();
    double misplaced, degraded, inactive, unknown;
    pgd.get_recovery_stats(&misplaced, &degraded, &inactive, &unknown);
    // misplaced data is tolerated; degraded/inactive/unknown is not
    if (force || (degraded == 0.0 && inactive == 0.0 && unknown == 0.0)) {
      // we can exit degraded stretch mode!
      mon.trigger_healthy_stretch_mode();
    }
  }
}
// Return the cluster to healthy stretch mode: clear degraded/recovering
// flags in the pending incremental and restore each stretch pool's full
// peering bucket count, mandatory-member, and min_size, then propose.
void OSDMonitor::trigger_healthy_stretch_mode()
{
  ceph_assert(is_writeable());
  stretch_recovery_triggered.set_from_double(0);
  pending_inc.change_stretch_mode = true;
  pending_inc.stretch_mode_enabled = osdmap.stretch_mode_enabled;
  pending_inc.new_stretch_bucket_count = osdmap.stretch_bucket_count;
  pending_inc.new_degraded_stretch_mode = 0; // turn off degraded mode...
  pending_inc.new_recovering_stretch_mode = 0; //...and recovering mode!
  pending_inc.new_stretch_mode_bucket = osdmap.stretch_mode_bucket;
  for (auto pgi : osdmap.pools) {
    // only stretch pools (nonzero peering bucket count) are affected
    if (pgi.second.peering_crush_bucket_count) {
      pg_pool_t& newp = *pending_inc.get_new_pool(pgi.first, &pgi.second);
      newp.peering_crush_bucket_count = osdmap.stretch_bucket_count;
      newp.peering_crush_mandatory_member = CRUSH_ITEM_NONE;
      newp.min_size = g_conf().get_val<uint64_t>("mon_stretch_pool_min_size");
      // force clients to resend ops so they pick up the restored constraints
      newp.set_last_force_op_resend(pending_inc.epoch);
    }
  }
  propose_pending();
}
| 479,187 | 31.001336 | 150 |
cc
|
null |
ceph-main/src/mon/OSDMonitor.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
* Copyright (C) 2013,2014 Cloudwatt <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
/* Object Store Device (OSD) Monitor
*/
#ifndef CEPH_OSDMONITOR_H
#define CEPH_OSDMONITOR_H
#include <map>
#include <set>
#include <utility>
#include <sstream>
#include "include/types.h"
#include "include/encoding.h"
#include "common/simple_cache.hpp"
#include "common/PriorityCache.h"
#include "msg/Messenger.h"
#include "osd/OSDMap.h"
#include "osd/OSDMapMapping.h"
#include "CreatingPGs.h"
#include "PaxosService.h"
#include "erasure-code/ErasureCodeInterface.h"
#include "mon/MonOpRequest.h"
#include <boost/functional/hash.hpp>
class Monitor;
class PGMap;
struct MonSession;
class MOSDMap;
/// information about a particular peer's failure reports for one osd
struct failure_reporter_t {
  utime_t failed_since; ///< when they think it failed
  MonOpRequestRef op; ///< failure op request

  failure_reporter_t() {}
  // construct with the reported failure time and the originating op
  failure_reporter_t(utime_t s, MonOpRequestRef op)
    : failed_since(s), op(op) {}
  ~failure_reporter_t() { }
};
/// information about all failure reports for one osd
struct failure_info_t {
std::map<int, failure_reporter_t> reporters; ///< reporter -> failed_since etc
utime_t max_failed_since; ///< most recent failed_since
failure_info_t() {}
utime_t get_failed_since() {
if (max_failed_since == utime_t() && !reporters.empty()) {
// the old max must have canceled; recalculate.
for (auto p = reporters.begin(); p != reporters.end(); ++p)
if (p->second.failed_since > max_failed_since)
max_failed_since = p->second.failed_since;
}
return max_failed_since;
}
// set the message for the latest report.
void add_report(int who, utime_t failed_since, MonOpRequestRef op) {
[[maybe_unused]] auto [it, new_reporter] =
reporters.insert_or_assign(who, failure_reporter_t{failed_since, op});
if (new_reporter) {
if (max_failed_since != utime_t() && max_failed_since < failed_since) {
max_failed_since = failed_since;
}
}
}
void take_report_messages(std::list<MonOpRequestRef>& ls) {
for (auto p = reporters.begin(); p != reporters.end(); ++p) {
if (p->second.op) {
ls.push_back(p->second.op);
p->second.op.reset();
}
}
}
void cancel_report(int who) {
reporters.erase(who);
max_failed_since = utime_t();
}
};
// Tracks, per pool and per PG, the last epoch each PG reported clean;
// used to compute a lower bound for trimming old osdmaps.
class LastEpochClean {
  struct Lec {
    // last-clean epoch per placement seed (ps) — presumably indexed by ps;
    // see report() in the .cc for the authoritative semantics
    std::vector<epoch_t> epoch_by_pg;
    ps_t next_missing = 0;
    epoch_t floor = std::numeric_limits<epoch_t>::max();
    void report(unsigned pg_num, ps_t pg, epoch_t last_epoch_clean);
  };
  std::map<uint64_t, Lec> report_by_pool;  // pool id -> per-pool state
public:
  // record a PG's last_epoch_clean report
  void report(unsigned pg_num, const pg_t& pg, epoch_t last_epoch_clean);
  // drop all state for a deleted pool
  void remove_pool(uint64_t pool);
  // lower bound of last_epoch_clean across pools/pgs in `latest`
  epoch_t get_lower_bound(const OSDMap& latest) const;

  void dump(Formatter *f) const;
};
struct osdmap_manifest_t {
// all the maps we have pinned -- i.e., won't be removed unless
// they are inside a trim interval.
std::set<version_t> pinned;
osdmap_manifest_t() {}
version_t get_last_pinned() const
{
auto it = pinned.crbegin();
if (it == pinned.crend()) {
return 0;
}
return *it;
}
version_t get_first_pinned() const
{
auto it = pinned.cbegin();
if (it == pinned.cend()) {
return 0;
}
return *it;
}
bool is_pinned(version_t v) const
{
return pinned.find(v) != pinned.end();
}
void pin(version_t v)
{
pinned.insert(v);
}
version_t get_lower_closest_pinned(version_t v) const {
auto p = pinned.lower_bound(v);
if (p == pinned.cend()) {
return 0;
} else if (*p > v) {
if (p == pinned.cbegin()) {
return 0;
}
--p;
}
return *p;
}
void encode(ceph::buffer::list& bl) const
{
ENCODE_START(1, 1, bl);
encode(pinned, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl)
{
DECODE_START(1, bl);
decode(pinned, bl);
DECODE_FINISH(bl);
}
void decode(ceph::buffer::list& bl) {
auto p = bl.cbegin();
decode(p);
}
void dump(ceph::Formatter *f) {
f->dump_unsigned("first_pinned", get_first_pinned());
f->dump_unsigned("last_pinned", get_last_pinned());
f->open_array_section("pinned_maps");
for (auto& i : pinned) {
f->dump_unsigned("epoch", i);
}
f->close_section();
}
};
WRITE_CLASS_ENCODER(osdmap_manifest_t);
class OSDMonitor : public PaxosService,
public md_config_obs_t {
CephContext *cct;
public:
OSDMap osdmap;
// config observer
const char** get_tracked_conf_keys() const override;
void handle_conf_change(const ConfigProxy& conf,
const std::set<std::string> &changed) override;
// [leader]
OSDMap::Incremental pending_inc;
std::map<int, ceph::buffer::list> pending_metadata;
std::set<int> pending_metadata_rm;
std::map<int, failure_info_t> failure_info;
std::map<int,utime_t> down_pending_out; // osd down -> out
bool priority_convert = false;
std::map<int64_t,std::set<snapid_t>> pending_pseudo_purged_snaps;
std::shared_ptr<PriorityCache::PriCache> rocksdb_binned_kv_cache = nullptr;
std::shared_ptr<PriorityCache::Manager> pcm = nullptr;
ceph::mutex balancer_lock = ceph::make_mutex("OSDMonitor::balancer_lock");
std::map<int,double> osd_weight;
using osdmap_key_t = std::pair<version_t, uint64_t>;
using osdmap_cache_t = SimpleLRU<osdmap_key_t,
ceph::buffer::list,
std::less<osdmap_key_t>,
boost::hash<osdmap_key_t>>;
osdmap_cache_t inc_osd_cache;
osdmap_cache_t full_osd_cache;
bool has_osdmap_manifest;
osdmap_manifest_t osdmap_manifest;
bool check_failures(utime_t now);
bool check_failure(utime_t now, int target_osd, failure_info_t& fi);
utime_t get_grace_time(utime_t now, int target_osd, failure_info_t& fi) const;
bool is_failure_stale(utime_t now, failure_info_t& fi) const;
void force_failure(int target_osd, int by);
bool _have_pending_crush();
CrushWrapper &_get_stable_crush();
CrushWrapper _get_pending_crush();
enum FastReadType {
FAST_READ_OFF,
FAST_READ_ON,
FAST_READ_DEFAULT
};
/**
 * Background job that checks a batch of PGs' pg_upmap entries and
 * records any needed cancellations/remaps in pending_inc.
 *
 * Runs on ParallelPGMapper worker threads, so process() may be invoked
 * concurrently for different pg batches; updates to the shared
 * pending_inc are therefore serialized via pending_inc_lock.
 */
struct CleanUpmapJob : public ParallelPGMapper::Job {
  CephContext *cct;
  const OSDMap& osdmap;          // map the upmaps are validated against
  OSDMap::Incremental& pending_inc;
  // lock to protect pending_inc from changing
  // when checking is done
  ceph::mutex pending_inc_lock =
    ceph::make_mutex("CleanUpmapJob::pending_inc_lock");
  CleanUpmapJob(CephContext *cct, const OSDMap& om, OSDMap::Incremental& pi)
    : ParallelPGMapper::Job(&om),
      cct(cct),
      osdmap(om),
      pending_inc(pi) {}
  // Validate the given pgs' upmaps; stage removals/remaps into pending_inc.
  void process(const std::vector<pg_t>& to_check) override {
    std::vector<pg_t> to_cancel;
    std::map<pg_t, mempool::osdmap::vector<std::pair<int,int>>> to_remap;
    osdmap.check_pg_upmaps(cct, to_check, &to_cancel, &to_remap);
    // don't bother taking lock if nothing changes
    if (!to_cancel.empty() || !to_remap.empty()) {
      std::lock_guard l(pending_inc_lock);
      osdmap.clean_pg_upmaps(cct, &pending_inc, to_cancel, to_remap);
    }
  }
  // This job only uses the pg-list interface; the ps-range form is a no-op.
  void process(int64_t poolid, unsigned ps_begin, unsigned ps_end) override {}
  void complete() override {}
}; // public as this will need to be accessible from TestTestOSDMap.cc
// svc
public:
void create_initial() override;
void get_store_prefixes(std::set<std::string>& s) const override;
private:
void update_from_paxos(bool *need_bootstrap) override;
void create_pending() override; // prepare a new pending
void encode_pending(MonitorDBStore::TransactionRef t) override;
void on_active() override;
void on_restart() override;
void on_shutdown() override;
/* osdmap full map prune */
void load_osdmap_manifest();
bool should_prune() const;
void _prune_update_trimmed(
MonitorDBStore::TransactionRef tx,
version_t first);
void prune_init(osdmap_manifest_t& manifest);
bool _prune_sanitize_options() const;
bool is_prune_enabled() const;
bool is_prune_supported() const;
bool do_prune(MonitorDBStore::TransactionRef tx);
// Priority cache control
uint32_t mon_osd_cache_size = 0; ///< Number of cached OSDMaps
uint64_t rocksdb_cache_size = 0; ///< Cache for kv Db
double cache_kv_ratio = 0; ///< Cache ratio dedicated to kv
double cache_inc_ratio = 0; ///< Cache ratio dedicated to inc
double cache_full_ratio = 0; ///< Cache ratio dedicated to full
uint64_t mon_memory_base = 0; ///< Mon base memory for cache autotuning
double mon_memory_fragmentation = 0; ///< Expected memory fragmentation
uint64_t mon_memory_target = 0; ///< Mon target memory for cache autotuning
uint64_t mon_memory_min = 0; ///< Min memory to cache osdmaps
bool mon_memory_autotune = false; ///< Cache auto tune setting
int register_cache_with_pcm();
int _set_cache_sizes();
int _set_cache_ratios();
void _set_new_cache_sizes();
void _set_cache_autotuning();
int _update_mon_cache_settings();
friend struct OSDMemCache;
friend struct IncCache;
friend struct FullCache;
/**
 * No-op: full version stashing has not been delegated to PaxosService
 * for some time now (this monitor manages full osdmaps itself), so
 * there is nothing to do here.  See also should_stash_full().
 */
void encode_full(MonitorDBStore::TransactionRef t) override { }
/**
 * Do not let PaxosService periodically stash full osdmaps, or we will
 * break our locally-managed full maps.  (update_from_paxos loads the
 * latest and writes them out going forward from there, but if we just
 * synced, letting paxosservice stash too may mean we skip some.)
 *
 * @return always false — full-map management is handled locally
 */
bool should_stash_full() override {
  return false;
}
/**
* hook into trim to include the oldest full map in the trim transaction
*
* This ensures that anyone post-sync will have enough to rebuild their
* full osdmaps.
*/
void encode_trim_extra(MonitorDBStore::TransactionRef tx, version_t first) override;
void update_msgr_features();
/**
* check if the cluster supports the features required by the
* given crush map. Outputs the daemons which don't support it
* to the stringstream.
*
* @returns true if the map is passable, false otherwise
*/
bool validate_crush_against_features(const CrushWrapper *newcrush,
std::stringstream &ss);
void check_osdmap_subs();
void share_map_with_random_osd();
ceph::mutex prime_pg_temp_lock =
ceph::make_mutex("OSDMonitor::prime_pg_temp_lock");
/**
 * Background job that primes pg_temp entries for a range of placement
 * seeds against the next OSDMap (see OSDMonitor::prime_pg_temp()).
 * Runs on ParallelPGMapper worker threads.
 */
struct PrimeTempJob : public ParallelPGMapper::Job {
  OSDMonitor *osdmon;
  PrimeTempJob(const OSDMap& om, OSDMonitor *m)
    : ParallelPGMapper::Job(&om), osdmon(m) {}
  // Prime every pg in [ps_begin, ps_end) of the given pool.
  void process(int64_t pool, unsigned ps_begin, unsigned ps_end) override {
    for (unsigned ps = ps_begin; ps < ps_end; ++ps) {
      pg_t pgid(ps, pool);
      osdmon->prime_pg_temp(*osdmap, pgid);
    }
  }
  // Only the ps-range interface is used; the pg-list form is a no-op.
  void process(const std::vector<pg_t>& pgs) override {}
  void complete() override {}
};
void maybe_prime_pg_temp();
void prime_pg_temp(const OSDMap& next, pg_t pgid);
ParallelPGMapper mapper; ///< for background pg work
OSDMapMapping mapping; ///< pg <-> osd mappings
std::unique_ptr<ParallelPGMapper::Job> mapping_job; ///< background mapping job
void start_mapping();
void update_logger();
void handle_query(PaxosServiceMessage *m);
bool preprocess_query(MonOpRequestRef op) override; // true if processed.
bool prepare_update(MonOpRequestRef op) override;
bool should_propose(double &delay) override;
version_t get_trim_to() const override;
bool can_mark_down(int o);
bool can_mark_up(int o);
bool can_mark_out(int o);
bool can_mark_in(int o);
// ...
MOSDMap *build_latest_full(uint64_t features);
MOSDMap *build_incremental(epoch_t first, epoch_t last, uint64_t features);
void send_full(MonOpRequestRef op);
void send_incremental(MonOpRequestRef op, epoch_t first);
public:
/**
* Make sure the existing (up) OSDs support the given features
* @return 0 on success, or an error code if any OSDs re missing features.
* @param ss Filled in with ane explanation of failure, if any
*/
int check_cluster_features(uint64_t features, std::stringstream &ss);
// @param req an optional op request, if the osdmaps are replies to it. so
// @c Monitor::send_reply() can mark_event with it.
void send_incremental(epoch_t first, MonSession *session, bool onetime,
MonOpRequestRef req = MonOpRequestRef());
private:
void print_utilization(std::ostream &out, ceph::Formatter *f, bool tree) const;
bool check_source(MonOpRequestRef op, uuid_d fsid);
bool preprocess_get_osdmap(MonOpRequestRef op);
bool preprocess_mark_me_down(MonOpRequestRef op);
friend class C_AckMarkedDown;
bool preprocess_failure(MonOpRequestRef op);
bool prepare_failure(MonOpRequestRef op);
bool prepare_mark_me_down(MonOpRequestRef op);
void process_failures();
void take_all_failures(std::list<MonOpRequestRef>& ls);
bool preprocess_mark_me_dead(MonOpRequestRef op);
bool prepare_mark_me_dead(MonOpRequestRef op);
bool preprocess_full(MonOpRequestRef op);
bool prepare_full(MonOpRequestRef op);
bool preprocess_boot(MonOpRequestRef op);
bool prepare_boot(MonOpRequestRef op);
void _booted(MonOpRequestRef op, bool logit);
void update_up_thru(int from, epoch_t up_thru);
bool preprocess_alive(MonOpRequestRef op);
bool prepare_alive(MonOpRequestRef op);
void _reply_map(MonOpRequestRef op, epoch_t e);
bool preprocess_pgtemp(MonOpRequestRef op);
bool prepare_pgtemp(MonOpRequestRef op);
bool preprocess_pg_created(MonOpRequestRef op);
bool prepare_pg_created(MonOpRequestRef op);
bool preprocess_pg_ready_to_merge(MonOpRequestRef op);
bool prepare_pg_ready_to_merge(MonOpRequestRef op);
int _check_remove_pool(int64_t pool_id, const pg_pool_t &pool, std::ostream *ss);
bool _check_become_tier(
int64_t tier_pool_id, const pg_pool_t *tier_pool,
int64_t base_pool_id, const pg_pool_t *base_pool,
int *err, std::ostream *ss) const;
bool _check_remove_tier(
int64_t base_pool_id, const pg_pool_t *base_pool, const pg_pool_t *tier_pool,
int *err, std::ostream *ss) const;
int _prepare_remove_pool(int64_t pool, std::ostream *ss, bool no_fake);
int _prepare_rename_pool(int64_t pool, std::string newname);
bool enforce_pool_op_caps(MonOpRequestRef op);
bool preprocess_pool_op (MonOpRequestRef op);
bool preprocess_pool_op_create (MonOpRequestRef op);
bool prepare_pool_op (MonOpRequestRef op);
bool prepare_pool_op_create (MonOpRequestRef op);
bool prepare_pool_op_delete(MonOpRequestRef op);
int crush_rename_bucket(const std::string& srcname,
const std::string& dstname,
std::ostream *ss);
void check_legacy_ec_plugin(const std::string& plugin,
const std::string& profile) const;
int normalize_profile(const std::string& profilename,
ceph::ErasureCodeProfile &profile,
bool force,
std::ostream *ss);
int crush_rule_create_erasure(const std::string &name,
const std::string &profile,
int *rule,
std::ostream *ss);
int get_crush_rule(const std::string &rule_name,
int *crush_rule,
std::ostream *ss);
int get_erasure_code(const std::string &erasure_code_profile,
ceph::ErasureCodeInterfaceRef *erasure_code,
std::ostream *ss) const;
int prepare_pool_crush_rule(const unsigned pool_type,
const std::string &erasure_code_profile,
const std::string &rule_name,
int *crush_rule,
std::ostream *ss);
bool erasure_code_profile_in_use(
const mempool::osdmap::map<int64_t, pg_pool_t> &pools,
const std::string &profile,
std::ostream *ss);
int parse_erasure_code_profile(const std::vector<std::string> &erasure_code_profile,
std::map<std::string,std::string> *erasure_code_profile_map,
std::ostream *ss);
int prepare_pool_size(const unsigned pool_type,
const std::string &erasure_code_profile,
uint8_t repl_size,
unsigned *size, unsigned *min_size,
std::ostream *ss);
int prepare_pool_stripe_width(const unsigned pool_type,
const std::string &erasure_code_profile,
unsigned *stripe_width,
std::ostream *ss);
uint32_t get_osd_num_by_crush(int crush_rule);
int check_pg_num(int64_t pool, int pg_num, int size, int crush_rule, std::ostream* ss);
int prepare_new_pool(std::string& name,
int crush_rule,
const std::string &crush_rule_name,
unsigned pg_num, unsigned pgp_num,
unsigned pg_num_min,
unsigned pg_num_max,
uint64_t repl_size,
const uint64_t target_size_bytes,
const float target_size_ratio,
const std::string &erasure_code_profile,
const unsigned pool_type,
const uint64_t expected_num_objects,
FastReadType fast_read,
std::string pg_autoscale_mode,
bool bulk,
bool crimson,
std::ostream *ss);
int prepare_new_pool(MonOpRequestRef op);
void set_pool_flags(int64_t pool_id, uint64_t flags);
void clear_pool_flags(int64_t pool_id, uint64_t flags);
bool update_pools_status();
bool _is_removed_snap(int64_t pool_id, snapid_t snapid);
bool _is_pending_removed_snap(int64_t pool_id, snapid_t snapid);
std::string make_purged_snap_epoch_key(epoch_t epoch);
std::string make_purged_snap_key(int64_t pool, snapid_t snap);
std::string make_purged_snap_key_value(int64_t pool, snapid_t snap, snapid_t num,
epoch_t epoch, ceph::buffer::list *v);
bool try_prune_purged_snaps();
int lookup_purged_snap(int64_t pool, snapid_t snap,
snapid_t *begin, snapid_t *end);
void insert_purged_snap_update(
int64_t pool,
snapid_t start, snapid_t end,
epoch_t epoch,
MonitorDBStore::TransactionRef t);
bool prepare_set_flag(MonOpRequestRef op, int flag);
bool prepare_unset_flag(MonOpRequestRef op, int flag);
void _pool_op_reply(MonOpRequestRef op,
int ret, epoch_t epoch, ceph::buffer::list *blp=NULL);
/**
 * Completion for OSD boot handling: on success, finish the boot via
 * _booted(); on -EAGAIN, redispatch the original request; -ECANCELED
 * is silently dropped.
 */
struct C_Booted : public C_MonOp {
  OSDMonitor *cmon;
  bool logit;
  C_Booted(OSDMonitor *cm, MonOpRequestRef op_, bool l=true) :
    C_MonOp(op_), cmon(cm), logit(l) {}
  void _finish(int r) override {
    if (r >= 0) {
      cmon->_booted(op, logit);
      return;
    }
    switch (r) {
    case -ECANCELED:
      return;                 // op was canceled; nothing to do
    case -EAGAIN:
      cmon->dispatch(op);     // retry from scratch
      return;
    default:
      ceph_abort_msg("bad C_Booted return value");
    }
  }
};
/**
 * Completion that replies to the op with the map at epoch e once the
 * commit succeeds; on -EAGAIN, redispatch; -ECANCELED is dropped.
 */
struct C_ReplyMap : public C_MonOp {
  OSDMonitor *osdmon;
  epoch_t e;
  C_ReplyMap(OSDMonitor *o, MonOpRequestRef op_, epoch_t ee)
    : C_MonOp(op_), osdmon(o), e(ee) {}
  void _finish(int r) override {
    if (r >= 0) {
      osdmon->_reply_map(op, e);
      return;
    }
    switch (r) {
    case -ECANCELED:
      return;                 // op was canceled; nothing to do
    case -EAGAIN:
      osdmon->dispatch(op);   // retry from scratch
      return;
    default:
      ceph_abort_msg("bad C_ReplyMap return value");
    }
  }
};
/**
 * Completion for pool operations: replies with the stored code/epoch
 * (and optional payload) on success; on -EAGAIN, redispatch the op;
 * -ECANCELED is silently dropped.
 */
struct C_PoolOp : public C_MonOp {
  OSDMonitor *osdmon;
  int replyCode;
  int epoch;
  ceph::buffer::list reply_data;   // optional payload returned to the client
  C_PoolOp(OSDMonitor * osd, MonOpRequestRef op_, int rc, int e, ceph::buffer::list *rd=NULL) :
    C_MonOp(op_), osdmon(osd), replyCode(rc), epoch(e) {
    if (rd)
      reply_data = *rd;
  }
  void _finish(int r) override {
    if (r >= 0) {
      osdmon->_pool_op_reply(op, replyCode, epoch, &reply_data);
      return;
    }
    switch (r) {
    case -ECANCELED:
      return;                 // op was canceled; nothing to do
    case -EAGAIN:
      osdmon->dispatch(op);   // retry from scratch
      return;
    default:
      ceph_abort_msg("bad C_PoolOp return value");
    }
  }
};
bool preprocess_remove_snaps(MonOpRequestRef op);
bool prepare_remove_snaps(MonOpRequestRef op);
bool preprocess_get_purged_snaps(MonOpRequestRef op);
int load_metadata(int osd, std::map<std::string, std::string>& m,
std::ostream *err);
void count_metadata(const std::string& field, ceph::Formatter *f);
void reencode_incremental_map(ceph::buffer::list& bl, uint64_t features);
void reencode_full_map(ceph::buffer::list& bl, uint64_t features);
public:
void count_metadata(const std::string& field, std::map<std::string,int> *out);
void get_versions(std::map<std::string, std::list<std::string>> &versions);
protected:
int get_osd_objectstore_type(int osd, std::string *type);
bool is_pool_currently_all_bluestore(int64_t pool_id, const pg_pool_t &pool,
std::ostream *err);
// when we last received PG stats from each osd and the osd's osd_beacon_report_interval
std::map<int, std::pair<utime_t, int>> last_osd_report;
// TODO: use last_osd_report to store the osd report epochs, once we don't
// need to upgrade from pre-luminous releases.
std::map<int,epoch_t> osd_epochs;
LastEpochClean last_epoch_clean;
bool preprocess_beacon(MonOpRequestRef op);
bool prepare_beacon(MonOpRequestRef op);
epoch_t get_min_last_epoch_clean() const;
friend class C_UpdateCreatingPGs;
std::map<int, std::map<epoch_t, std::set<spg_t>>> creating_pgs_by_osd_epoch;
std::vector<pg_t> pending_created_pgs;
// the epoch when the pg mapping was calculated
epoch_t creating_pgs_epoch = 0;
creating_pgs_t creating_pgs;
mutable std::mutex creating_pgs_lock;
creating_pgs_t update_pending_pgs(const OSDMap::Incremental& inc,
const OSDMap& nextmap);
unsigned scan_for_creating_pgs(
const mempool::osdmap::map<int64_t,pg_pool_t>& pools,
const mempool::osdmap::set<int64_t>& removed_pools,
utime_t modified,
creating_pgs_t* creating_pgs) const;
std::pair<int32_t, pg_t> get_parent_pg(pg_t pgid) const;
void update_creating_pgs();
void check_pg_creates_subs();
epoch_t send_pg_creates(int osd, Connection *con, epoch_t next) const;
int32_t _allocate_osd_id(int32_t* existing_id);
int get_grace_interval_threshold();
bool grace_interval_threshold_exceeded(int last_failed);
void set_default_laggy_params(int target_osd);
int parse_pgid(const cmdmap_t& cmdmap, std::stringstream &ss,
pg_t &pgid, std::optional<std::string> pgidstr = std::nullopt);
public:
OSDMonitor(CephContext *cct, Monitor &mn, Paxos &p, const std::string& service_name);
void tick() override; // check state, take actions
bool preprocess_command(MonOpRequestRef op);
bool prepare_command(MonOpRequestRef op);
bool prepare_command_impl(MonOpRequestRef op, const cmdmap_t& cmdmap);
int validate_osd_create(
const int32_t id,
const uuid_d& uuid,
const bool check_osd_exists,
int32_t* existing_id,
std::stringstream& ss);
int prepare_command_osd_create(
const int32_t id,
const uuid_d& uuid,
int32_t* existing_id,
std::stringstream& ss);
void do_osd_create(const int32_t id, const uuid_d& uuid,
const std::string& device_class,
int32_t* new_id);
int prepare_command_osd_purge(int32_t id, std::stringstream& ss);
int prepare_command_osd_destroy(int32_t id, std::stringstream& ss);
int _prepare_command_osd_crush_remove(
CrushWrapper &newcrush,
int32_t id,
int32_t ancestor,
bool has_ancestor,
bool unlink_only);
void do_osd_crush_remove(CrushWrapper& newcrush);
int prepare_command_osd_crush_remove(
CrushWrapper &newcrush,
int32_t id,
int32_t ancestor,
bool has_ancestor,
bool unlink_only);
int prepare_command_osd_remove(int32_t id);
int prepare_command_osd_new(
MonOpRequestRef op,
const cmdmap_t& cmdmap,
const std::map<std::string,std::string>& secrets,
std::stringstream &ss,
ceph::Formatter *f);
int prepare_command_pool_set(const cmdmap_t& cmdmap,
std::stringstream& ss);
int prepare_command_pool_application(const std::string &prefix,
const cmdmap_t& cmdmap,
std::stringstream& ss);
int preprocess_command_pool_application(const std::string &prefix,
const cmdmap_t& cmdmap,
std::stringstream& ss,
bool *modified);
int _command_pool_application(const std::string &prefix,
const cmdmap_t& cmdmap,
std::stringstream& ss,
bool *modified,
bool preparing);
bool handle_osd_timeouts(const utime_t &now,
std::map<int, std::pair<utime_t, int>> &last_osd_report);
void send_latest(MonOpRequestRef op, epoch_t start=0);
// Immediately send the requester incremental maps starting at 'start'
// (0 = let send_incremental choose), tagging the op for event tracing.
void send_latest_now_nodelete(MonOpRequestRef op, epoch_t start=0) {
  op->mark_osdmon_event(__func__);
  send_incremental(op, start);
}
int get_version(version_t ver, ceph::buffer::list& bl) override;
int get_version(version_t ver, uint64_t feature, ceph::buffer::list& bl);
int get_version_full(version_t ver, uint64_t feature, ceph::buffer::list& bl);
int get_version_full(version_t ver, ceph::buffer::list& bl) override;
int get_inc(version_t ver, OSDMap::Incremental& inc);
int get_full_from_pinned_map(version_t ver, ceph::buffer::list& bl);
epoch_t blocklist(const entity_addrvec_t& av, utime_t until);
epoch_t blocklist(entity_addr_t a, utime_t until);
void dump_info(ceph::Formatter *f);
int dump_osd_metadata(int osd, ceph::Formatter *f, std::ostream *err);
void print_nodes(ceph::Formatter *f);
void check_osdmap_sub(Subscription *sub);
void check_pg_creates_sub(Subscription *sub);
void do_application_enable(int64_t pool_id, const std::string &app_name,
const std::string &app_key="",
const std::string &app_value="",
bool force=false);
void do_set_pool_opt(int64_t pool_id, pool_opts_t::key_t opt,
pool_opts_t::value_t);
// Stage setting an osdmap flag in the pending incremental.
// No-op when the flag is already set in the current map.
void add_flag(int flag) {
  if (osdmap.flags & flag)
    return;  // already set; nothing to stage
  if (pending_inc.new_flags < 0)
    pending_inc.new_flags = osdmap.flags;  // seed from the current flags
  pending_inc.new_flags |= flag;
}
// Stage clearing an osdmap flag in the pending incremental.
// No-op when the flag is not set in the current map.
void remove_flag(int flag) {
  if (!(osdmap.flags & flag))
    return;  // not set; nothing to stage
  if (pending_inc.new_flags < 0)
    pending_inc.new_flags = osdmap.flags;  // seed from the current flags
  pending_inc.new_flags &= ~flag;
}
void convert_pool_priorities(void);
/**
* Find the pools which are requested to be put into stretch mode,
* validate that they are allowed to be in stretch mode (eg, are replicated)
* and place copies of them in the pools set.
* This does not make any changes to the pools or state; it's just
* a safety-check-and-collect function.
*/
void try_enable_stretch_mode_pools(std::stringstream& ss, bool *okay,
int *errcode,
std::set<pg_pool_t*>* pools,
const std::string& new_crush_rule);
/**
* Check validity of inputs and OSD/CRUSH state to
* engage stretch mode. Designed to be used with
* MonmapMonitor::try_enable_stretch_mode() where we call both twice,
* first with commit=false to validate.
* @param ss: a stringstream to write errors into
* @param okay: Filled to true if okay, false if validation fails
* @param errcode: filled with -errno if there's a problem
* @param commit: true if we should commit the change, false if just testing
* @param dividing_bucket: the bucket type (eg 'dc') that divides the cluster
* @param bucket_count: The number of buckets required in peering.
* Currently must be 2.
* @param pools: The pg_pool_ts which are being set to stretch mode (obtained
* from try_enable_stretch_mode_pools()).
* @param new_crush_rule: The crush rule to set the pools to.
*/
void try_enable_stretch_mode(std::stringstream& ss, bool *okay,
int *errcode, bool commit,
const std::string& dividing_bucket,
uint32_t bucket_count,
const std::set<pg_pool_t*>& pools,
const std::string& new_crush_rule);
/**
* Check the input dead_buckets mapping (buckets->dead monitors) to see
* if the OSDs are also down. If so, fill in really_down_buckets and
* really_down_mons and return true; else return false.
*/
bool check_for_dead_crush_zones(const std::map<std::string,std::set<std::string>>& dead_buckets,
std::set<int> *really_down_buckets,
std::set<std::string> *really_down_mons);
/**
* Set degraded mode in the OSDMap, adding the given dead buckets to the dead set
* and using the live_zones (should presently be size 1)
*/
void trigger_degraded_stretch_mode(const std::set<int>& dead_buckets,
const std::set<std::string>& live_zones);
/**
* This is just to maintain stretch_recovery_triggered; below
*/
void set_degraded_stretch_mode();
/**
* Set recovery stretch mode in the OSDMap, resetting pool size back to normal
*/
void trigger_recovery_stretch_mode();
/**
* This is just to maintain stretch_recovery_triggered; below
*/
void set_recovery_stretch_mode();
/**
* This is just to maintain stretch_recovery_triggered; below
*/
void set_healthy_stretch_mode();
/**
* Tells the OSD there's a new pg digest, in case it's interested.
* (It's interested when in recovering stretch mode.)
*/
void notify_new_pg_digest();
/**
* Check if we can exit recovery stretch mode and go back to normal.
* @param force If true, we will force the exit through once it is legal,
* without regard to the reported PG status.
*/
void try_end_recovery_stretch_mode(bool force);
/**
* Sets the osdmap and pg_pool_t values back to healthy stretch mode status.
*/
void trigger_healthy_stretch_mode();
/**
* Obtain the crush rule being used for stretch pools.
* Note that right now this is heuristic and simply selects the
* most-used rule on replicated stretch pools.
* @return the crush rule ID, or a negative errno
*/
int get_replicated_stretch_crush_rule();
private:
utime_t stretch_recovery_triggered; // what time we committed a switch to recovery mode
};
#endif
| 30,372 | 33.475596 | 98 |
h
|
null |
ceph-main/src/mon/PGMap.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <boost/algorithm/string.hpp>
#include "include/rados.h"
#include "PGMap.h"
#define dout_subsys ceph_subsys_mon
#include "common/debug.h"
#include "common/Clock.h"
#include "common/Formatter.h"
#include "global/global_context.h"
#include "include/ceph_features.h"
#include "include/stringify.h"
#include "osd/osd_types.h"
#include "osd/OSDMap.h"
#include <boost/range/adaptor/reversed.hpp>
#define dout_context g_ceph_context
using std::list;
using std::make_pair;
using std::map;
using std::pair;
using std::ostream;
using std::ostringstream;
using std::set;
using std::string;
using std::stringstream;
using std::vector;
using ceph::bufferlist;
using ceph::fixed_u_to_string;
using ceph::common::cmd_getval;
using ceph::common::cmd_getval_or;
using ceph::common::cmd_putval;
MEMPOOL_DEFINE_OBJECT_FACTORY(PGMapDigest, pgmap_digest, pgmap);
MEMPOOL_DEFINE_OBJECT_FACTORY(PGMap, pgmap, pgmap);
MEMPOOL_DEFINE_OBJECT_FACTORY(PGMap::Incremental, pgmap_inc, pgmap);
// ---------------------
// PGMapDigest
/**
 * Encode the digest for sharing between daemons (see
 * PGMap::encode_digest).
 *
 * The field order here must stay in lock-step with decode(); when
 * extending, bump the struct version rather than reordering.
 */
void PGMapDigest::encode(bufferlist& bl, uint64_t features) const
{
  // NOTE: see PGMap::encode_digest
  uint8_t v = 4;
  // v4 is the only encoding emitted; all peers must be nautilus+
  assert(HAVE_FEATURE(features, SERVER_NAUTILUS));
  ENCODE_START(v, 1, bl);
  encode(num_pg, bl);
  encode(num_pg_active, bl);
  encode(num_pg_unknown, bl);
  encode(num_osd, bl);
  encode(pg_pool_sum, bl, features);
  encode(pg_sum, bl, features);
  encode(osd_sum, bl, features);
  encode(num_pg_by_state, bl);
  encode(num_pg_by_osd, bl);
  encode(num_pg_by_pool, bl);
  encode(osd_last_seq, bl);
  encode(per_pool_sum_delta, bl, features);
  encode(per_pool_sum_deltas_stamps, bl);
  encode(pg_sum_delta, bl, features);
  encode(stamp_delta, bl);
  encode(avail_space_by_rule, bl);
  encode(purged_snaps, bl);
  encode(osd_sum_by_class, bl, features);
  ENCODE_FINISH(bl);
}
/**
 * Decode a digest produced by encode().  Field order must match
 * encode() exactly.
 */
void PGMapDigest::decode(bufferlist::const_iterator& p)
{
  DECODE_START(4, p);
  // pre-v4 encodings are no longer supported (encode() requires nautilus+)
  assert(struct_v >= 4);
  decode(num_pg, p);
  decode(num_pg_active, p);
  decode(num_pg_unknown, p);
  decode(num_osd, p);
  decode(pg_pool_sum, p);
  decode(pg_sum, p);
  decode(osd_sum, p);
  decode(num_pg_by_state, p);
  decode(num_pg_by_osd, p);
  decode(num_pg_by_pool, p);
  decode(osd_last_seq, p);
  decode(per_pool_sum_delta, p);
  decode(per_pool_sum_deltas_stamps, p);
  decode(pg_sum_delta, p);
  decode(stamp_delta, p);
  decode(avail_space_by_rule, p);
  decode(purged_snaps, p);
  decode(osd_sum_by_class, p);
  DECODE_FINISH(p);
}
/**
 * Dump every digest field as structured output via the formatter.
 */
void PGMapDigest::dump(ceph::Formatter *f) const
{
  f->dump_unsigned("num_pg", num_pg);
  f->dump_unsigned("num_pg_active", num_pg_active);
  f->dump_unsigned("num_pg_unknown", num_pg_unknown);
  f->dump_unsigned("num_osd", num_osd);
  f->dump_object("pool_sum", pg_sum);
  f->dump_object("osd_sum", osd_sum);
  // per-device-class aggregate osd stats, keyed by class name
  f->open_object_section("osd_sum_by_class");
  for (auto& i : osd_sum_by_class) {
    f->dump_object(i.first.c_str(), i.second);
  }
  f->close_section();
  // per-pool aggregate stats, with a pg count where one is known
  f->open_array_section("pool_stats");
  for (auto& p : pg_pool_sum) {
    f->open_object_section("pool_stat");
    f->dump_int("poolid", p.first);
    auto q = num_pg_by_pool.find(p.first);
    if (q != num_pg_by_pool.end())
      f->dump_unsigned("num_pg", q->second);
    p.second.dump(f);
    f->close_section();
  }
  f->close_section();
  // osd_last_seq is positional: index i corresponds to osd.i
  f->open_array_section("osd_stats");
  int i = 0;
  // TODO: this isn't really correct since we can dump non-existent OSDs
  // I dunno what osd_last_seq is set to in that case...
  for (auto& p : osd_last_seq) {
    f->open_object_section("osd_stat");
    f->dump_int("osd", i);
    f->dump_unsigned("seq", p);
    f->close_section();
    ++i;
  }
  f->close_section();
  f->open_array_section("num_pg_by_state");
  for (auto& p : num_pg_by_state) {
    f->open_object_section("count");
    f->dump_string("state", pg_state_string(p.first));
    f->dump_unsigned("num", p.second);
    f->close_section();
  }
  f->close_section();
  f->open_array_section("num_pg_by_osd");
  for (auto& p : num_pg_by_osd) {
    f->open_object_section("count");
    f->dump_unsigned("osd", p.first);
    f->dump_unsigned("num_primary_pg", p.second.primary);
    f->dump_unsigned("num_acting_pg", p.second.acting);
    f->dump_unsigned("num_up_not_acting_pg", p.second.up_not_acting);
    f->close_section();
  }
  f->close_section();
  // purged snapshot intervals, per pool
  f->open_array_section("purged_snaps");
  for (auto& j : purged_snaps) {
    f->open_object_section("pool");
    f->dump_int("pool", j.first);
    f->open_object_section("purged_snaps");
    for (auto i = j.second.begin(); i != j.second.end(); ++i) {
      f->open_object_section("interval");
      f->dump_stream("start") << i.get_start();
      f->dump_stream("length") << i.get_len();
      f->close_section();
    }
    f->close_section();
    f->close_section();
  }
  f->close_section();
}
// Produce instances for encode/decode round-trip testing; a single
// default-constructed digest is sufficient.
void PGMapDigest::generate_test_instances(list<PGMapDigest*>& ls)
{
  ls.emplace_back(new PGMapDigest);
}
// Format a percentage value with two decimal places; anything below
// 0.01 (including negatives) collapses to the literal "0".
inline std::string percentify(const float& a) {
  if (a < 0.01)
    return "0";
  std::ostringstream oss;
  oss << std::fixed << std::setprecision(2) << a;
  return oss.str();
}
/**
 * Print the multi-section cluster summary (pools/objects/usage/pgs/io),
 * either structured via the formatter (if non-null) or as indented
 * plain text to *out.
 *
 * @param f   formatter for structured output, or nullptr
 * @param out plain-text stream; only used when f is nullptr
 */
void PGMapDigest::print_summary(ceph::Formatter *f, ostream *out) const
{
  if (f)
    f->open_array_section("pgs_by_state");
  // list is descending numeric order (by count)
  std::multimap<int,uint64_t> state_by_count;  // count -> state
  for (auto p = num_pg_by_state.begin();
       p != num_pg_by_state.end();
       ++p) {
    state_by_count.insert(make_pair(p->second, p->first));
  }
  if (f) {
    // iterate in reverse so the most-populated state comes first
    for (auto p = state_by_count.rbegin();
         p != state_by_count.rend();
         ++p)
    {
      f->open_object_section("pgs_by_state_element");
      f->dump_string("state_name", pg_state_string(p->second));
      f->dump_unsigned("count", p->first);
      f->close_section();
    }
  }
  if (f)
    f->close_section();
  if (f) {
    f->dump_unsigned("num_pgs", num_pg);
    f->dump_unsigned("num_pools", pg_pool_sum.size());
    f->dump_unsigned("num_objects", pg_sum.stats.sum.num_objects);
    f->dump_unsigned("data_bytes", pg_sum.stats.sum.num_bytes);
    f->dump_unsigned("bytes_used", osd_sum.statfs.get_used_raw());
    f->dump_unsigned("bytes_avail", osd_sum.statfs.available);
    f->dump_unsigned("bytes_total", osd_sum.statfs.total);
  } else {
    *out << " pools: " << pg_pool_sum.size() << " pools, "
         << num_pg << " pgs\n";
    *out << " objects: " << si_u_t(pg_sum.stats.sum.num_objects) << " objects, "
         << byte_u_t(pg_sum.stats.sum.num_bytes) << "\n";
    *out << " usage: "
         << byte_u_t(osd_sum.statfs.get_used_raw()) << " used, "
         << byte_u_t(osd_sum.statfs.available) << " / "
         << byte_u_t(osd_sum.statfs.total) << " avail\n";
    *out << " pgs: ";
  }
  // 'pad' tracks whether a continuation line under "pgs:" still needs
  // its leading indentation in the plain-text layout
  bool pad = false;
  if (num_pg_unknown > 0) {
    float p = (float)num_pg_unknown / (float)num_pg;
    if (f) {
      f->dump_float("unknown_pgs_ratio", p);
    } else {
      char b[20];
      snprintf(b, sizeof(b), "%.3lf", p * 100.0);
      *out << b << "% pgs unknown\n";
      pad = true;
    }
  }
  // inactive = everything that is neither active nor unknown
  int num_pg_inactive = num_pg - num_pg_active - num_pg_unknown;
  if (num_pg_inactive > 0) {
    float p = (float)num_pg_inactive / (float)num_pg;
    if (f) {
      f->dump_float("inactive_pgs_ratio", p);
    } else {
      if (pad) {
        *out << " ";
      }
      char b[20];
      snprintf(b, sizeof(b), "%.3f", p * 100.0);
      *out << b << "% pgs not active\n";
      pad = true;
    }
  }
  // degraded/misplaced/unfound lines (plain-text only here; the
  // formatter path is handled inside overall_recovery_summary)
  list<string> sl;
  overall_recovery_summary(f, &sl);
  if (!f && !sl.empty()) {
    for (auto p = sl.begin(); p != sl.end(); ++p) {
      if (pad) {
        *out << " ";
      }
      *out << *p << "\n";
      pad = true;
    }
  }
  sl.clear();
  if (!f) {
    // first pass: find the widest count so the state column lines up
    unsigned max_width = 1;
    for (auto p = state_by_count.rbegin(); p != state_by_count.rend(); ++p)
    {
      std::stringstream ss;
      ss << p->first;
      max_width = std::max<size_t>(ss.str().size(), max_width);
    }
    // second pass: emit "<count> <state>" lines, most populated first
    for (auto p = state_by_count.rbegin(); p != state_by_count.rend(); ++p)
    {
      if (pad) {
        *out << " ";
      }
      pad = true;
      out->setf(std::ios::left);
      *out << std::setw(max_width) << p->first
           << " " << pg_state_string(p->second) << "\n";
      out->unsetf(std::ios::left);
    }
  }
  // io rates: client / recovery / cache, each omitted when idle
  ostringstream ss_rec_io;
  overall_recovery_rate_summary(f, &ss_rec_io);
  ostringstream ss_client_io;
  overall_client_io_rate_summary(f, &ss_client_io);
  ostringstream ss_cache_io;
  overall_cache_io_rate_summary(f, &ss_cache_io);
  if (!f && (ss_client_io.str().length() || ss_rec_io.str().length()
             || ss_cache_io.str().length())) {
    *out << "\n \n";
    *out << " io:\n";
  }
  if (!f && ss_client_io.str().length())
    *out << " client: " << ss_client_io.str() << "\n";
  if (!f && ss_rec_io.str().length())
    *out << " recovery: " << ss_rec_io.str() << "\n";
  if (!f && ss_cache_io.str().length())
    *out << " cache: " << ss_cache_io.str() << "\n";
}
/**
 * Print a one-line digest summary ("N pgs: states; data, used/avail;
 * io rates; recovery"), to the formatter and/or the stream.  Either
 * argument may be null.
 */
void PGMapDigest::print_oneline_summary(ceph::Formatter *f, ostream *out) const
{
  std::stringstream ss;
  if (f)
    f->open_array_section("num_pg_by_state");
  for (auto p = num_pg_by_state.begin();
       p != num_pg_by_state.end();
       ++p) {
    if (f) {
      f->open_object_section("state");
      f->dump_string("name", pg_state_string(p->first));
      f->dump_unsigned("num", p->second);
      f->close_section();
    }
    if (p != num_pg_by_state.begin())
      ss << ", ";
    ss << p->second << " " << pg_state_string(p->first);
  }
  if (f)
    f->close_section();
  string states = ss.str();
  if (out)
    *out << num_pg << " pgs: "
         << states << "; "
         << byte_u_t(pg_sum.stats.sum.num_bytes) << " data, "
         << byte_u_t(osd_sum.statfs.get_used()) << " used, "
         << byte_u_t(osd_sum.statfs.available) << " / "
         << byte_u_t(osd_sum.statfs.total) << " avail";
  if (f) {
    f->dump_unsigned("num_pgs", num_pg);
    f->dump_unsigned("num_bytes", pg_sum.stats.sum.num_bytes);
    f->dump_int("total_bytes", osd_sum.statfs.total);
    f->dump_int("total_avail_bytes", osd_sum.statfs.available);
    f->dump_int("total_used_bytes", osd_sum.statfs.get_used());
    f->dump_int("total_used_raw_bytes", osd_sum.statfs.get_used_raw());
  }
  // make non-negative; we can get negative values if osds send
  // uncommitted stats and then "go backward" or if they are just
  // buggy/wrong.
  pool_stat_t pos_delta = pg_sum_delta;
  pos_delta.floor(0);
  if (pos_delta.stats.sum.num_rd ||
      pos_delta.stats.sum.num_wr) {
    if (out)
      *out << "; ";
    if (pos_delta.stats.sum.num_rd) {
      // kB counter -> bytes (<< 10), divided by the sampling interval
      int64_t rd = (pos_delta.stats.sum.num_rd_kb << 10) / (double)stamp_delta;
      if (out)
        *out << byte_u_t(rd) << "/s rd, ";
      if (f)
        f->dump_unsigned("read_bytes_sec", rd);
    }
    if (pos_delta.stats.sum.num_wr) {
      int64_t wr = (pos_delta.stats.sum.num_wr_kb << 10) / (double)stamp_delta;
      if (out)
        *out << byte_u_t(wr) << "/s wr, ";
      if (f)
        f->dump_unsigned("write_bytes_sec", wr);
    }
    int64_t iops = (pos_delta.stats.sum.num_rd + pos_delta.stats.sum.num_wr) / (double)stamp_delta;
    if (out)
      *out << si_u_t(iops) << " op/s";
    if (f)
      f->dump_unsigned("io_sec", iops);
  }
  // degraded/misplaced/unfound summary, then recovery rate
  list<string> sl;
  overall_recovery_summary(f, &sl);
  if (out)
    for (auto p = sl.begin(); p != sl.end(); ++p)
      *out << "; " << *p;
  std::stringstream ssr;
  overall_recovery_rate_summary(f, &ssr);
  if (out && ssr.str().length())
    *out << "; " << ssr.str() << " recovering";
}
/**
 * Report overall recovery health as ratios.
 *
 * @param misplaced_ratio    misplaced objects / total object copies
 * @param degraded_ratio     degraded objects / total object copies
 * @param inactive_pgs_ratio pgs neither active nor unknown / total pgs
 * @param unknown_pgs_ratio  unknown pgs / total pgs
 *
 * All outputs default to 0 when the corresponding denominator is not
 * positive.
 */
void PGMapDigest::get_recovery_stats(
  double *misplaced_ratio,
  double *degraded_ratio,
  double *inactive_pgs_ratio,
  double *unknown_pgs_ratio) const
{
  const auto& sum = pg_sum.stats.sum;
  *degraded_ratio =
    (sum.num_objects_degraded && sum.num_object_copies > 0)
      ? double(sum.num_objects_degraded) / double(sum.num_object_copies)
      : 0;
  *misplaced_ratio =
    (sum.num_objects_misplaced && sum.num_object_copies > 0)
      ? double(sum.num_objects_misplaced) / double(sum.num_object_copies)
      : 0;
  if (num_pg <= 0) {
    *inactive_pgs_ratio = 0;
    *unknown_pgs_ratio = 0;
    return;
  }
  // inactive = everything that is neither active nor unknown
  const int num_pg_inactive = num_pg - num_pg_active - num_pg_unknown;
  *inactive_pgs_ratio = double(num_pg_inactive) / double(num_pg);
  *unknown_pgs_ratio = double(num_pg_unknown) / double(num_pg);
}
/**
 * Summarize degraded/misplaced/unfound object counts for pool_sum.
 *
 * With a formatter, dumps count/total/ratio triples; otherwise appends
 * human-readable "N/M objects <kind> (P%)" lines to *psl.  Categories
 * with a zero count (or non-positive total) are omitted.
 */
void PGMapDigest::recovery_summary(ceph::Formatter *f, list<string> *psl,
                                   const pool_stat_t& pool_sum) const
{
  const auto& s = pool_sum.stats.sum;
  // Shared emitter for one category: structured triple when a formatter
  // is present, otherwise a formatted text line on *psl.
  auto report = [&](int64_t count, int64_t total, const char* noun,
                    const char* count_key, const char* total_key,
                    const char* ratio_key) {
    double pc = (double)count / (double)total * (double)100.0;
    char b[20];
    snprintf(b, sizeof(b), "%.3lf", pc);
    if (f) {
      f->dump_unsigned(count_key, count);
      f->dump_unsigned(total_key, total);
      f->dump_float(ratio_key, pc / 100.0);
    } else {
      ostringstream ss;
      ss << count << "/" << total << " objects " << noun << " (" << b << "%)";
      psl->push_back(ss.str());
    }
  };
  if (s.num_objects_degraded && s.num_object_copies > 0)
    report(s.num_objects_degraded, s.num_object_copies, "degraded",
           "degraded_objects", "degraded_total", "degraded_ratio");
  if (s.num_objects_misplaced && s.num_object_copies > 0)
    report(s.num_objects_misplaced, s.num_object_copies, "misplaced",
           "misplaced_objects", "misplaced_total", "misplaced_ratio");
  if (s.num_objects_unfound && s.num_objects)
    report(s.num_objects_unfound, s.num_objects, "unfound",
           "unfound_objects", "unfound_total", "unfound_ratio");
}
// Emit recovery throughput (bytes/keys/objects per second) computed
// from the given counter delta over the wall-clock interval it covers.
// Emits nothing when no recovery progress happened in the interval.
// Writes to *f when non-null, otherwise to *out (which must then be
// valid).
void PGMapDigest::recovery_rate_summary(ceph::Formatter *f, ostream *out,
					const pool_stat_t& delta_sum,
					utime_t delta_stamp) const
{
  // make non-negative; we can get negative values if osds send
  // uncommitted stats and then "go backward" or if they are just
  // buggy/wrong.
  pool_stat_t pos_delta = delta_sum;
  pos_delta.floor(0);
  if (pos_delta.stats.sum.num_objects_recovered ||
      pos_delta.stats.sum.num_bytes_recovered ||
      pos_delta.stats.sum.num_keys_recovered) {
    // rate = counter delta divided by elapsed seconds
    int64_t objps = pos_delta.stats.sum.num_objects_recovered / (double)delta_stamp;
    int64_t bps = pos_delta.stats.sum.num_bytes_recovered / (double)delta_stamp;
    int64_t kps = pos_delta.stats.sum.num_keys_recovered / (double)delta_stamp;
    if (f) {
      f->dump_int("recovering_objects_per_sec", objps);
      f->dump_int("recovering_bytes_per_sec", bps);
      f->dump_int("recovering_keys_per_sec", kps);
      f->dump_int("num_objects_recovered", pos_delta.stats.sum.num_objects_recovered);
      f->dump_int("num_bytes_recovered", pos_delta.stats.sum.num_bytes_recovered);
      f->dump_int("num_keys_recovered", pos_delta.stats.sum.num_keys_recovered);
    } else {
      // key rate is only shown when keys were actually recovered
      *out << byte_u_t(bps) << "/s";
      if (pos_delta.stats.sum.num_keys_recovered)
	*out << ", " << si_u_t(kps) << " keys/s";
      *out << ", " << si_u_t(objps) << " objects/s";
    }
  }
}
// Recovery rate across the whole cluster, based on the smoothed
// cluster-wide delta (pg_sum_delta over stamp_delta).
void PGMapDigest::overall_recovery_rate_summary(ceph::Formatter *f, ostream *out) const
{
  recovery_rate_summary(f, out, pg_sum_delta, stamp_delta);
}
// Degraded/misplaced/unfound summary across the whole cluster.
void PGMapDigest::overall_recovery_summary(ceph::Formatter *f, list<string> *psl) const
{
  recovery_summary(f, psl, pg_sum);
}
// Recovery rate for a single pool; silently a no-op when we have not
// yet recorded a delta for that pool.
void PGMapDigest::pool_recovery_rate_summary(ceph::Formatter *f, ostream *out,
					     uint64_t poolid) const
{
  auto delta_it = per_pool_sum_delta.find(poolid);
  if (delta_it == per_pool_sum_delta.end()) {
    return;
  }
  // a delta entry always has a matching timestamp entry
  auto stamp_it = per_pool_sum_deltas_stamps.find(delta_it->first);
  ceph_assert(stamp_it != per_pool_sum_deltas_stamps.end());
  recovery_rate_summary(f, out, delta_it->second.first, stamp_it->second);
}
// Degraded/misplaced/unfound summary for a single pool; no-op for
// pools we have no stats for.
void PGMapDigest::pool_recovery_summary(ceph::Formatter *f, list<string> *psl,
					uint64_t poolid) const
{
  auto it = pg_pool_sum.find(poolid);
  if (it == pg_pool_sum.end()) {
    return;
  }
  recovery_summary(f, psl, it->second);
}
// Emit client I/O rates (read/write bandwidth and IOPS) computed from
// the given stat delta over the interval delta_stamp.  Emits nothing
// when there was no client I/O in the interval.  Writes to *f when
// non-null, otherwise to *out (which must then be valid).
void PGMapDigest::client_io_rate_summary(ceph::Formatter *f, ostream *out,
					 const pool_stat_t& delta_sum,
					 utime_t delta_stamp) const
{
  // clamp to zero: OSDs can report stats that appear to go "backward"
  pool_stat_t pos_delta = delta_sum;
  pos_delta.floor(0);
  if (pos_delta.stats.sum.num_rd ||
      pos_delta.stats.sum.num_wr) {
    if (pos_delta.stats.sum.num_rd) {
      // num_rd_kb is in KiB; shift to bytes before dividing by seconds
      int64_t rd = (pos_delta.stats.sum.num_rd_kb << 10) / (double)delta_stamp;
      if (f) {
	f->dump_int("read_bytes_sec", rd);
      } else {
	*out << byte_u_t(rd) << "/s rd, ";
      }
    }
    if (pos_delta.stats.sum.num_wr) {
      int64_t wr = (pos_delta.stats.sum.num_wr_kb << 10) / (double)delta_stamp;
      if (f) {
	f->dump_int("write_bytes_sec", wr);
      } else {
	*out << byte_u_t(wr) << "/s wr, ";
      }
    }
    // op counters are already per-op, only divide by elapsed seconds
    int64_t iops_rd = pos_delta.stats.sum.num_rd / (double)delta_stamp;
    int64_t iops_wr = pos_delta.stats.sum.num_wr / (double)delta_stamp;
    if (f) {
      f->dump_int("read_op_per_sec", iops_rd);
      f->dump_int("write_op_per_sec", iops_wr);
    } else {
      *out << si_u_t(iops_rd) << " op/s rd, " << si_u_t(iops_wr) << " op/s wr";
    }
  }
}
// Client I/O rate across the whole cluster, based on the smoothed
// cluster-wide delta.
void PGMapDigest::overall_client_io_rate_summary(ceph::Formatter *f, ostream *out) const
{
  client_io_rate_summary(f, out, pg_sum_delta, stamp_delta);
}
// Client I/O rate for a single pool; silently a no-op when we have not
// yet recorded a delta for that pool.
void PGMapDigest::pool_client_io_rate_summary(ceph::Formatter *f, ostream *out,
					      uint64_t poolid) const
{
  auto delta_it = per_pool_sum_delta.find(poolid);
  if (delta_it == per_pool_sum_delta.end()) {
    return;
  }
  // a delta entry always has a matching timestamp entry
  auto stamp_it = per_pool_sum_deltas_stamps.find(delta_it->first);
  ceph_assert(stamp_it != per_pool_sum_deltas_stamps.end());
  client_io_rate_summary(f, out, delta_it->second.first, stamp_it->second);
}
// Emit cache-tier activity rates (flush/evict bandwidth, promote ops)
// and the number of PGs currently in each flush/evict mode, computed
// from the given delta over delta_stamp.  Plain-text stanzas are
// comma-separated; have_output tracks whether a separator is needed.
void PGMapDigest::cache_io_rate_summary(ceph::Formatter *f, ostream *out,
					const pool_stat_t& delta_sum,
					utime_t delta_stamp) const
{
  // clamp to zero: OSDs can report stats that appear to go "backward"
  pool_stat_t pos_delta = delta_sum;
  pos_delta.floor(0);
  bool have_output = false;

  if (pos_delta.stats.sum.num_flush) {
    // *_kb counters are in KiB; shift to bytes before dividing
    int64_t flush = (pos_delta.stats.sum.num_flush_kb << 10) / (double)delta_stamp;
    if (f) {
      f->dump_int("flush_bytes_sec", flush);
    } else {
      *out << byte_u_t(flush) << "/s flush";
      have_output = true;
    }
  }
  if (pos_delta.stats.sum.num_evict) {
    int64_t evict = (pos_delta.stats.sum.num_evict_kb << 10) / (double)delta_stamp;
    if (f) {
      f->dump_int("evict_bytes_sec", evict);
    } else {
      if (have_output)
	*out << ", ";
      *out << byte_u_t(evict) << "/s evict";
      have_output = true;
    }
  }
  if (pos_delta.stats.sum.num_promote) {
    int64_t promote = pos_delta.stats.sum.num_promote / (double)delta_stamp;
    if (f) {
      f->dump_int("promote_op_per_sec", promote);
    } else {
      if (have_output)
	*out << ", ";
      *out << si_u_t(promote) << " op/s promote";
      have_output = true;
    }
  }
  // the *_mode_* fields are PG counts, not rates
  if (pos_delta.stats.sum.num_flush_mode_low) {
    if (f) {
      f->dump_int("num_flush_mode_low", pos_delta.stats.sum.num_flush_mode_low);
    } else {
      if (have_output)
	*out << ", ";
      *out << si_u_t(pos_delta.stats.sum.num_flush_mode_low) << " PGs flushing";
      have_output = true;
    }
  }
  if (pos_delta.stats.sum.num_flush_mode_high) {
    if (f) {
      f->dump_int("num_flush_mode_high", pos_delta.stats.sum.num_flush_mode_high);
    } else {
      if (have_output)
	*out << ", ";
      *out << si_u_t(pos_delta.stats.sum.num_flush_mode_high) << " PGs flushing (high)";
      have_output = true;
    }
  }
  if (pos_delta.stats.sum.num_evict_mode_some) {
    if (f) {
      f->dump_int("num_evict_mode_some", pos_delta.stats.sum.num_evict_mode_some);
    } else {
      if (have_output)
	*out << ", ";
      *out << si_u_t(pos_delta.stats.sum.num_evict_mode_some) << " PGs evicting";
      have_output = true;
    }
  }
  if (pos_delta.stats.sum.num_evict_mode_full) {
    if (f) {
      f->dump_int("num_evict_mode_full", pos_delta.stats.sum.num_evict_mode_full);
    } else {
      if (have_output)
	*out << ", ";
      // last stanza; have_output is not needed afterwards
      *out << si_u_t(pos_delta.stats.sum.num_evict_mode_full) << " PGs evicting (full)";
    }
  }
}
// Cache-tier activity across the whole cluster, based on the smoothed
// cluster-wide delta.
void PGMapDigest::overall_cache_io_rate_summary(ceph::Formatter *f, ostream *out) const
{
  cache_io_rate_summary(f, out, pg_sum_delta, stamp_delta);
}
// Cache-tier activity for a single pool; silently a no-op when we have
// not yet recorded a delta for that pool.
void PGMapDigest::pool_cache_io_rate_summary(ceph::Formatter *f, ostream *out,
					     uint64_t poolid) const
{
  auto delta_it = per_pool_sum_delta.find(poolid);
  if (delta_it == per_pool_sum_delta.end()) {
    return;
  }
  // a delta entry always has a matching timestamp entry
  auto stamp_it = per_pool_sum_deltas_stamps.find(delta_it->first);
  ceph_assert(stamp_it != per_pool_sum_deltas_stamps.end());
  cache_io_rate_summary(f, out, delta_it->second.first, stamp_it->second);
}
// Build a ceph_statfs (KB-granularity) reply.  When a specific data
// pool is requested and we have stats for it, report that pool's usage
// and free space; otherwise report cluster-wide totals.
ceph_statfs PGMapDigest::get_statfs(OSDMap &osdmap,
				    std::optional<int64_t> data_pool) const
{
  ceph_statfs statfs;
  const object_stat_sum_t *per_pool = nullptr;
  if (data_pool) {
    auto i = pg_pool_sum.find(*data_pool);
    if (i != pg_pool_sum.end())
      per_pool = &i->second.stats.sum;
  }
  if (per_pool) {
    statfs.kb_used = per_pool->num_bytes >> 10;
    statfs.kb_avail = get_pool_free_space(osdmap, *data_pool) >> 10;
    statfs.num_objects = per_pool->num_objects;
    statfs.kb = statfs.kb_used + statfs.kb_avail;
  } else {
    // these are in KB.
    statfs.kb = osd_sum.statfs.kb();
    statfs.kb_used = osd_sum.statfs.kb_used_raw();
    statfs.kb_avail = osd_sum.statfs.kb_avail();
    statfs.num_objects = pg_sum.stats.sum.num_objects;
  }
  return statfs;
}
// Render the per-pool usage section of "ceph df": one row/section per
// pool we have stats for.  Plain-text output goes into *ss as a
// TextTable; structured output goes into *f.  verbose adds data/omap
// breakdowns, quota, dirty and compression columns.
void PGMapDigest::dump_pool_stats_full(
  const OSDMap &osd_map,
  stringstream *ss,
  ceph::Formatter *f,
  bool verbose) const
{
  TextTable tbl;

  if (f) {
    f->open_array_section("pools");
  } else {
    tbl.define_column("POOL", TextTable::LEFT, TextTable::LEFT);
    tbl.define_column("ID", TextTable::RIGHT, TextTable::RIGHT);
    tbl.define_column("PGS", TextTable::RIGHT, TextTable::RIGHT);
    tbl.define_column("STORED", TextTable::RIGHT, TextTable::RIGHT);
    if (verbose) {
      tbl.define_column("(DATA)", TextTable::RIGHT, TextTable::RIGHT);
      tbl.define_column("(OMAP)", TextTable::RIGHT, TextTable::RIGHT);
    }
    tbl.define_column("OBJECTS", TextTable::RIGHT, TextTable::RIGHT);
    tbl.define_column("USED", TextTable::RIGHT, TextTable::RIGHT);
    if (verbose) {
      tbl.define_column("(DATA)", TextTable::RIGHT, TextTable::RIGHT);
      tbl.define_column("(OMAP)", TextTable::RIGHT, TextTable::RIGHT);
    }
    tbl.define_column("%USED", TextTable::RIGHT, TextTable::RIGHT);
    tbl.define_column("MAX AVAIL", TextTable::RIGHT, TextTable::RIGHT);

    if (verbose) {
      tbl.define_column("QUOTA OBJECTS", TextTable::RIGHT, TextTable::RIGHT);
      tbl.define_column("QUOTA BYTES", TextTable::RIGHT, TextTable::RIGHT);
      tbl.define_column("DIRTY", TextTable::RIGHT, TextTable::RIGHT);
      tbl.define_column("USED COMPR", TextTable::RIGHT, TextTable::RIGHT);
      tbl.define_column("UNDER COMPR", TextTable::RIGHT, TextTable::RIGHT);
    }
  }

  // cache per-rule availability: pools often share a CRUSH rule
  map<int,uint64_t> avail_by_rule;
  for (auto p = osd_map.get_pools().begin();
       p != osd_map.get_pools().end(); ++p) {
    int64_t pool_id = p->first;
    if ((pool_id < 0) || (pg_pool_sum.count(pool_id) == 0))
      continue;

    const string& pool_name = osd_map.get_pool_name(pool_id);
    auto pool_pg_num = osd_map.get_pg_num(pool_id);
    const pool_stat_t &stat = pg_pool_sum.at(pool_id);

    const pg_pool_t *pool = osd_map.get_pg_pool(pool_id);
    int ruleno = pool->get_crush_rule();
    int64_t avail;
    if (avail_by_rule.count(ruleno) == 0) {
      // FIXME: we don't guarantee avail_space_by_rule is up-to-date before this function is invoked
      avail = get_rule_avail(ruleno);
      if (avail < 0)
	avail = 0;
      avail_by_rule[ruleno] = avail;
    } else {
      avail = avail_by_rule[ruleno];
    }
    if (f) {
      f->open_object_section("pool");
      f->dump_string("name", pool_name);
      f->dump_int("id", pool_id);
      f->open_object_section("stats");
    } else {
      tbl << pool_name
          << pool_id
          << pool_pg_num;
    }
    // raw_used_rate: replication factor / EC overhead for this pool
    float raw_used_rate = osd_map.pool_raw_used_rate(pool_id);
    bool per_pool = use_per_pool_stats();
    bool per_pool_omap = use_per_pool_omap_stats();
    dump_object_stat_sum(tbl, f, stat, avail, raw_used_rate, verbose, per_pool,
			 per_pool_omap, pool);
    if (f) {
      f->close_section();  // stats
      f->close_section();  // pool
    } else {
      tbl << TextTable::endrow;
    }
  }
  if (f)
    f->close_section();
  else {
    ceph_assert(ss != nullptr);
    *ss << "--- POOLS ---\n";
    *ss << tbl;
  }
}
// Render the cluster-wide raw storage section of "ceph df": totals and
// a per-device-class breakdown.  Structured output goes to *f,
// plain-text to *ss as a TextTable.
void PGMapDigest::dump_cluster_stats(stringstream *ss,
				     ceph::Formatter *f,
				     bool verbose) const
{
  if (f) {
    f->open_object_section("stats");
    f->dump_int("total_bytes", osd_sum.statfs.total);
    f->dump_int("total_avail_bytes", osd_sum.statfs.available);
    f->dump_int("total_used_bytes", osd_sum.statfs.get_used());
    f->dump_int("total_used_raw_bytes", osd_sum.statfs.get_used_raw());
    f->dump_float("total_used_raw_ratio", osd_sum.statfs.get_used_raw_ratio());
    f->dump_unsigned("num_osds", osd_sum.num_osds);
    f->dump_unsigned("num_per_pool_osds", osd_sum.num_per_pool_osds);
    f->dump_unsigned("num_per_pool_omap_osds", osd_sum.num_per_pool_omap_osds);
    f->close_section();
    // same totals, broken down per CRUSH device class
    f->open_object_section("stats_by_class");
    for (auto& i : osd_sum_by_class) {
      f->open_object_section(i.first.c_str());
      f->dump_int("total_bytes", i.second.statfs.total);
      f->dump_int("total_avail_bytes", i.second.statfs.available);
      f->dump_int("total_used_bytes", i.second.statfs.get_used());
      f->dump_int("total_used_raw_bytes", i.second.statfs.get_used_raw());
      f->dump_float("total_used_raw_ratio",
		    i.second.statfs.get_used_raw_ratio());
      f->close_section();
    }
    f->close_section();
  } else {
    ceph_assert(ss != nullptr);
    TextTable tbl;
    tbl.define_column("CLASS", TextTable::LEFT, TextTable::LEFT);
    tbl.define_column("SIZE", TextTable::RIGHT, TextTable::RIGHT);
    tbl.define_column("AVAIL", TextTable::RIGHT, TextTable::RIGHT);
    tbl.define_column("USED", TextTable::RIGHT, TextTable::RIGHT);
    tbl.define_column("RAW USED", TextTable::RIGHT, TextTable::RIGHT);
    tbl.define_column("%RAW USED", TextTable::RIGHT, TextTable::RIGHT);

    // one row per device class, then a TOTAL row
    for (auto& i : osd_sum_by_class) {
      tbl << i.first;
      tbl << stringify(byte_u_t(i.second.statfs.total))
	  << stringify(byte_u_t(i.second.statfs.available))
	  << stringify(byte_u_t(i.second.statfs.get_used()))
	  << stringify(byte_u_t(i.second.statfs.get_used_raw()))
	  << percentify(i.second.statfs.get_used_raw_ratio()*100.0)
	  << TextTable::endrow;
    }
    tbl << "TOTAL";
    tbl << stringify(byte_u_t(osd_sum.statfs.total))
	<< stringify(byte_u_t(osd_sum.statfs.available))
	<< stringify(byte_u_t(osd_sum.statfs.get_used()))
	<< stringify(byte_u_t(osd_sum.statfs.get_used_raw()))
	<< percentify(osd_sum.statfs.get_used_raw_ratio()*100.0)
	<< TextTable::endrow;
    *ss << "--- RAW STORAGE ---\n";
    *ss << tbl;
  }
}
// Emit usage stats for a single pool (one row of "ceph df"), given the
// pool's stat sum, the raw space available to its CRUSH rule (avail)
// and the raw-used amplification factor (raw_used_rate).  Writes to
// tbl when f is null, otherwise to f.  pool is only dereferenced when
// verbose is set.
void PGMapDigest::dump_object_stat_sum(
  TextTable &tbl, ceph::Formatter *f,
  const pool_stat_t &pool_stat, uint64_t avail,
  float raw_used_rate, bool verbose, bool per_pool, bool per_pool_omap,
  const pg_pool_t *pool)
{
  const object_stat_sum_t &sum = pool_stat.stats.sum;
  const store_statfs_t statfs = pool_stat.store_stats;

  // scale the amplification factor down by the fraction of copies that
  // are actually present (degraded copies take no space)
  if (sum.num_object_copies > 0) {
    raw_used_rate *= (float)(sum.num_object_copies - sum.num_objects_degraded) / sum.num_object_copies;
  }

  uint64_t used_data_bytes = pool_stat.get_allocated_data_bytes(per_pool);
  uint64_t used_omap_bytes = pool_stat.get_allocated_omap_bytes(per_pool_omap);
  uint64_t used_bytes = used_data_bytes + used_omap_bytes;

  float used = 0.0;
  // note avail passed in is raw_avail, calc raw_used here.
  if (avail) {
    used = used_bytes;
    used /= used + avail;
  } else if (used_bytes) {
    used = 1.0;
  }
  // de-amplify the available raw space into user-visible space
  auto avail_res = raw_used_rate ? avail / raw_used_rate : 0;
  // an approximation for actually stored user data
  auto stored_data_normalized = pool_stat.get_user_data_bytes(
    raw_used_rate, per_pool);
  auto stored_omap_normalized = pool_stat.get_user_omap_bytes(
    raw_used_rate, per_pool_omap);
  auto stored_normalized = stored_data_normalized + stored_omap_normalized;
  // same, amplied by replication or EC
  auto stored_raw = stored_normalized * raw_used_rate;
  if (f) {
    f->dump_int("stored", stored_normalized);
    if (verbose) {
      f->dump_int("stored_data", stored_data_normalized);
      f->dump_int("stored_omap", stored_omap_normalized);
    }
    f->dump_int("objects", sum.num_objects);
    f->dump_int("kb_used", shift_round_up(used_bytes, 10));
    f->dump_int("bytes_used", used_bytes);
    if (verbose) {
      f->dump_int("data_bytes_used", used_data_bytes);
      f->dump_int("omap_bytes_used", used_omap_bytes);
    }
    f->dump_float("percent_used", used);
    f->dump_unsigned("max_avail", avail_res);
    if (verbose) {
      f->dump_int("quota_objects", pool->quota_max_objects);
      f->dump_int("quota_bytes", pool->quota_max_bytes);
      // dirty-object counts only make sense for cache-tier pools
      if (pool->is_tier()) {
        f->dump_int("dirty", sum.num_objects_dirty);
      } else {
        f->dump_int("dirty", 0);
      }
      f->dump_int("rd", sum.num_rd);
      f->dump_int("rd_bytes", sum.num_rd_kb * 1024ull);
      f->dump_int("wr", sum.num_wr);
      f->dump_int("wr_bytes", sum.num_wr_kb * 1024ull);
      f->dump_int("compress_bytes_used", statfs.data_compressed_allocated);
      f->dump_int("compress_under_bytes", statfs.data_compressed_original);
      // Stored by user amplified by replication
      f->dump_int("stored_raw", stored_raw);
      f->dump_unsigned("avail_raw", avail);
    }
  } else {
    tbl << stringify(byte_u_t(stored_normalized));
    if (verbose) {
      tbl << stringify(byte_u_t(stored_data_normalized));
      tbl << stringify(byte_u_t(stored_omap_normalized));
    }
    tbl << stringify(si_u_t(sum.num_objects));
    tbl << stringify(byte_u_t(used_bytes));
    if (verbose) {
      tbl << stringify(byte_u_t(used_data_bytes));
      tbl << stringify(byte_u_t(used_omap_bytes));
    }
    tbl << percentify(used*100);
    tbl << stringify(byte_u_t(avail_res));
    if (verbose) {
      // unset quotas are shown as N/A rather than 0
      if (pool->quota_max_objects == 0)
        tbl << "N/A";
      else
        tbl << stringify(si_u_t(pool->quota_max_objects));
      if (pool->quota_max_bytes == 0)
        tbl << "N/A";
      else
        tbl << stringify(byte_u_t(pool->quota_max_bytes));
      if (pool->is_tier()) {
        tbl << stringify(si_u_t(sum.num_objects_dirty));
      } else {
        tbl << "N/A";
      }
      tbl << stringify(byte_u_t(statfs.data_compressed_allocated));
      tbl << stringify(byte_u_t(statfs.data_compressed_original));
    }
  }
}
// Free space (in bytes) usable by the given pool: the space available
// under its CRUSH rule, de-amplified by the pool's raw-used rate
// (replication factor or EC overhead).
int64_t PGMapDigest::get_pool_free_space(const OSDMap &osd_map,
                                         int64_t poolid) const
{
  const pg_pool_t *pool = osd_map.get_pg_pool(poolid);
  int64_t avail = get_rule_avail(pool->get_crush_rule());
  if (avail < 0)
    avail = 0;

  return avail / osd_map.pool_raw_used_rate(poolid);
}
// Project the usable space (bytes) under a CRUSH rule: for each OSD the
// rule maps to, scale its free space (minus the full-ratio reserve) by
// the inverse of its weight, and take the minimum over all OSDs.
// Returns a negative error if the rule's weight map cannot be computed,
// 0 if the rule maps to no OSDs, and -1 if no mapped OSD had usable
// stats.
int64_t PGMap::get_rule_avail(const OSDMap& osdmap, int ruleno) const
{
  map<int,float> wm;
  int r = osdmap.crush->get_rule_weight_osd_map(ruleno, &wm);
  if (r < 0) {
    return r;
  }
  if (wm.empty()) {
    return 0;
  }

  float fratio = osdmap.get_full_ratio();

  int64_t min = -1;
  for (auto p = wm.begin(); p != wm.end(); ++p) {
    auto osd_info = osd_stat.find(p->first);
    if (osd_info != osd_stat.end()) {
      if (osd_info->second.statfs.total == 0 || p->second == 0) {
	// osd must be out, hence its stats have been zeroed
	// (unless we somehow managed to have a disk with size 0...)
	//
	// (p->second == 0), if osd weight is 0, no need to
	// calculate proj below.
	continue;
      }
      // reserve the fraction above the full ratio; it is not usable
      double unusable = (double)osd_info->second.statfs.kb() *
	(1.0 - fratio);
      double avail = std::max(0.0, (double)osd_info->second.statfs.kb_avail() - unusable);
      avail *= 1024.0;
      // projected usable space if this OSD is the bottleneck
      int64_t proj = (int64_t)(avail / (double)p->second);
      if (min < 0 || proj < min) {
	min = proj;
      }
    } else {
      if (osdmap.is_up(p->first)) {
	// This is a level 4 rather than an error, because we might have
	// only just started, and not received the first stats message yet.
	dout(4) << "OSD " << p->first << " is up, but has no stats" << dendl;
      }
    }
  }
  return min;
}
// Compute, for every CRUSH rule used by a pool we have stats for, the
// space available under that rule.  Results are keyed by rule id in
// *avail_map; previous contents are discarded.
void PGMap::get_rules_avail(const OSDMap& osdmap,
			    std::map<int,int64_t> *avail_map) const
{
  avail_map->clear();
  // iterate by const reference: pg_pool_t is a large struct and the
  // previous by-value range-for copied every pool per iteration
  for (const auto& [pool_id, pool] : osdmap.get_pools()) {
    if ((pool_id < 0) || (pg_pool_sum.count(pool_id) == 0))
      continue;
    int ruleno = pool.get_crush_rule();
    // compute each rule's availability only once
    if (avail_map->count(ruleno) == 0)
      (*avail_map)[ruleno] = get_rule_avail(osdmap, ruleno);
  }
}
// ---------------------
// PGMap
// Dump every field of this incremental update in structured form:
// header values, then the pg/osd/pool-statfs update maps, then the
// osd and pg removal sets.
void PGMap::Incremental::dump(ceph::Formatter *f) const
{
  f->dump_unsigned("version", version);
  f->dump_stream("stamp") << stamp;
  f->dump_unsigned("osdmap_epoch", osdmap_epoch);
  f->dump_unsigned("pg_scan_epoch", pg_scan);

  f->open_array_section("pg_stat_updates");
  for (const auto& [pgid, st] : pg_stat_updates) {
    f->open_object_section("pg_stat");
    f->dump_stream("pgid") << pgid;
    st.dump(f);
    f->close_section();
  }
  f->close_section();

  f->open_array_section("osd_stat_updates");
  for (const auto& [osd, st] : osd_stat_updates) {
    f->open_object_section("osd_stat");
    f->dump_int("osd", osd);
    st.dump(f);
    f->close_section();
  }
  f->close_section();

  f->open_array_section("pool_statfs_updates");
  for (const auto& [pool_osd, statfs] : pool_statfs_updates) {
    f->open_object_section("pool_statfs");
    f->dump_stream("poolid/osd") << pool_osd;
    statfs.dump(f);
    f->close_section();
  }
  f->close_section();

  f->open_array_section("osd_stat_removals");
  for (int osd : osd_stat_rm)
    f->dump_int("osd", osd);
  f->close_section();

  f->open_array_section("pg_removals");
  for (const auto& pgid : pg_remove)
    f->dump_stream("pgid") << pgid;
  f->close_section();
}
// Build representative Incremental instances for encode/decode unit
// tests: default-constructed, versioned, with stat updates, and with
// removals plus a pool statfs update.
void PGMap::Incremental::generate_test_instances(list<PGMap::Incremental*>& o)
{
  o.push_back(new Incremental);
  o.push_back(new Incremental);
  o.back()->version = 1;
  o.back()->stamp = utime_t(123,345);
  o.push_back(new Incremental);
  o.back()->version = 2;
  o.back()->pg_stat_updates[pg_t(1,2)] = pg_stat_t();
  o.back()->osd_stat_updates[5] = osd_stat_t();
  o.push_back(new Incremental);
  o.back()->version = 3;
  o.back()->osdmap_epoch = 1;
  o.back()->pg_scan = 2;
  o.back()->pg_stat_updates[pg_t(4,5)] = pg_stat_t();
  o.back()->osd_stat_updates[6] = osd_stat_t();
  o.back()->pg_remove.insert(pg_t(1,2));
  o.back()->osd_stat_rm.insert(5);
  o.back()->pool_statfs_updates[std::make_pair(1234,4)] = store_statfs_t();
}
// --
// Apply one Incremental (which must be exactly version+1) to this map:
// update/insert per-PG and per-OSD stats, fold in per-(pool,osd)
// statfs updates, process PG and OSD removals, and refresh the
// smoothed rate deltas.
void PGMap::apply_incremental(CephContext *cct, const Incremental& inc)
{
  ceph_assert(inc.version == version+1);
  version++;

  // remember the previous sums so we can compute deltas afterwards
  pool_stat_t pg_sum_old = pg_sum;
  mempool::pgmap::unordered_map<int32_t, pool_stat_t> pg_pool_sum_old;
  pg_pool_sum_old = pg_pool_sum;

  for (auto p = inc.pg_stat_updates.begin();
       p != inc.pg_stat_updates.end();
       ++p) {
    const pg_t &update_pg(p->first);
    auto update_pool = update_pg.pool();
    const pg_stat_t &update_stat(p->second);

    auto pg_stat_iter = pg_stat.find(update_pg);
    pool_stat_t &pool_sum_ref = pg_pool_sum[update_pool];
    if (pg_stat_iter == pg_stat.end()) {
      pg_stat.insert(make_pair(update_pg, update_stat));
    } else {
      // subtract the old stats before overwriting them
      stat_pg_sub(update_pg, pg_stat_iter->second);
      pool_sum_ref.sub(pg_stat_iter->second);
      pg_stat_iter->second = update_stat;
    }
    stat_pg_add(update_pg, update_stat);
    pool_sum_ref.add(update_stat);
  }

  for (auto p = inc.pool_statfs_updates.begin();
       p != inc.pool_statfs_updates.end();
       ++p) {
    auto update_pool = p->first.first;
    auto update_osd =  p->first.second;
    auto& statfs_inc = p->second;

    auto pool_statfs_iter =
      pool_statfs.find(std::make_pair(update_pool, update_osd));
    // only track statfs for pools we have stats for
    if (pg_pool_sum.count(update_pool)) {
      pool_stat_t &pool_sum_ref = pg_pool_sum[update_pool];
      if (pool_statfs_iter == pool_statfs.end()) {
	pool_statfs.emplace(std::make_pair(update_pool, update_osd), statfs_inc);
      } else {
	pool_sum_ref.sub(pool_statfs_iter->second);
	pool_statfs_iter->second = statfs_inc;
      }
      pool_sum_ref.add(statfs_inc);
    }
  }

  for (auto p = inc.get_osd_stat_updates().begin();
       p != inc.get_osd_stat_updates().end();
       ++p) {
    int osd = p->first;
    const osd_stat_t &new_stats(p->second);

    auto t = osd_stat.find(osd);
    if (t == osd_stat.end()) {
      osd_stat.insert(make_pair(osd, new_stats));
    } else {
      stat_osd_sub(t->first, t->second);
      t->second = new_stats;
    }
    stat_osd_add(osd, new_stats);
  }

  set<int64_t> deleted_pools;
  for (auto p = inc.pg_remove.begin();
       p != inc.pg_remove.end();
       ++p) {
    const pg_t &removed_pg(*p);
    auto s = pg_stat.find(removed_pg);
    bool pool_erased = false;
    if (s != pg_stat.end()) {
      pool_erased = stat_pg_sub(removed_pg, s->second);

      // decrease pool stats if pg was removed
      auto pool_stats_it = pg_pool_sum.find(removed_pg.pool());
      if (pool_stats_it != pg_pool_sum.end()) {
        pool_stats_it->second.sub(s->second);
      }

      pg_stat.erase(s);
      if (pool_erased) {
        deleted_pools.insert(removed_pg.pool());
      }
    }
  }

  for (auto p = inc.get_osd_stat_rm().begin();
       p != inc.get_osd_stat_rm().end();
       ++p) {
    auto t = osd_stat.find(*p);
    if (t != osd_stat.end()) {
      stat_osd_sub(t->first, t->second);
      osd_stat.erase(t);
    }
    // drop any per-(pool,osd) statfs entries for the removed OSD.
    // note: erase() invalidates the erased iterator, so advance via
    // its return value instead of unconditionally incrementing.
    for (auto i = pool_statfs.begin(); i != pool_statfs.end();) {
      if (i->first.second == *p) {
	pg_pool_sum[i->first.first].sub(i->second);
	i = pool_statfs.erase(i);
      } else {
	++i;
      }
    }
  }

  // skip calculating delta while sum was not synchronized
  if (!stamp.is_zero() && !pg_sum_old.stats.sum.is_zero()) {
    utime_t delta_t;
    delta_t = inc.stamp;
    delta_t -= stamp;
    // calculate a delta, and average over the last 2 deltas.
    pool_stat_t d = pg_sum;
    d.stats.sub(pg_sum_old.stats);
    pg_sum_deltas.push_back(make_pair(d, delta_t));
    stamp_delta += delta_t;
    pg_sum_delta.stats.add(d.stats);
    auto smooth_intervals =
      cct ? cct->_conf.get_val<uint64_t>("mon_stat_smooth_intervals") : 1;
    // trim the sliding window down to the configured length
    while (pg_sum_deltas.size() > smooth_intervals) {
      pg_sum_delta.stats.sub(pg_sum_deltas.front().first.stats);
      stamp_delta -= pg_sum_deltas.front().second;
      pg_sum_deltas.pop_front();
    }
  }
  stamp = inc.stamp;

  update_pool_deltas(cct, inc.stamp, pg_pool_sum_old);

  for (auto p : deleted_pools) {
    if (cct)
      dout(20) << " deleted pool " << p << dendl;
    deleted_pool(p);
  }

  if (inc.osdmap_epoch)
    last_osdmap_epoch = inc.osdmap_epoch;
  if (inc.pg_scan)
    last_pg_scan = inc.pg_scan;
}
// Rebuild every derived aggregate (counts, per-pool sums, per-OSD
// indexes, cluster-wide sums) from scratch out of the authoritative
// pg_stat, pool_statfs and osd_stat tables.
void PGMap::calc_stats()
{
  // reset all derived state
  num_pg = 0;
  num_pg_active = 0;
  num_pg_unknown = 0;
  num_osd = 0;
  pg_pool_sum.clear();
  num_pg_by_pool.clear();
  pg_by_osd.clear();
  pg_sum = pool_stat_t();
  osd_sum = osd_stat_t();
  osd_sum_by_class.clear();
  num_pg_by_state.clear();
  num_pg_by_pool_state.clear();
  num_pg_by_osd.clear();

  for (const auto& [pgid, st] : pg_stat) {
    stat_pg_add(pgid, st);
    pg_pool_sum[pgid.pool()].add(st);
  }
  for (const auto& [pool_osd, statfs] : pool_statfs) {
    pg_pool_sum[pool_osd.first].add(statfs);
  }
  for (const auto& [osd, st] : osd_stat) {
    stat_osd_add(osd, st);
  }
}
// Fold one PG's stats into the aggregates: cluster-wide sum, per-state
// and per-pool counters, creating-PG tracking, and — unless sameosds is
// set (the PG's OSD mappings are unchanged) — the per-OSD indexes.
void PGMap::stat_pg_add(const pg_t &pgid, const pg_stat_t &s,
			bool sameosds)
{
  auto pool = pgid.pool();
  pg_sum.add(s);

  num_pg++;
  num_pg_by_state[s.state]++;
  num_pg_by_pool_state[pgid.pool()][s.state]++;
  num_pg_by_pool[pool]++;

  // track PGs still being created (only the original, unsplit ones)
  if ((s.state & PG_STATE_CREATING) &&
      s.parent_split_bits == 0) {
    creating_pgs.insert(pgid);
    if (s.acting_primary >= 0) {
      creating_pgs_by_osd_epoch[s.acting_primary][s.mapping_epoch].insert(pgid);
    }
  }

  if (s.state & PG_STATE_ACTIVE) {
    ++num_pg_active;
  }
  if (s.state == 0) {
    ++num_pg_unknown;
  }

  if (sameosds)
    return;

  for (auto p = s.blocked_by.begin();
       p != s.blocked_by.end();
       ++p) {
    ++blocked_by_sum[*p];
  }

  for (auto p = s.acting.begin(); p != s.acting.end(); ++p) {
    pg_by_osd[*p].insert(pgid);
    num_pg_by_osd[*p].acting++;
  }
  // OSDs that are up but not acting for this PG
  for (auto p = s.up.begin(); p != s.up.end(); ++p) {
    auto& t = pg_by_osd[*p];
    if (t.find(pgid) == t.end()) {
      t.insert(pgid);
      num_pg_by_osd[*p].up_not_acting++;
    }
  }

  if (s.up_primary >= 0) {
    num_pg_by_osd[s.up_primary].primary++;
  }
}
// Remove one PG's stats from the aggregates (inverse of stat_pg_add).
// Returns true when this was the pool's last PG, i.e. the pool's PG
// count dropped to zero.  sameosds skips the per-OSD index updates.
bool PGMap::stat_pg_sub(const pg_t &pgid, const pg_stat_t &s,
			bool sameosds)
{
  bool pool_erased = false;
  pg_sum.sub(s);

  num_pg--;
  int end = --num_pg_by_state[s.state];
  ceph_assert(end >= 0);
  if (end == 0)
    num_pg_by_state.erase(s.state);
  if (--num_pg_by_pool_state[pgid.pool()][s.state] == 0) {
    num_pg_by_pool_state[pgid.pool()].erase(s.state);
  }
  end = --num_pg_by_pool[pgid.pool()];
  if (end == 0) {
    pool_erased = true;
  }

  // undo creating-PG tracking, pruning empty inner maps as we go
  if ((s.state & PG_STATE_CREATING) &&
      s.parent_split_bits == 0) {
    creating_pgs.erase(pgid);
    if (s.acting_primary >= 0) {
      map<epoch_t,set<pg_t> >& r = creating_pgs_by_osd_epoch[s.acting_primary];
      r[s.mapping_epoch].erase(pgid);
      if (r[s.mapping_epoch].empty())
	r.erase(s.mapping_epoch);
      if (r.empty())
	creating_pgs_by_osd_epoch.erase(s.acting_primary);
    }
  }

  if (s.state & PG_STATE_ACTIVE) {
    --num_pg_active;
  }
  if (s.state == 0) {
    --num_pg_unknown;
  }

  if (sameosds)
    return pool_erased;

  for (auto p = s.blocked_by.begin();
       p != s.blocked_by.end();
       ++p) {
    auto q = blocked_by_sum.find(*p);
    ceph_assert(q != blocked_by_sum.end());
    --q->second;
    if (q->second == 0)
      blocked_by_sum.erase(q);
  }

  // collect the acting set so the up loop below can skip OSDs that
  // were already handled as acting
  set<int32_t> actingset;
  for (auto p = s.acting.begin(); p != s.acting.end(); ++p) {
    actingset.insert(*p);
    auto& oset = pg_by_osd[*p];
    oset.erase(pgid);
    if (oset.empty())
      pg_by_osd.erase(*p);
    auto it = num_pg_by_osd.find(*p);
    if (it != num_pg_by_osd.end() && it->second.acting > 0)
      it->second.acting--;
  }
  for (auto p = s.up.begin(); p != s.up.end(); ++p) {
    auto& oset = pg_by_osd[*p];
    oset.erase(pgid);
    if (oset.empty())
      pg_by_osd.erase(*p);
    if (actingset.count(*p))
      continue;
    auto it = num_pg_by_osd.find(*p);
    if (it != num_pg_by_osd.end() && it->second.up_not_acting > 0)
      it->second.up_not_acting--;
  }

  if (s.up_primary >= 0) {
    auto it = num_pg_by_osd.find(s.up_primary);
    if (it != num_pg_by_osd.end() && it->second.primary > 0)
      it->second.primary--;
  }
  return pool_erased;
}
// Recompute the per-pool purged-snaps sets as the intersection of the
// purged_snaps of every PG in the pool.  Any pool containing a PG in
// the unknown state (state == 0) gets no entry at all, since we cannot
// know what that PG has purged.
void PGMap::calc_purged_snaps()
{
  purged_snaps.clear();
  set<int64_t> unknown;
  for (auto& i : pg_stat) {
    if (i.second.state == 0) {
      // remember the pool as unknown and drop anything already
      // accumulated for it from earlier PGs
      unknown.insert(i.first.pool());
      purged_snaps.erase(i.first.pool());
      continue;
    } else if (unknown.count(i.first.pool())) {
      continue;
    }
    auto j = purged_snaps.find(i.first.pool());
    if (j == purged_snaps.end()) {
      // base case
      purged_snaps[i.first.pool()] = i.second.purged_snaps;
    } else {
      j->second.intersection_of(i.second.purged_snaps);
    }
  }
}
// Aggregate per-OSD stats into per-CRUSH-device-class totals.  OSDs
// with no device class are left out of the breakdown.
void PGMap::calc_osd_sum_by_class(const OSDMap& osdmap)
{
  osd_sum_by_class.clear();
  for (const auto& [osd, stats] : osd_stat) {
    if (const char *device_class = osdmap.crush->get_item_class(osd)) {
      osd_sum_by_class[device_class].add(stats);
    }
  }
}
// Fold one OSD's stats into the cluster-wide sum and remember its last
// reported sequence number (growing the seq vector as needed).
void PGMap::stat_osd_add(int osd, const osd_stat_t &s)
{
  ++num_osd;
  osd_sum.add(s);
  if (osd >= (int)osd_last_seq.size())
    osd_last_seq.resize(osd + 1);
  osd_last_seq[osd] = s.seq;
}
// Remove one OSD's contribution from the cluster-wide sum and clear
// its last reported sequence number.
void PGMap::stat_osd_sub(int osd, const osd_stat_t &s)
{
  --num_osd;
  osd_sum.sub(s);
  ceph_assert(osd < (int)osd_last_seq.size());
  osd_last_seq[osd] = 0;
}
// Refresh the digest-only derived state (per-rule availability,
// per-device-class OSD sums, purged snaps) and then encode the
// PGMapDigest portion of this map.
void PGMap::encode_digest(const OSDMap& osdmap,
			  bufferlist& bl, uint64_t features)
{
  get_rules_avail(osdmap, &avail_space_by_rule);
  calc_osd_sum_by_class(osdmap);
  calc_purged_snaps();
  PGMapDigest::encode(bl, features);
}
// Serialize the authoritative PGMap state (struct_v 8, compat 8).
// Field order must match decode() exactly.
void PGMap::encode(bufferlist &bl, uint64_t features) const
{
  ENCODE_START(8, 8, bl);
  encode(version, bl);
  encode(pg_stat, bl);
  encode(osd_stat, bl, features);
  encode(last_osdmap_epoch, bl);
  encode(last_pg_scan, bl);
  encode(stamp, bl);
  encode(pool_statfs, bl, features);
  ENCODE_FINISH(bl);
}
// Deserialize the authoritative PGMap state (field order must match
// encode()), then rebuild all derived aggregates from it.
void PGMap::decode(bufferlist::const_iterator &bl)
{
  DECODE_START(8, bl);
  decode(version, bl);
  decode(pg_stat, bl);
  decode(osd_stat, bl);
  decode(last_osdmap_epoch, bl);
  decode(last_pg_scan, bl);
  decode(stamp, bl);
  decode(pool_statfs, bl);
  DECODE_FINISH(bl);

  // derived state is not encoded; recompute it
  calc_stats();
}
// Full structured dump: header/sums, per-PG, per-pool and per-OSD
// stats.  with_net forwards network ping info into the OSD section.
void PGMap::dump(ceph::Formatter *f, bool with_net) const
{
  dump_basic(f);
  dump_pg_stats(f, false);
  dump_pool_stats(f);
  dump_osd_stats(f, with_net);
}
// Dump header fields, the cluster-wide PG and OSD stat sums, and the
// smoothed delta.
void PGMap::dump_basic(ceph::Formatter *f) const
{
  f->dump_unsigned("version", version);
  f->dump_stream("stamp") << stamp;
  f->dump_unsigned("last_osdmap_epoch", last_osdmap_epoch);
  f->dump_unsigned("last_pg_scan", last_pg_scan);

  f->open_object_section("pg_stats_sum");
  pg_sum.dump(f);
  f->close_section();

  f->open_object_section("osd_stats_sum");
  osd_sum.dump(f);
  f->close_section();

  dump_delta(f);
}
// Dump the smoothed cluster-wide stat delta and the interval it spans.
void PGMap::dump_delta(ceph::Formatter *f) const
{
  f->open_object_section("pg_stats_delta");
  pg_sum_delta.dump(f);
  f->dump_stream("stamp_delta") << stamp_delta;
  f->close_section();
}
// Dump the stats of every PG; brief selects the abbreviated per-PG
// form.
void PGMap::dump_pg_stats(ceph::Formatter *f, bool brief) const
{
  f->open_array_section("pg_stats");
  for (const auto& [pgid, st] : pg_stat) {
    f->open_object_section("pg_stat");
    f->dump_stream("pgid") << pgid;
    if (brief) {
      st.dump_brief(f);
    } else {
      st.dump(f);
    }
    f->close_section();
  }
  f->close_section();
}
// Dump the minimal per-PG info consumed by progress tracking: recovery
// byte counters, reported epoch and state, keyed by pgid.
void PGMap::dump_pg_progress(ceph::Formatter *f) const
{
  f->open_object_section("pgs");
  for (const auto& [pgid, st] : pg_stat) {
    const std::string name = stringify(pgid);
    f->open_object_section(name.c_str());
    f->dump_int("num_bytes_recovered", st.stats.sum.num_bytes_recovered);
    f->dump_int("num_bytes", st.stats.sum.num_bytes);
    f->dump_unsigned("reported_epoch", st.reported_epoch);
    f->dump_string("state", pg_state_string(st.state));
    f->close_section();
  }
  f->close_section();
}
// Dump the aggregated stats of every pool, including its PG count when
// known.
void PGMap::dump_pool_stats(ceph::Formatter *f) const
{
  f->open_array_section("pool_stats");
  for (const auto& [poolid, st] : pg_pool_sum) {
    f->open_object_section("pool_stat");
    f->dump_int("poolid", poolid);
    if (auto q = num_pg_by_pool.find(poolid); q != num_pg_by_pool.end())
      f->dump_unsigned("num_pg", q->second);
    st.dump(f);
    f->close_section();
  }
  f->close_section();
}
void PGMap::dump_osd_stats(ceph::Formatter *f, bool with_net) const
{
  // Per-OSD statistics, optionally including network ping data.
  f->open_array_section("osd_stats");
  for (const auto& [osd, stat] : osd_stat) {
    f->open_object_section("osd_stat");
    f->dump_int("osd", osd);
    stat.dump(f, with_net);
    f->close_section();
  }
  f->close_section();
  // Per-(pool, osd) statfs entries.
  f->open_array_section("pool_statfs");
  for (const auto& [key, statfs] : pool_statfs) {
    f->open_object_section("item");
    f->dump_int("poolid", key.first);
    f->dump_int("osd", key.second);
    statfs.dump(f);
    f->close_section();
  }
  f->close_section();
}
void PGMap::dump_osd_ping_times(ceph::Formatter *f) const
{
  // Network ping-time report for every OSD we have stats for.
  f->open_array_section("osd_ping_times");
  for (const auto& [osd, stat] : osd_stat) {
    f->open_object_section("osd_ping_time");
    f->dump_int("osd", osd);
    stat.dump_ping_time(f);
    f->close_section();
  }
  f->close_section();
}
// note: dump_pg_stats_plain() is static
void PGMap::dump_pg_stats_plain(
  ostream& ss,
  const mempool::pgmap::unordered_map<pg_t, pg_stat_t>& pg_stats,
  bool brief)
{
  TextTable tab;
  // "brief" limits the table to pgid/state/up/acting; the full form adds
  // object counts, log sizes, scrub scheduling, and more.
  if (brief){
    tab.define_column("PG_STAT", TextTable::LEFT, TextTable::LEFT);
    tab.define_column("STATE", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("UP", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("UP_PRIMARY", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("ACTING", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("ACTING_PRIMARY", TextTable::LEFT, TextTable::RIGHT);
  }
  else {
    tab.define_column("PG_STAT", TextTable::LEFT, TextTable::LEFT);
    tab.define_column("OBJECTS", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("MISSING_ON_PRIMARY", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("DEGRADED", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("MISPLACED", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("UNFOUND", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("BYTES", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("OMAP_BYTES*", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("OMAP_KEYS*", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("LOG", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("LOG_DUPS", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("DISK_LOG", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("STATE", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("STATE_STAMP", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("VERSION", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("REPORTED", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("UP", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("UP_PRIMARY", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("ACTING", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("ACTING_PRIMARY", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("LAST_SCRUB", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("SCRUB_STAMP", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("LAST_DEEP_SCRUB", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("DEEP_SCRUB_STAMP", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("SNAPTRIMQ_LEN", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("LAST_SCRUB_DURATION", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("SCRUB_SCHEDULING", TextTable::LEFT, TextTable::LEFT);
    tab.define_column("OBJECTS_SCRUBBED", TextTable::LEFT, TextTable::RIGHT);
    tab.define_column("OBJECTS_TRIMMED", TextTable::LEFT, TextTable::RIGHT);
  }
  for (const auto& [pg, st] : pg_stats) {
    if (brief) {
      tab << pg
          << pg_state_string(st.state)
          << st.up
          << st.up_primary
          << st.acting
          << st.acting_primary
          << TextTable::endrow;
    } else {
      // "reported" is shown as epoch:seq.
      ostringstream reported;
      reported << st.reported_epoch << ":" << st.reported_seq;
      tab << pg
          << st.stats.sum.num_objects
          << st.stats.sum.num_objects_missing_on_primary
          << st.stats.sum.num_objects_degraded
          << st.stats.sum.num_objects_misplaced
          << st.stats.sum.num_objects_unfound
          << st.stats.sum.num_bytes
          << st.stats.sum.num_omap_bytes
          << st.stats.sum.num_omap_keys
          << st.log_size
          << st.log_dups_size
          << st.ondisk_log_size
          << pg_state_string(st.state)
          << st.last_change
          << st.version
          << reported.str()
          << pg_vector_string(st.up)
          << st.up_primary
          << pg_vector_string(st.acting)
          << st.acting_primary
          << st.last_scrub
          << st.last_scrub_stamp
          << st.last_deep_scrub
          << st.last_deep_scrub_stamp
          << st.snaptrimq_len
          << st.last_scrub_duration
          << st.dump_scrub_schedule()
          << st.objects_scrubbed
          << st.objects_trimmed
          << TextTable::endrow;
    }
  }
  ss << tab;
}
void PGMap::dump(ostream& ss) const
{
  // Plain-text dump: header, per-PG table, per-pool table and "sum" row
  // (with column labels suppressed so they line up under the PG table),
  // then the per-OSD table.
  dump_basic(ss);
  dump_pg_stats(ss, false);
  dump_pool_stats(ss, false);
  dump_pg_sum_stats(ss, false);
  dump_osd_stats(ss);
}
void PGMap::dump_basic(ostream& ss) const
{
  // Plain-text header fields, one per line.
  ss << "version " << version << std::endl;
  ss << "stamp " << stamp << std::endl;
  ss << "last_osdmap_epoch " << last_osdmap_epoch << std::endl;
  ss << "last_pg_scan " << last_pg_scan << std::endl;
}
void PGMap::dump_pg_stats(ostream& ss, bool brief) const
{
  // Plain-text table of every PG's stats; see dump_pg_stats_plain().
  dump_pg_stats_plain(ss, pg_stat, brief);
}
void PGMap::dump_pool_stats(ostream& ss, bool header) const
{
  TextTable tab;
  // When "header" is false the labels are replaced by empty strings so the
  // table can be printed directly beneath another one with matching columns.
  static const char *labels[] = {
    "POOLID", "OBJECTS", "MISSING_ON_PRIMARY", "DEGRADED", "MISPLACED",
    "UNFOUND", "BYTES", "OMAP_BYTES*", "OMAP_KEYS*", "LOG", "DISK_LOG"
  };
  bool first_col = true;
  for (auto label : labels) {
    tab.define_column(header ? label : "",
                      TextTable::LEFT,
                      first_col ? TextTable::LEFT : TextTable::RIGHT);
    first_col = false;
  }
  for (const auto& [poolid, pstat] : pg_pool_sum) {
    tab << poolid
        << pstat.stats.sum.num_objects
        << pstat.stats.sum.num_objects_missing_on_primary
        << pstat.stats.sum.num_objects_degraded
        << pstat.stats.sum.num_objects_misplaced
        << pstat.stats.sum.num_objects_unfound
        << pstat.stats.sum.num_bytes
        << pstat.stats.sum.num_omap_bytes
        << pstat.stats.sum.num_omap_keys
        << pstat.log_size
        << pstat.ondisk_log_size
        << TextTable::endrow;
  }
  ss << tab;
}
void PGMap::dump_pg_sum_stats(ostream& ss, bool header) const
{
  TextTable tab;
  // When "header" is false the labels are replaced by empty strings so the
  // "sum" row can be printed directly beneath the per-PG table.
  static const char *labels[] = {
    "PG_STAT", "OBJECTS", "MISSING_ON_PRIMARY", "DEGRADED", "MISPLACED",
    "UNFOUND", "BYTES", "OMAP_BYTES*", "OMAP_KEYS*", "LOG", "DISK_LOG"
  };
  bool first_col = true;
  for (auto label : labels) {
    tab.define_column(header ? label : "",
                      TextTable::LEFT,
                      first_col ? TextTable::LEFT : TextTable::RIGHT);
    first_col = false;
  }
  // Single row holding the cluster-wide totals.
  tab << "sum"
      << pg_sum.stats.sum.num_objects
      << pg_sum.stats.sum.num_objects_missing_on_primary
      << pg_sum.stats.sum.num_objects_degraded
      << pg_sum.stats.sum.num_objects_misplaced
      << pg_sum.stats.sum.num_objects_unfound
      << pg_sum.stats.sum.num_bytes
      << pg_sum.stats.sum.num_omap_bytes
      << pg_sum.stats.sum.num_omap_keys
      << pg_sum.log_size
      << pg_sum.ondisk_log_size
      << TextTable::endrow;
  ss << tab;
}
void PGMap::dump_osd_stats(ostream& ss) const
{
  TextTable tab;
  static const char *labels[] = {
    "OSD_STAT", "USED", "AVAIL", "USED_RAW", "TOTAL",
    "HB_PEERS", "PG_SUM", "PRIMARY_PG_SUM"
  };
  bool first_col = true;
  for (auto label : labels) {
    tab.define_column(label,
                      TextTable::LEFT,
                      first_col ? TextTable::LEFT : TextTable::RIGHT);
    first_col = false;
  }
  for (const auto& [osd, stat] : osd_stat) {
    tab << osd
        << byte_u_t(stat.statfs.get_used())
        << byte_u_t(stat.statfs.available)
        << byte_u_t(stat.statfs.get_used_raw())
        << byte_u_t(stat.statfs.total)
        << stat.hb_peers
        << get_num_pg_by_osd(osd)
        << get_num_primary_pg_by_osd(osd)
        << TextTable::endrow;
  }
  // Summary row: cluster-wide space totals (remaining columns left blank).
  tab << "sum"
      << byte_u_t(osd_sum.statfs.get_used())
      << byte_u_t(osd_sum.statfs.available)
      << byte_u_t(osd_sum.statfs.get_used_raw())
      << byte_u_t(osd_sum.statfs.total)
      << TextTable::endrow;
  ss << tab;
}
void PGMap::dump_osd_sum_stats(ostream& ss) const
{
  // Single-row table carrying the cluster-wide OSD space totals only.
  TextTable tab;
  tab.define_column("OSD_STAT", TextTable::LEFT, TextTable::LEFT);
  tab.define_column("USED", TextTable::LEFT, TextTable::RIGHT);
  tab.define_column("AVAIL", TextTable::LEFT, TextTable::RIGHT);
  tab.define_column("USED_RAW", TextTable::LEFT, TextTable::RIGHT);
  tab.define_column("TOTAL", TextTable::LEFT, TextTable::RIGHT);
  tab << "sum"
      << byte_u_t(osd_sum.statfs.get_used())
      << byte_u_t(osd_sum.statfs.available)
      << byte_u_t(osd_sum.statfs.get_used_raw())
      << byte_u_t(osd_sum.statfs.total)
      << TextTable::endrow;
  ss << tab;
}
void PGMap::get_stuck_stats(
int types, const utime_t cutoff,
mempool::pgmap::unordered_map<pg_t, pg_stat_t>& stuck_pgs) const
{
ceph_assert(types != 0);
for (auto i = pg_stat.begin();
i != pg_stat.end();
++i) {
utime_t val = cutoff; // don't care about >= cutoff so that is infinity
if ((types & STUCK_INACTIVE) && !(i->second.state & PG_STATE_ACTIVE)) {
if (i->second.last_active < val)
val = i->second.last_active;
}
if ((types & STUCK_UNCLEAN) && !(i->second.state & PG_STATE_CLEAN)) {
if (i->second.last_clean < val)
val = i->second.last_clean;
}
if ((types & STUCK_DEGRADED) && (i->second.state & PG_STATE_DEGRADED)) {
if (i->second.last_undegraded < val)
val = i->second.last_undegraded;
}
if ((types & STUCK_UNDERSIZED) && (i->second.state & PG_STATE_UNDERSIZED)) {
if (i->second.last_fullsized < val)
val = i->second.last_fullsized;
}
if ((types & STUCK_STALE) && (i->second.state & PG_STATE_STALE)) {
if (i->second.last_unstale < val)
val = i->second.last_unstale;
}
// val is now the earliest any of the requested stuck states began
if (val < cutoff) {
stuck_pgs[i->first] = i->second;
}
}
}
void PGMap::dump_stuck(ceph::Formatter *f, int types, utime_t cutoff) const
{
  // Gather the PGs stuck in any requested state since before "cutoff",
  // then emit them as an array of pg_stat objects.
  mempool::pgmap::unordered_map<pg_t, pg_stat_t> stuck;
  get_stuck_stats(types, cutoff, stuck);
  f->open_array_section("stuck_pg_stats");
  for (const auto& [pgid, st] : stuck) {
    f->open_object_section("pg_stat");
    f->dump_stream("pgid") << pgid;
    st.dump(f);
    f->close_section();
  }
  f->close_section();
}
void PGMap::dump_stuck_plain(ostream& ss, int types, utime_t cutoff) const
{
  // Plain-text (brief table) variant of dump_stuck(); prints nothing at
  // all when no PGs are stuck.
  mempool::pgmap::unordered_map<pg_t, pg_stat_t> stuck_pg_stats;
  get_stuck_stats(types, cutoff, stuck_pg_stats);
  if (!stuck_pg_stats.empty())
    dump_pg_stats_plain(ss, stuck_pg_stats, true);
}
int PGMap::dump_stuck_pg_stats(
  stringstream &ds,
  ceph::Formatter *f,
  int threshold,
  vector<string>& args) const
{
  // Translate the user-supplied state names into a STUCK_* bitmask.
  static const std::map<std::string, int> stuck_by_name = {
    { "inactive",   PGMap::STUCK_INACTIVE },
    { "unclean",    PGMap::STUCK_UNCLEAN },
    { "undersized", PGMap::STUCK_UNDERSIZED },
    { "degraded",   PGMap::STUCK_DEGRADED },
    { "stale",      PGMap::STUCK_STALE },
  };
  int stuck_types = 0;
  for (const auto& arg : args) {
    auto match = stuck_by_name.find(arg);
    if (match == stuck_by_name.end()) {
      ds << "Unknown type: " << arg << std::endl;
      return -EINVAL;
    }
    stuck_types |= match->second;
  }
  // Anything stuck for longer than "threshold" seconds is reported.
  utime_t now(ceph_clock_now());
  utime_t cutoff = now - utime_t(threshold, 0);
  if (f) {
    dump_stuck(f, stuck_types, cutoff);
    f->flush(ds);
  } else {
    dump_stuck_plain(ds, stuck_types, cutoff);
  }
  return 0;
}
void PGMap::dump_osd_perf_stats(ceph::Formatter *f) const
{
  // Per-OSD objectstore perf counters.
  f->open_array_section("osd_perf_infos");
  for (const auto& [osd, stat] : osd_stat) {
    f->open_object_section("osd");
    f->dump_int("id", osd);
    f->open_object_section("perf_stats");
    stat.os_perf_stat.dump(f);
    f->close_section();
    f->close_section();
  }
  f->close_section();
}
void PGMap::print_osd_perf_stats(std::ostream *ss) const
{
  // Plain-text table of per-OSD objectstore latencies, converted from
  // nanoseconds to milliseconds.
  TextTable tab;
  tab.define_column("osd", TextTable::LEFT, TextTable::RIGHT);
  tab.define_column("commit_latency(ms)", TextTable::LEFT, TextTable::RIGHT);
  tab.define_column("apply_latency(ms)", TextTable::LEFT, TextTable::RIGHT);
  for (const auto& [osd, stat] : osd_stat) {
    tab << osd
        << stat.os_perf_stat.os_commit_latency_ns / 1000000ull
        << stat.os_perf_stat.os_apply_latency_ns / 1000000ull
        << TextTable::endrow;
  }
  (*ss) << tab;
}
void PGMap::dump_osd_blocked_by_stats(ceph::Formatter *f) const
{
  // For each blocking OSD, report how many PGs it is blocking.
  f->open_array_section("osd_blocked_by_infos");
  for (const auto& [osd, count] : blocked_by_sum) {
    f->open_object_section("osd");
    f->dump_int("id", osd);
    f->dump_int("num_blocked", count);
    f->close_section();
  }
  f->close_section();
}
void PGMap::print_osd_blocked_by_stats(std::ostream *ss) const
{
  // Plain-text version of dump_osd_blocked_by_stats().
  TextTable tab;
  tab.define_column("osd", TextTable::LEFT, TextTable::RIGHT);
  tab.define_column("num_blocked", TextTable::LEFT, TextTable::RIGHT);
  for (const auto& [osd, count] : blocked_by_sum) {
    tab << osd
        << count
        << TextTable::endrow;
  }
  (*ss) << tab;
}
/**
 * update aggregated delta
 *
 * @param cct ceph context
 * @param ts Timestamp for the stats being delta'ed
 * @param old_pool_sum Previous stats sum
 * @param last_ts Last timestamp for pool (in/out; updated to @p ts)
 * @param current_pool_sum Current stats sum
 * @param result_pool_delta Resulting pool delta (smoothed aggregate)
 * @param result_ts_delta Resulting timestamp delta
 * @param delta_avg_list List of last N computed deltas, used to average
 */
void PGMap::update_delta(
  CephContext *cct,
  const utime_t ts,
  const pool_stat_t& old_pool_sum,
  utime_t *last_ts,
  const pool_stat_t& current_pool_sum,
  pool_stat_t *result_pool_delta,
  utime_t *result_ts_delta,
  mempool::pgmap::list<pair<pool_stat_t,utime_t> > *delta_avg_list)
{
  /* @p ts is the timestamp we want to associate with the data
   * in @p old_pool_sum, and on which we will base ourselves to
   * calculate the delta, stored in 'delta_t'.
   */
  utime_t delta_t;
  delta_t = ts;         // start with the provided timestamp
  delta_t -= *last_ts;  // take the last timestamp we saw
  *last_ts = ts;        // @p ts becomes the last timestamp we saw
  // adjust delta_t, quick start if there is no update in a long period
  delta_t = std::min(delta_t,
                     utime_t(2 * (cct ? cct->_conf->mon_delta_reset_interval : 10), 0));
  // calculate a delta, and average over the last 6 deltas by default.
  /* start by taking a copy of our current @p result_pool_sum, and by
   * taking out the stats from @p old_pool_sum. This generates a stats
   * delta. Stash this stats delta in @p delta_avg_list, along with the
   * timestamp delta for these results.
   */
  pool_stat_t d = current_pool_sum;
  d.stats.sub(old_pool_sum.stats);
  /* Aggregate current delta, and take out the last seen delta (if any) to
   * average it out.
   * Skip calculating delta while sum was not synchronized.
   */
  if(!old_pool_sum.stats.sum.is_zero()) {
    delta_avg_list->push_back(make_pair(d,delta_t));
    *result_ts_delta += delta_t;
    result_pool_delta->stats.add(d.stats);
  }
  // Trim the window to mon_stat_smooth_intervals entries, subtracting the
  // dropped samples back out of the running aggregate.
  size_t s = cct ? cct->_conf.get_val<uint64_t>("mon_stat_smooth_intervals") : 1;
  while (delta_avg_list->size() > s) {
    result_pool_delta->stats.sub(delta_avg_list->front().first.stats);
    *result_ts_delta -= delta_avg_list->front().second;
    delta_avg_list->pop_front();
  }
}
/**
 * Update a given pool's deltas
 *
 * @param cct Ceph Context
 * @param ts Timestamp for the stats being delta'ed
 * @param pool Pool's id
 * @param old_pool_sum Previous stats sum
 */
void PGMap::update_one_pool_delta(
  CephContext *cct,
  const utime_t ts,
  const int64_t pool,
  const pool_stat_t& old_pool_sum)
{
  // The three per-pool maps are kept in lockstep: a pool is either present
  // in all of them or in none of them.
  if (per_pool_sum_deltas.count(pool) == 0) {
    ceph_assert(per_pool_sum_deltas_stamps.count(pool) == 0);
    ceph_assert(per_pool_sum_delta.count(pool) == 0);
  }
  // operator[] creates the entries on first use for a new pool.
  auto& sum_delta = per_pool_sum_delta[pool];
  update_delta(cct, ts, old_pool_sum, &sum_delta.second, pg_pool_sum[pool],
               &sum_delta.first, &per_pool_sum_deltas_stamps[pool],
               &per_pool_sum_deltas[pool]);
}
/**
 * Update pools' deltas
 *
 * @param cct CephContext
 * @param ts Timestamp for the stats being delta'ed
 * @param pg_pool_sum_old Map of pool stats for delta calcs.
 */
void PGMap::update_pool_deltas(
  CephContext *cct, const utime_t ts,
  const mempool::pgmap::unordered_map<int32_t,pool_stat_t>& pg_pool_sum_old)
{
  for (const auto& [poolid, old_sum] : pg_pool_sum_old) {
    update_one_pool_delta(cct, ts, poolid, old_sum);
  }
}
void PGMap::clear_delta()
{
  // Reset the aggregated rate state; the next stats update starts
  // accumulating deltas from scratch.
  pg_sum_delta = pool_stat_t();
  pg_sum_deltas.clear();
  stamp_delta = utime_t();
}
void PGMap::generate_test_instances(list<PGMap*>& o)
{
  // Build test instances by applying each Incremental test instance, in
  // order, on top of a copy of the previously generated map.
  o.push_back(new PGMap);
  list<Incremental*> inc;
  Incremental::generate_test_instances(inc);
  delete inc.front();  // skip the first incremental; we own the pointers
  inc.pop_front();
  while (!inc.empty()) {
    PGMap *pmp = new PGMap();
    *pmp = *o.back();  // copy of the latest instance so far
    o.push_back(pmp);
    o.back()->apply_incremental(NULL, *inc.front());
    delete inc.front();
    inc.pop_front();
  }
}
void PGMap::get_filtered_pg_stats(uint64_t state, int64_t poolid, int64_t osdid,
                                  bool primary, set<pg_t>& pgs) const
{
  for (const auto& [pgid, st] : pg_stat) {
    // Pool filter: a negative poolid means "any pool".
    if (poolid >= 0 && poolid != pgid.pool())
      continue;
    // OSD filter: a negative osdid means "any osd".
    if (osdid >= 0 && !st.is_acting_osd(osdid, primary))
      continue;
    // State filter: (uint64_t)-1 means "all"; 0 matches "unknown"
    // (no state bits set at all).
    const bool matches = state == (uint64_t)-1 ||
                         (st.state & state) ||
                         (state == 0 && st.state == 0);
    if (matches)
      pgs.insert(pgid);
  }
}
void PGMap::dump_filtered_pg_stats(ceph::Formatter *f, set<pg_t>& pgs) const
{
  // Formatted dump of just the given PGs. Every pgid in "pgs" must exist
  // in pg_stat (at() throws otherwise).
  f->open_array_section("pg_stats");
  for (const auto& pgid : pgs) {
    const pg_stat_t& st = pg_stat.at(pgid);
    f->open_object_section("pg_stat");
    f->dump_stream("pgid") << pgid;
    st.dump(f);
    f->close_section();
  }
  f->close_section();
}
void PGMap::dump_filtered_pg_stats(ostream& ss, set<pg_t>& pgs) const
{
  // Plain-text table of just the given PGs. Every pgid in "pgs" must
  // exist in pg_stat (at() throws otherwise).
  TextTable tab;
  utime_t now = ceph_clock_now();
  tab.define_column("PG", TextTable::LEFT, TextTable::LEFT);
  tab.define_column("OBJECTS", TextTable::LEFT, TextTable::RIGHT);
  tab.define_column("DEGRADED", TextTable::LEFT, TextTable::RIGHT);
  tab.define_column("MISPLACED", TextTable::LEFT, TextTable::RIGHT);
  tab.define_column("UNFOUND", TextTable::LEFT, TextTable::RIGHT);
  tab.define_column("BYTES", TextTable::LEFT, TextTable::RIGHT);
  tab.define_column("OMAP_BYTES*", TextTable::LEFT, TextTable::RIGHT);
  tab.define_column("OMAP_KEYS*", TextTable::LEFT, TextTable::RIGHT);
  tab.define_column("LOG", TextTable::LEFT, TextTable::RIGHT);
  tab.define_column("LOG_DUPS", TextTable::LEFT, TextTable::RIGHT);
  tab.define_column("STATE", TextTable::LEFT, TextTable::RIGHT);
  tab.define_column("SINCE", TextTable::LEFT, TextTable::RIGHT);
  tab.define_column("VERSION", TextTable::LEFT, TextTable::RIGHT);
  tab.define_column("REPORTED", TextTable::LEFT, TextTable::RIGHT);
  tab.define_column("UP", TextTable::LEFT, TextTable::RIGHT);
  tab.define_column("ACTING", TextTable::LEFT, TextTable::RIGHT);
  tab.define_column("SCRUB_STAMP", TextTable::LEFT, TextTable::RIGHT);
  tab.define_column("DEEP_SCRUB_STAMP", TextTable::LEFT, TextTable::RIGHT);
  tab.define_column("LAST_SCRUB_DURATION", TextTable::LEFT, TextTable::RIGHT);
  tab.define_column("SCRUB_SCHEDULING", TextTable::LEFT, TextTable::LEFT);
  for (auto i = pgs.begin(); i != pgs.end(); ++i) {
    const pg_stat_t& st = pg_stat.at(*i);
    // "reported" is shown as epoch:seq; up/acting append "p<primary>".
    ostringstream reported;
    reported << st.reported_epoch << ":" << st.reported_seq;
    ostringstream upstr, actingstr;
    upstr << pg_vector_string(st.up) << 'p' << st.up_primary;
    actingstr << pg_vector_string(st.acting) << 'p' << st.acting_primary;
    tab << *i
        << st.stats.sum.num_objects
        << st.stats.sum.num_objects_degraded
        << st.stats.sum.num_objects_misplaced
        << st.stats.sum.num_objects_unfound
        << st.stats.sum.num_bytes
        << st.stats.sum.num_omap_bytes
        << st.stats.sum.num_omap_keys
        << st.log_size
        << st.log_dups_size
        << pg_state_string(st.state)
        << utimespan_str(now - st.last_change)
        << st.version
        << reported.str()
        << upstr.str()
        << actingstr.str()
        << st.last_scrub_stamp
        << st.last_deep_scrub_stamp
        << st.last_scrub_duration
        << st.dump_scrub_schedule()
        << TextTable::endrow;
  }
  ss << tab;
}
void PGMap::dump_pool_stats_and_io_rate(int64_t poolid, const OSDMap &osd_map,
                                        ceph::Formatter *f,
                                        stringstream *rs) const {
  // Emit one pool's recovery / client-io / cache-io summary, either as
  // formatted output (f != nullptr) or as plain text appended to *rs.
  // NOTE: the section open/close calls below are carefully paired across
  // the is_tier() branch — keep them balanced when editing.
  const string& pool_name = osd_map.get_pool_name(poolid);
  if (f) {
    f->open_object_section("pool");
    f->dump_string("pool_name", pool_name.c_str());
    f->dump_int("pool_id", poolid);
    f->open_object_section("recovery");
  }
  list<string> sl;
  stringstream tss;
  pool_recovery_summary(f, &sl, poolid);
  if (!f && !sl.empty()) {
    for (auto &p : sl)
      tss << " " << p << "\n";
  }
  if (f) {
    f->close_section(); // object section recovery
    f->open_object_section("recovery_rate");
  }
  ostringstream rss;
  pool_recovery_rate_summary(f, &rss, poolid);
  if (!f && !rss.str().empty())
    tss << " recovery io " << rss.str() << "\n";
  if (f) {
    f->close_section(); // object section recovery_rate
    f->open_object_section("client_io_rate");
  }
  rss.clear();
  rss.str("");
  pool_client_io_rate_summary(f, &rss, poolid);
  if (!f && !rss.str().empty())
    tss << " client io " << rss.str() << "\n";
  // dump cache tier IO rate for cache pool
  const pg_pool_t *pool = osd_map.get_pg_pool(poolid);
  if (pool->is_tier()) {
    if (f) {
      f->close_section(); // object section client_io_rate
      f->open_object_section("cache_io_rate");
    }
    rss.clear();
    rss.str("");
    pool_cache_io_rate_summary(f, &rss, poolid);
    if (!f && !rss.str().empty())
      tss << " cache tier io " << rss.str() << "\n";
  }
  if (f) {
    // closes cache_io_rate for tiers, client_io_rate otherwise
    f->close_section(); // object section cache_io_rate
    f->close_section(); // object section pool
  } else {
    *rs << "pool " << pool_name << " id " << poolid << "\n";
    if (!tss.str().empty())
      *rs << tss.str() << "\n";
    else
      *rs << " nothing is going on\n\n";
  }
}
// Get crush parentage for an osd (skip root)
set<std::string> PGMap::osd_parentage(const OSDMap& osdmap, int id) const
{
  set<std::string> reporters_by_subtree;
  auto reporter_subtree_level = g_conf().get_val<string>("mon_osd_reporter_subtree_level");
  auto loc = osdmap.crush->get_full_location(id);
  for (auto& [parent_bucket_type, parent_id] : loc) {
    // Should we show the root? Might not be too informative like "default".
    // Also skip the configured reporter subtree level itself.
    if (parent_bucket_type != "root" &&
        parent_bucket_type != reporter_subtree_level) {
      reporters_by_subtree.insert(parent_id);
    }
  }
  return reporters_by_subtree;
}
void PGMap::get_health_checks(
CephContext *cct,
const OSDMap& osdmap,
health_check_map_t *checks) const
{
utime_t now = ceph_clock_now();
const auto max = cct->_conf.get_val<uint64_t>("mon_health_max_detail");
const auto& pools = osdmap.get_pools();
typedef enum pg_consequence_t {
UNAVAILABLE = 1, // Client IO to the pool may block
DEGRADED = 2, // Fewer than the requested number of replicas are present
BACKFILL_FULL = 3, // Backfill is blocked for space considerations
// This may or may not be a deadlock condition.
DAMAGED = 4, // The data may be missing or inconsistent on disk and
// requires repair
RECOVERY_FULL = 5 // Recovery is blocked because OSDs are full
} pg_consequence_t;
// For a given PG state, how should it be reported at the pool level?
class PgStateResponse {
public:
pg_consequence_t consequence;
typedef std::function< utime_t(const pg_stat_t&) > stuck_cb;
stuck_cb stuck_since;
bool invert;
PgStateResponse(const pg_consequence_t& c, stuck_cb&& s)
: consequence(c), stuck_since(std::move(s)), invert(false)
{
}
PgStateResponse(const pg_consequence_t& c, stuck_cb&& s, bool i)
: consequence(c), stuck_since(std::move(s)), invert(i)
{
}
};
// Record the PG state counts that contributed to a reported pool state
class PgCauses {
public:
// Map of PG_STATE_* to number of pgs in that state.
std::map<unsigned, unsigned> states;
// List of all PG IDs that had a state contributing
// to this health condition.
std::set<pg_t> pgs;
std::map<pg_t, std::string> pg_messages;
};
// Map of PG state to how to respond to it
std::map<unsigned, PgStateResponse> state_to_response = {
// Immediate reports
{ PG_STATE_INCONSISTENT, {DAMAGED, {}} },
{ PG_STATE_INCOMPLETE, {UNAVAILABLE, {}} },
{ PG_STATE_SNAPTRIM_ERROR, {DAMAGED, {}} },
{ PG_STATE_RECOVERY_UNFOUND, {DAMAGED, {}} },
{ PG_STATE_BACKFILL_UNFOUND, {DAMAGED, {}} },
{ PG_STATE_BACKFILL_TOOFULL, {BACKFILL_FULL, {}} },
{ PG_STATE_RECOVERY_TOOFULL, {RECOVERY_FULL, {}} },
{ PG_STATE_DEGRADED, {DEGRADED, {}} },
{ PG_STATE_DOWN, {UNAVAILABLE, {}} },
// Delayed (wait until stuck) reports
{ PG_STATE_PEERING, {UNAVAILABLE, [](const pg_stat_t &p){return p.last_peered;} } },
{ PG_STATE_UNDERSIZED, {DEGRADED, [](const pg_stat_t &p){return p.last_fullsized;} } },
{ PG_STATE_STALE, {UNAVAILABLE, [](const pg_stat_t &p){return p.last_unstale;} } },
// Delayed and inverted reports
{ PG_STATE_ACTIVE, {UNAVAILABLE, [](const pg_stat_t &p){return p.last_active;}, true} }
};
// Specialized state printer that takes account of inversion of
// ACTIVE, CLEAN checks.
auto state_name = [](const uint64_t &state) {
// Special cases for the states that are inverted checks
if (state == PG_STATE_CLEAN) {
return std::string("unclean");
} else if (state == PG_STATE_ACTIVE) {
return std::string("inactive");
} else {
return pg_state_string(state);
}
};
// Map of what is wrong to information about why, implicitly also stores
// the list of what is wrong.
std::map<pg_consequence_t, PgCauses> detected;
// Optimisation: trim down the number of checks to apply based on
// the summary counters
std::map<unsigned, PgStateResponse> possible_responses;
for (const auto &i : num_pg_by_state) {
for (const auto &j : state_to_response) {
if (!j.second.invert) {
// Check for normal tests by seeing if any pgs have the flag
if (i.first & j.first) {
possible_responses.insert(j);
}
}
}
}
for (const auto &j : state_to_response) {
if (j.second.invert) {
// Check for inverted tests by seeing if not-all pgs have the flag
const auto &found = num_pg_by_state.find(j.first);
if (found == num_pg_by_state.end() || found->second != num_pg) {
possible_responses.insert(j);
}
}
}
utime_t cutoff = now - utime_t(cct->_conf.get_val<int64_t>("mon_pg_stuck_threshold"), 0);
// Loop over all PGs, if there are any possibly-unhealthy states in there
if (!possible_responses.empty()) {
for (const auto& i : pg_stat) {
const auto &pg_id = i.first;
const auto &pg_info = i.second;
for (const auto &j : state_to_response) {
const auto &pg_response_state = j.first;
const auto &pg_response = j.second;
// Apply the state test
if (!(bool(pg_info.state & pg_response_state) != pg_response.invert)) {
continue;
}
// Apply stuckness test if needed
if (pg_response.stuck_since) {
// Delayed response, check for stuckness
utime_t last_whatever = pg_response.stuck_since(pg_info);
if (last_whatever.is_zero() &&
pg_info.last_change >= cutoff) {
// still moving, ignore
continue;
} else if (last_whatever >= cutoff) {
// Not stuck enough, ignore.
continue;
} else {
}
}
auto &causes = detected[pg_response.consequence];
causes.states[pg_response_state]++;
causes.pgs.insert(pg_id);
// Don't bother composing detail string if we have already recorded
// too many
if (causes.pg_messages.size() > max) {
continue;
}
std::ostringstream ss;
if (pg_response.stuck_since) {
utime_t since = pg_response.stuck_since(pg_info);
ss << "pg " << pg_id << " is stuck " << state_name(pg_response_state);
if (since == utime_t()) {
ss << " since forever";
} else {
utime_t dur = now - since;
ss << " for " << utimespan_str(dur);
}
ss << ", current state " << pg_state_string(pg_info.state)
<< ", last acting " << pg_vector_string(pg_info.acting);
} else {
ss << "pg " << pg_id << " is "
<< pg_state_string(pg_info.state);
ss << ", acting " << pg_vector_string(pg_info.acting);
if (pg_info.stats.sum.num_objects_unfound) {
ss << ", " << pg_info.stats.sum.num_objects_unfound
<< " unfound";
}
}
if (pg_info.state & PG_STATE_INCOMPLETE) {
const pg_pool_t *pi = osdmap.get_pg_pool(pg_id.pool());
if (pi && pi->min_size > 1) {
ss << " (reducing pool "
<< osdmap.get_pool_name(pg_id.pool())
<< " min_size from " << (int)pi->min_size
<< " may help; search ceph.com/docs for 'incomplete')";
}
}
causes.pg_messages[pg_id] = ss.str();
}
}
} else {
dout(10) << __func__ << " skipping loop over PGs: counters look OK" << dendl;
}
for (const auto &i : detected) {
std::string health_code;
health_status_t sev;
std::string summary;
switch(i.first) {
case UNAVAILABLE:
health_code = "PG_AVAILABILITY";
sev = HEALTH_WARN;
summary = "Reduced data availability: ";
break;
case DEGRADED:
health_code = "PG_DEGRADED";
summary = "Degraded data redundancy: ";
sev = HEALTH_WARN;
break;
case BACKFILL_FULL:
health_code = "PG_BACKFILL_FULL";
summary = "Low space hindering backfill (add storage if this doesn't resolve itself): ";
sev = HEALTH_WARN;
break;
case DAMAGED:
health_code = "PG_DAMAGED";
summary = "Possible data damage: ";
sev = HEALTH_ERR;
break;
case RECOVERY_FULL:
health_code = "PG_RECOVERY_FULL";
summary = "Full OSDs blocking recovery: ";
sev = HEALTH_ERR;
break;
default:
ceph_abort();
}
if (i.first == DEGRADED) {
if (pg_sum.stats.sum.num_objects_degraded &&
pg_sum.stats.sum.num_object_copies > 0) {
double pc = (double)pg_sum.stats.sum.num_objects_degraded /
(double)pg_sum.stats.sum.num_object_copies * (double)100.0;
char b[20];
snprintf(b, sizeof(b), "%.3lf", pc);
ostringstream ss;
ss << pg_sum.stats.sum.num_objects_degraded
<< "/" << pg_sum.stats.sum.num_object_copies << " objects degraded ("
<< b << "%)";
// Throw in a comma for the benefit of the following PG counts
summary += ss.str() + ", ";
}
}
// Compose summary message saying how many PGs in what states led
// to this health check failing
std::vector<std::string> pg_msgs;
int64_t count = 0;
for (const auto &j : i.second.states) {
std::ostringstream msg;
msg << j.second << (j.second > 1 ? " pgs " : " pg ") << state_name(j.first);
pg_msgs.push_back(msg.str());
count += j.second;
}
summary += joinify(pg_msgs.begin(), pg_msgs.end(), std::string(", "));
health_check_t *check = &checks->add(
health_code,
sev,
summary,
count);
// Compose list of PGs contributing to this health check failing
for (const auto &j : i.second.pg_messages) {
check->detail.push_back(j.second);
}
}
// OSD_SCRUB_ERRORS
if (pg_sum.stats.sum.num_scrub_errors) {
ostringstream ss;
ss << pg_sum.stats.sum.num_scrub_errors << " scrub errors";
checks->add("OSD_SCRUB_ERRORS", HEALTH_ERR, ss.str(),
pg_sum.stats.sum.num_scrub_errors);
}
// LARGE_OMAP_OBJECTS
if (pg_sum.stats.sum.num_large_omap_objects) {
list<string> detail;
for (auto &pool : pools) {
const string& pool_name = osdmap.get_pool_name(pool.first);
auto it2 = pg_pool_sum.find(pool.first);
if (it2 == pg_pool_sum.end()) {
continue;
}
const pool_stat_t *pstat = &it2->second;
if (pstat == nullptr) {
continue;
}
const object_stat_sum_t& sum = pstat->stats.sum;
if (sum.num_large_omap_objects) {
stringstream ss;
ss << sum.num_large_omap_objects << " large objects found in pool "
<< "'" << pool_name << "'";
detail.push_back(ss.str());
}
}
if (!detail.empty()) {
ostringstream ss;
ss << pg_sum.stats.sum.num_large_omap_objects << " large omap objects";
auto& d = checks->add("LARGE_OMAP_OBJECTS", HEALTH_WARN, ss.str(),
pg_sum.stats.sum.num_large_omap_objects);
stringstream tip;
tip << "Search the cluster log for 'Large omap object found' for more "
<< "details.";
detail.push_back(tip.str());
d.detail.swap(detail);
}
}
// CACHE_POOL_NEAR_FULL
{
list<string> detail;
unsigned num_pools = 0;
for (auto& p : pools) {
if ((!p.second.target_max_objects && !p.second.target_max_bytes) ||
!pg_pool_sum.count(p.first)) {
continue;
}
bool nearfull = false;
const string& name = osdmap.get_pool_name(p.first);
const pool_stat_t& st = get_pg_pool_sum_stat(p.first);
uint64_t ratio = p.second.cache_target_full_ratio_micro +
((1000000 - p.second.cache_target_full_ratio_micro) *
cct->_conf->mon_cache_target_full_warn_ratio);
if (p.second.target_max_objects &&
(uint64_t)(st.stats.sum.num_objects -
st.stats.sum.num_objects_hit_set_archive) >
p.second.target_max_objects * (ratio / 1000000.0)) {
ostringstream ss;
ss << "cache pool '" << name << "' with "
<< si_u_t(st.stats.sum.num_objects)
<< " objects at/near target max "
<< si_u_t(p.second.target_max_objects) << " objects";
detail.push_back(ss.str());
nearfull = true;
}
if (p.second.target_max_bytes &&
(uint64_t)(st.stats.sum.num_bytes -
st.stats.sum.num_bytes_hit_set_archive) >
p.second.target_max_bytes * (ratio / 1000000.0)) {
ostringstream ss;
ss << "cache pool '" << name
<< "' with " << byte_u_t(st.stats.sum.num_bytes)
<< " at/near target max "
<< byte_u_t(p.second.target_max_bytes);
detail.push_back(ss.str());
nearfull = true;
}
if (nearfull) {
++num_pools;
}
}
if (!detail.empty()) {
ostringstream ss;
ss << num_pools << " cache pools at or near target size";
auto& d = checks->add("CACHE_POOL_NEAR_FULL", HEALTH_WARN, ss.str(),
num_pools);
d.detail.swap(detail);
}
}
// TOO_FEW_PGS
unsigned num_in = osdmap.get_num_in_osds();
auto sum_pg_up = std::max(static_cast<size_t>(pg_sum.up), pg_stat.size());
const auto min_pg_per_osd =
cct->_conf.get_val<uint64_t>("mon_pg_warn_min_per_osd");
if (num_in && min_pg_per_osd > 0 && osdmap.get_pools().size() > 0) {
auto per = sum_pg_up / num_in;
if (per < min_pg_per_osd && per) {
ostringstream ss;
ss << "too few PGs per OSD (" << per
<< " < min " << min_pg_per_osd << ")";
checks->add("TOO_FEW_PGS", HEALTH_WARN, ss.str(),
min_pg_per_osd - per);
}
}
// TOO_MANY_PGS
auto max_pg_per_osd = cct->_conf.get_val<uint64_t>("mon_max_pg_per_osd");
if (num_in && max_pg_per_osd > 0) {
auto per = sum_pg_up / num_in;
if (per > max_pg_per_osd) {
ostringstream ss;
ss << "too many PGs per OSD (" << per
<< " > max " << max_pg_per_osd << ")";
checks->add("TOO_MANY_PGS", HEALTH_WARN, ss.str(),
per - max_pg_per_osd);
}
}
// TOO_FEW_OSDS
auto warn_too_few_osds = cct->_conf.get_val<bool>("mon_warn_on_too_few_osds");
auto osd_pool_default_size = cct->_conf.get_val<uint64_t>("osd_pool_default_size");
if (warn_too_few_osds && osdmap.get_num_osds() < osd_pool_default_size) {
ostringstream ss;
ss << "OSD count " << osdmap.get_num_osds()
<< " < osd_pool_default_size " << osd_pool_default_size;
checks->add("TOO_FEW_OSDS", HEALTH_WARN, ss.str(),
osd_pool_default_size - osdmap.get_num_osds());
}
// SLOW_PING_TIME
// Convert milliseconds to microseconds
auto warn_slow_ping_time = cct->_conf.get_val<double>("mon_warn_on_slow_ping_time") * 1000;
auto grace = cct->_conf.get_val<int64_t>("osd_heartbeat_grace");
if (warn_slow_ping_time == 0) {
double ratio = cct->_conf.get_val<double>("mon_warn_on_slow_ping_ratio");
warn_slow_ping_time = grace;
warn_slow_ping_time *= 1000000 * ratio; // Seconds of grace to microseconds at ratio
}
if (warn_slow_ping_time > 0) {
struct mon_ping_item_t {
uint32_t pingtime;
int from;
int to;
bool improving;
bool operator<(const mon_ping_item_t& rhs) const {
if (pingtime < rhs.pingtime)
return true;
if (pingtime > rhs.pingtime)
return false;
if (from < rhs.from)
return true;
if (from > rhs.from)
return false;
return to < rhs.to;
}
};
list<string> detail_back;
list<string> detail_front;
list<string> detail;
set<mon_ping_item_t> back_sorted, front_sorted;
for (auto i : osd_stat) {
for (auto j : i.second.hb_pingtime) {
// Maybe source info is old
if (now.sec() - j.second.last_update > grace * 60)
continue;
mon_ping_item_t back;
back.pingtime = std::max(j.second.back_pingtime[0], j.second.back_pingtime[1]);
back.pingtime = std::max(back.pingtime, j.second.back_pingtime[2]);
back.from = i.first;
back.to = j.first;
if (back.pingtime > warn_slow_ping_time) {
back.improving = (j.second.back_pingtime[0] < j.second.back_pingtime[1]
&& j.second.back_pingtime[1] < j.second.back_pingtime[2]);
back_sorted.emplace(back);
}
mon_ping_item_t front;
front.pingtime = std::max(j.second.front_pingtime[0], j.second.front_pingtime[1]);
front.pingtime = std::max(front.pingtime, j.second.front_pingtime[2]);
front.from = i.first;
front.to = j.first;
if (front.pingtime > warn_slow_ping_time) {
front.improving = (j.second.front_pingtime[0] < j.second.front_pingtime[1]
&& j.second.front_pingtime[1] < j.second.back_pingtime[2]);
front_sorted.emplace(front);
}
}
if (i.second.num_shards_repaired >
cct->_conf.get_val<uint64_t>("mon_osd_warn_num_repaired")) {
ostringstream ss;
ss << "osd." << i.first << " had " << i.second.num_shards_repaired << " reads repaired";
detail.push_back(ss.str());
}
}
if (!detail.empty()) {
ostringstream ss;
ss << "Too many repaired reads on " << detail.size() << " OSDs";
auto& d = checks->add("OSD_TOO_MANY_REPAIRS", HEALTH_WARN, ss.str(),
detail.size());
d.detail.swap(detail);
}
int max_detail = 10;
for (auto &sback : boost::adaptors::reverse(back_sorted)) {
ostringstream ss;
if (max_detail == 0) {
ss << "Truncated long network list. Use ceph daemon mgr.# dump_osd_network for more information";
detail_back.push_back(ss.str());
break;
}
max_detail--;
ss << "Slow OSD heartbeats on back from osd." << sback.from
<< " [" << osd_parentage(osdmap, sback.from) << "]"
<< (osdmap.is_down(sback.from) ? " (down)" : "")
<< " to osd." << sback.to
<< " [" << osd_parentage(osdmap, sback.to) << "]"
<< (osdmap.is_down(sback.to) ? " (down)" : "")
<< " " << fixed_u_to_string(sback.pingtime, 3) << " msec"
<< (sback.improving ? " possibly improving" : "");
detail_back.push_back(ss.str());
}
max_detail = 10;
for (auto &sfront : boost::adaptors::reverse(front_sorted)) {
ostringstream ss;
if (max_detail == 0) {
ss << "Truncated long network list. Use ceph daemon mgr.# dump_osd_network for more information";
detail_front.push_back(ss.str());
break;
}
max_detail--;
// Get crush parentage for each osd
ss << "Slow OSD heartbeats on front from osd." << sfront.from
<< " [" << osd_parentage(osdmap, sfront.from) << "]"
<< (osdmap.is_down(sfront.from) ? " (down)" : "")
<< " to osd." << sfront.to
<< " [" << osd_parentage(osdmap, sfront.to) << "]"
<< (osdmap.is_down(sfront.to) ? " (down)" : "")
<< " " << fixed_u_to_string(sfront.pingtime, 3) << " msec"
<< (sfront.improving ? " possibly improving" : "");
detail_front.push_back(ss.str());
}
if (detail_back.size() != 0) {
ostringstream ss;
ss << "Slow OSD heartbeats on back (longest "
<< fixed_u_to_string(back_sorted.rbegin()->pingtime, 3) << "ms)";
auto& d = checks->add("OSD_SLOW_PING_TIME_BACK", HEALTH_WARN, ss.str(),
back_sorted.size());
d.detail.swap(detail_back);
}
if (detail_front.size() != 0) {
ostringstream ss;
ss << "Slow OSD heartbeats on front (longest "
<< fixed_u_to_string(front_sorted.rbegin()->pingtime, 3) << "ms)";
auto& d = checks->add("OSD_SLOW_PING_TIME_FRONT", HEALTH_WARN, ss.str(),
front_sorted.size());
d.detail.swap(detail_front);
}
}
// SMALLER_PGP_NUM
// MANY_OBJECTS_PER_PG
if (!pg_stat.empty()) {
list<string> pgp_detail, many_detail;
const auto mon_pg_warn_min_objects =
cct->_conf.get_val<int64_t>("mon_pg_warn_min_objects");
const auto mon_pg_warn_min_pool_objects =
cct->_conf.get_val<int64_t>("mon_pg_warn_min_pool_objects");
const auto mon_pg_warn_max_object_skew =
cct->_conf.get_val<double>("mon_pg_warn_max_object_skew");
for (auto p = pg_pool_sum.begin();
p != pg_pool_sum.end();
++p) {
const pg_pool_t *pi = osdmap.get_pg_pool(p->first);
if (!pi)
continue; // in case osdmap changes haven't propagated to PGMap yet
const string& name = osdmap.get_pool_name(p->first);
// NOTE: we use pg_num_target and pgp_num_target for the purposes of
// the warnings. If the cluster is failing to converge on the target
// values that is a separate issue!
if (pi->get_pg_num_target() > pi->get_pgp_num_target() &&
!(name.find(".DELETED") != string::npos &&
cct->_conf->mon_fake_pool_delete)) {
ostringstream ss;
ss << "pool " << name << " pg_num "
<< pi->get_pg_num_target()
<< " > pgp_num " << pi->get_pgp_num_target();
pgp_detail.push_back(ss.str());
}
int average_objects_per_pg = pg_sum.stats.sum.num_objects / pg_stat.size();
if (average_objects_per_pg > 0 &&
pg_sum.stats.sum.num_objects >= mon_pg_warn_min_objects &&
p->second.stats.sum.num_objects >= mon_pg_warn_min_pool_objects) {
int objects_per_pg = p->second.stats.sum.num_objects /
pi->get_pg_num_target();
float ratio = (float)objects_per_pg / (float)average_objects_per_pg;
if (mon_pg_warn_max_object_skew > 0 &&
ratio > mon_pg_warn_max_object_skew) {
ostringstream ss;
if (pi->pg_autoscale_mode != pg_pool_t::pg_autoscale_mode_t::ON) {
ss << "pool " << name << " objects per pg ("
<< objects_per_pg << ") is more than " << ratio
<< " times cluster average ("
<< average_objects_per_pg << ")";
many_detail.push_back(ss.str());
}
}
}
}
if (!pgp_detail.empty()) {
ostringstream ss;
ss << pgp_detail.size() << " pools have pg_num > pgp_num";
auto& d = checks->add("SMALLER_PGP_NUM", HEALTH_WARN, ss.str(),
pgp_detail.size());
d.detail.swap(pgp_detail);
}
if (!many_detail.empty()) {
ostringstream ss;
ss << many_detail.size() << " pools have many more objects per pg than"
<< " average";
auto& d = checks->add("MANY_OBJECTS_PER_PG", HEALTH_WARN, ss.str(),
many_detail.size());
d.detail.swap(many_detail);
}
}
// POOL_FULL
// POOL_NEAR_FULL
{
float warn_threshold = (float)g_conf().get_val<int64_t>("mon_pool_quota_warn_threshold")/100;
float crit_threshold = (float)g_conf().get_val<int64_t>("mon_pool_quota_crit_threshold")/100;
list<string> full_detail, nearfull_detail;
unsigned full_pools = 0, nearfull_pools = 0;
for (auto it : pools) {
auto it2 = pg_pool_sum.find(it.first);
if (it2 == pg_pool_sum.end()) {
continue;
}
const pool_stat_t *pstat = &it2->second;
const object_stat_sum_t& sum = pstat->stats.sum;
const string& pool_name = osdmap.get_pool_name(it.first);
const pg_pool_t &pool = it.second;
bool full = false, nearfull = false;
if (pool.quota_max_objects > 0) {
stringstream ss;
if ((uint64_t)sum.num_objects >= pool.quota_max_objects) {
} else if (crit_threshold > 0 &&
sum.num_objects >= pool.quota_max_objects*crit_threshold) {
ss << "pool '" << pool_name
<< "' has " << sum.num_objects << " objects"
<< " (max " << pool.quota_max_objects << ")";
full_detail.push_back(ss.str());
full = true;
} else if (warn_threshold > 0 &&
sum.num_objects >= pool.quota_max_objects*warn_threshold) {
ss << "pool '" << pool_name
<< "' has " << sum.num_objects << " objects"
<< " (max " << pool.quota_max_objects << ")";
nearfull_detail.push_back(ss.str());
nearfull = true;
}
}
if (pool.quota_max_bytes > 0) {
stringstream ss;
if ((uint64_t)sum.num_bytes >= pool.quota_max_bytes) {
} else if (crit_threshold > 0 &&
sum.num_bytes >= pool.quota_max_bytes*crit_threshold) {
ss << "pool '" << pool_name
<< "' has " << byte_u_t(sum.num_bytes)
<< " (max " << byte_u_t(pool.quota_max_bytes) << ")";
full_detail.push_back(ss.str());
full = true;
} else if (warn_threshold > 0 &&
sum.num_bytes >= pool.quota_max_bytes*warn_threshold) {
ss << "pool '" << pool_name
<< "' has " << byte_u_t(sum.num_bytes)
<< " (max " << byte_u_t(pool.quota_max_bytes) << ")";
nearfull_detail.push_back(ss.str());
nearfull = true;
}
}
if (full) {
++full_pools;
}
if (nearfull) {
++nearfull_pools;
}
}
if (full_pools) {
ostringstream ss;
ss << full_pools << " pools full";
auto& d = checks->add("POOL_FULL", HEALTH_ERR, ss.str(), full_pools);
d.detail.swap(full_detail);
}
if (nearfull_pools) {
ostringstream ss;
ss << nearfull_pools << " pools nearfull";
auto& d = checks->add("POOL_NEAR_FULL", HEALTH_WARN, ss.str(), nearfull_pools);
d.detail.swap(nearfull_detail);
}
}
// OBJECT_MISPLACED
if (pg_sum.stats.sum.num_objects_misplaced &&
pg_sum.stats.sum.num_object_copies > 0 &&
cct->_conf->mon_warn_on_misplaced) {
double pc = (double)pg_sum.stats.sum.num_objects_misplaced /
(double)pg_sum.stats.sum.num_object_copies * (double)100.0;
char b[20];
snprintf(b, sizeof(b), "%.3lf", pc);
ostringstream ss;
ss << pg_sum.stats.sum.num_objects_misplaced
<< "/" << pg_sum.stats.sum.num_object_copies << " objects misplaced ("
<< b << "%)";
checks->add("OBJECT_MISPLACED", HEALTH_WARN, ss.str(),
pg_sum.stats.sum.num_objects_misplaced);
}
// OBJECT_UNFOUND
if (pg_sum.stats.sum.num_objects_unfound &&
pg_sum.stats.sum.num_objects) {
double pc = (double)pg_sum.stats.sum.num_objects_unfound /
(double)pg_sum.stats.sum.num_objects * (double)100.0;
char b[20];
snprintf(b, sizeof(b), "%.3lf", pc);
ostringstream ss;
ss << pg_sum.stats.sum.num_objects_unfound
<< "/" << pg_sum.stats.sum.num_objects << " objects unfound (" << b << "%)";
auto& d = checks->add("OBJECT_UNFOUND", HEALTH_WARN, ss.str(),
pg_sum.stats.sum.num_objects_unfound);
for (auto& p : pg_stat) {
if (p.second.stats.sum.num_objects_unfound) {
ostringstream ss;
ss << "pg " << p.first
<< " has " << p.second.stats.sum.num_objects_unfound
<< " unfound objects";
d.detail.push_back(ss.str());
if (d.detail.size() > max) {
d.detail.push_back("(additional pgs left out for brevity)");
break;
}
}
}
}
// REQUEST_SLOW
// REQUEST_STUCK
// SLOW_OPS unifies them in mimic.
if (osdmap.require_osd_release < ceph_release_t::mimic &&
cct->_conf->mon_osd_warn_op_age > 0 &&
!osd_sum.op_queue_age_hist.h.empty() &&
osd_sum.op_queue_age_hist.upper_bound() / 1000.0 >
cct->_conf->mon_osd_warn_op_age) {
list<string> warn_detail, error_detail;
unsigned warn = 0, error = 0;
float err_age =
cct->_conf->mon_osd_warn_op_age * cct->_conf->mon_osd_err_op_age_ratio;
const pow2_hist_t& h = osd_sum.op_queue_age_hist;
for (unsigned i = h.h.size() - 1; i > 0; --i) {
float ub = (float)(1 << i) / 1000.0;
if (ub < cct->_conf->mon_osd_warn_op_age)
break;
if (h.h[i]) {
ostringstream ss;
ss << h.h[i] << " ops are blocked > " << ub << " sec";
if (ub > err_age) {
error += h.h[i];
error_detail.push_back(ss.str());
} else {
warn += h.h[i];
warn_detail.push_back(ss.str());
}
}
}
map<float,set<int>> warn_osd_by_max; // max -> osds
map<float,set<int>> error_osd_by_max; // max -> osds
if (!warn_detail.empty() || !error_detail.empty()) {
for (auto& p : osd_stat) {
const pow2_hist_t& h = p.second.op_queue_age_hist;
for (unsigned i = h.h.size() - 1; i > 0; --i) {
float ub = (float)(1 << i) / 1000.0;
if (ub < cct->_conf->mon_osd_warn_op_age)
break;
if (h.h[i]) {
if (ub > err_age) {
error_osd_by_max[ub].insert(p.first);
} else {
warn_osd_by_max[ub].insert(p.first);
}
break;
}
}
}
}
if (!warn_detail.empty()) {
ostringstream ss;
ss << warn << " slow requests are blocked > "
<< cct->_conf->mon_osd_warn_op_age << " sec";
auto& d = checks->add("REQUEST_SLOW", HEALTH_WARN, ss.str(), warn);
d.detail.swap(warn_detail);
int left = max;
for (auto& p : warn_osd_by_max) {
ostringstream ss;
if (p.second.size() > 1) {
ss << "osds " << p.second
<< " have blocked requests > " << p.first << " sec";
} else {
ss << "osd." << *p.second.begin()
<< " has blocked requests > " << p.first << " sec";
}
d.detail.push_back(ss.str());
if (--left == 0) {
break;
}
}
}
if (!error_detail.empty()) {
ostringstream ss;
ss << error << " stuck requests are blocked > "
<< err_age << " sec";
auto& d = checks->add("REQUEST_STUCK", HEALTH_ERR, ss.str(), error);
d.detail.swap(error_detail);
int left = max;
for (auto& p : error_osd_by_max) {
ostringstream ss;
if (p.second.size() > 1) {
ss << "osds " << p.second
<< " have stuck requests > " << p.first << " sec";
} else {
ss << "osd." << *p.second.begin()
<< " has stuck requests > " << p.first << " sec";
}
d.detail.push_back(ss.str());
if (--left == 0) {
break;
}
}
}
}
// OBJECT_STORE_WARN
if (osd_sum.os_alerts.size()) {
map<string, pair<size_t, list<string>>> os_alerts_sum;
for (auto& a : osd_sum.os_alerts) {
int left = max;
string s0 = " osd.";
s0 += stringify(a.first);
for (auto& aa : a.second) {
string s(s0);
s += " ";
s += aa.second;
auto it = os_alerts_sum.find(aa.first);
if (it == os_alerts_sum.end()) {
list<string> d;
d.emplace_back(s);
os_alerts_sum.emplace(aa.first, std::make_pair(1, d));
} else {
auto& p = it->second;
++p.first;
p.second.emplace_back(s);
}
if (--left == 0) {
break;
}
}
}
for (auto& asum : os_alerts_sum) {
string summary = stringify(asum.second.first) + " OSD(s)";
if (asum.first == "BLUEFS_SPILLOVER") {
summary += " experiencing BlueFS spillover";
} else if (asum.first == "BLUESTORE_NO_COMPRESSION") {
summary += " have broken BlueStore compression";
} else if (asum.first == "BLUESTORE_LEGACY_STATFS") {
summary += " reporting legacy (not per-pool) BlueStore stats";
} else if (asum.first == "BLUESTORE_DISK_SIZE_MISMATCH") {
summary += " have dangerous mismatch between BlueStore block device and free list sizes";
} else if (asum.first == "BLUESTORE_NO_PER_PG_OMAP") {
summary += " reporting legacy (not per-pg) BlueStore omap";
} else if (asum.first == "BLUESTORE_NO_PER_POOL_OMAP") {
summary += " reporting legacy (not per-pool) BlueStore omap usage stats";
} else if (asum.first == "BLUESTORE_SPURIOUS_READ_ERRORS") {
summary += " have spurious read errors";
}
auto& d = checks->add(asum.first, HEALTH_WARN, summary, asum.second.first);
for (auto& s : asum.second.second) {
d.detail.push_back(s);
}
}
}
// PG_NOT_SCRUBBED
// PG_NOT_DEEP_SCRUBBED
if (cct->_conf->mon_warn_pg_not_scrubbed_ratio ||
cct->_conf->mon_warn_pg_not_deep_scrubbed_ratio) {
list<string> detail, deep_detail;
int detail_max = max, deep_detail_max = max;
int detail_more = 0, deep_detail_more = 0;
int detail_total = 0, deep_detail_total = 0;
for (auto& p : pg_stat) {
int64_t pnum = p.first.pool();
auto pool = osdmap.get_pg_pool(pnum);
if (!pool)
continue;
if (cct->_conf->mon_warn_pg_not_scrubbed_ratio) {
double scrub_max_interval = 0;
pool->opts.get(pool_opts_t::SCRUB_MAX_INTERVAL, &scrub_max_interval);
if (scrub_max_interval <= 0) {
scrub_max_interval = cct->_conf->osd_scrub_max_interval;
}
const double age = (cct->_conf->mon_warn_pg_not_scrubbed_ratio * scrub_max_interval) +
scrub_max_interval;
utime_t cutoff = now;
cutoff -= age;
if (p.second.last_scrub_stamp < cutoff) {
if (detail_max > 0) {
ostringstream ss;
ss << "pg " << p.first << " not scrubbed since "
<< p.second.last_scrub_stamp;
detail.push_back(ss.str());
--detail_max;
} else {
++detail_more;
}
++detail_total;
}
}
if (cct->_conf->mon_warn_pg_not_deep_scrubbed_ratio) {
double deep_scrub_interval = 0;
pool->opts.get(pool_opts_t::DEEP_SCRUB_INTERVAL, &deep_scrub_interval);
if (deep_scrub_interval <= 0) {
deep_scrub_interval = cct->_conf->osd_deep_scrub_interval;
}
double deep_age = (cct->_conf->mon_warn_pg_not_deep_scrubbed_ratio * deep_scrub_interval) +
deep_scrub_interval;
utime_t deep_cutoff = now;
deep_cutoff -= deep_age;
if (p.second.last_deep_scrub_stamp < deep_cutoff) {
if (deep_detail_max > 0) {
ostringstream ss;
ss << "pg " << p.first << " not deep-scrubbed since "
<< p.second.last_deep_scrub_stamp;
deep_detail.push_back(ss.str());
--deep_detail_max;
} else {
++deep_detail_more;
}
++deep_detail_total;
}
}
}
if (detail_total) {
ostringstream ss;
ss << detail_total << " pgs not scrubbed in time";
auto& d = checks->add("PG_NOT_SCRUBBED", HEALTH_WARN, ss.str(), detail_total);
if (!detail.empty()) {
d.detail.swap(detail);
if (detail_more) {
ostringstream ss;
ss << detail_more << " more pgs... ";
d.detail.push_back(ss.str());
}
}
}
if (deep_detail_total) {
ostringstream ss;
ss << deep_detail_total << " pgs not deep-scrubbed in time";
auto& d = checks->add("PG_NOT_DEEP_SCRUBBED", HEALTH_WARN, ss.str(),
deep_detail_total);
if (!deep_detail.empty()) {
d.detail.swap(deep_detail);
if (deep_detail_more) {
ostringstream ss;
ss << deep_detail_more << " more pgs... ";
d.detail.push_back(ss.str());
}
}
}
}
// POOL_APP
if (g_conf().get_val<bool>("mon_warn_on_pool_no_app")) {
list<string> detail;
for (auto &it : pools) {
const pg_pool_t &pool = it.second;
const string& pool_name = osdmap.get_pool_name(it.first);
auto it2 = pg_pool_sum.find(it.first);
if (it2 == pg_pool_sum.end()) {
continue;
}
const pool_stat_t *pstat = &it2->second;
if (pstat == nullptr) {
continue;
}
const object_stat_sum_t& sum = pstat->stats.sum;
// application metadata is not encoded until luminous is minimum
// required release
if (sum.num_objects > 0 && pool.application_metadata.empty() &&
!pool.is_tier()) {
stringstream ss;
ss << "application not enabled on pool '" << pool_name << "'";
detail.push_back(ss.str());
}
}
if (!detail.empty()) {
ostringstream ss;
ss << detail.size() << " pool(s) do not have an application enabled";
auto& d = checks->add("POOL_APP_NOT_ENABLED", HEALTH_WARN, ss.str(),
detail.size());
stringstream tip;
tip << "use 'ceph osd pool application enable <pool-name> "
<< "<app-name>', where <app-name> is 'cephfs', 'rbd', 'rgw', "
<< "or freeform for custom applications.";
detail.push_back(tip.str());
d.detail.swap(detail);
}
}
// PG_SLOW_SNAP_TRIMMING
if (!pg_stat.empty() && cct->_conf->mon_osd_snap_trim_queue_warn_on > 0) {
uint32_t snapthreshold = cct->_conf->mon_osd_snap_trim_queue_warn_on;
uint64_t snaptrimq_exceeded = 0;
uint32_t longest_queue = 0;
const pg_t* longest_q_pg = nullptr;
list<string> detail;
for (auto& i: pg_stat) {
uint32_t current_len = i.second.snaptrimq_len;
if (current_len >= snapthreshold) {
snaptrimq_exceeded++;
if (longest_queue <= current_len) {
longest_q_pg = &i.first;
longest_queue = current_len;
}
if (detail.size() < max - 1) {
stringstream ss;
ss << "snap trim queue for pg " << i.first << " at " << current_len;
detail.push_back(ss.str());
continue;
}
if (detail.size() < max) {
detail.push_back("...more pgs affected");
continue;
}
}
}
if (snaptrimq_exceeded) {
{
ostringstream ss;
ss << "longest queue on pg " << *longest_q_pg << " at " << longest_queue;
detail.push_back(ss.str());
}
stringstream ss;
ss << "snap trim queue for " << snaptrimq_exceeded << " pg(s) >= " << snapthreshold << " (mon_osd_snap_trim_queue_warn_on)";
auto& d = checks->add("PG_SLOW_SNAP_TRIMMING", HEALTH_WARN, ss.str(),
snaptrimq_exceeded);
detail.push_back("try decreasing \"osd snap trim sleep\" and/or increasing \"osd pg max concurrent snap trims\".");
d.detail.swap(detail);
}
}
}
// Emit a pg-map summary.  With a formatter we first dump the per-pool
// breakdown of pg states, then delegate the common digest summary to
// the base class; plain-text output is produced entirely by the base.
void PGMap::print_summary(ceph::Formatter *f, ostream *out) const
{
  if (f) {
    f->open_array_section("pgs_by_pool_state");
    for (const auto& [pool_id, state_counts] : num_pg_by_pool_state) {
      f->open_object_section("per_pool_pgs_by_state");
      f->dump_int("pool_id", pool_id);
      f->open_array_section("pg_state_counts");
      for (const auto& [state_bits, count] : state_counts) {
        f->open_object_section("pg_state_count");
        f->dump_string("state_name", pg_state_string(state_bits));
        f->dump_int("count", count);
        f->close_section();  // pg_state_count
      }
      f->close_section();  // pg_state_counts
      f->close_section();  // per_pool_pgs_by_state
    }
    f->close_section();  // pgs_by_pool_state
  }
  PGMapDigest::print_summary(f, out);
}
// Handle the read-only "pg ..." family of commands (plus "osd perf" and
// "osd blocked-by") against a snapshot of the PGMap and OSDMap.
//
// @param orig_prefix  command prefix as parsed from the client request
// @param orig_cmdmap  parsed command arguments; copied locally so that
//                     alias prefixes can inject synthetic values
// @param pg_map       pg/osd statistics to report on
// @param osdmap       used for pool-name lookups and osd up/exists checks
// @param f            optional formatter for structured (json/xml) output;
//                     when null, plain-text output is produced instead
// @param ss           human-readable status or error text for the caller
// @param odata        payload buffer returned to the client
// @return 0 on success; -ENOENT / -EINVAL / -EAGAIN on bad arguments;
//         -EOPNOTSUPP if the prefix is not handled by this function
int process_pg_map_command(
  const string& orig_prefix,
  const cmdmap_t& orig_cmdmap,
  const PGMap& pg_map,
  const OSDMap& osdmap,
  ceph::Formatter *f,
  stringstream *ss,
  bufferlist *odata)
{
  string prefix = orig_prefix;
  auto cmdmap = orig_cmdmap;

  // Appended to plain-text dumps that include omap statistics, since
  // those numbers are only refreshed by deep scrub and can be stale.
  string omap_stats_note =
      "\n* NOTE: Omap statistics are gathered during deep scrub and "
      "may be inaccurate soon afterwards depending on utilization. See "
      "http://docs.ceph.com/en/latest/dev/placement-group/#omap-statistics "
      "for further details.\n";
  bool omap_stats_note_required = false;

  // Rewrite alias prefixes into their canonical form, injecting the
  // implied arguments into the (local copy of the) command map.
  // perhaps these would be better in the parsing, but it's weird
  bool primary = false;
  if (prefix == "pg dump_json") {
    vector<string> v;
    v.push_back(string("all"));
    cmd_putval(g_ceph_context, cmdmap, "dumpcontents", v);
    prefix = "pg dump";
  } else if (prefix == "pg dump_pools_json") {
    vector<string> v;
    v.push_back(string("pools"));
    cmd_putval(g_ceph_context, cmdmap, "dumpcontents", v);
    prefix = "pg dump";
  } else if (prefix == "pg ls-by-primary") {
    primary = true;
    prefix = "pg ls";
  } else if (prefix == "pg ls-by-osd") {
    prefix = "pg ls";
  } else if (prefix == "pg ls-by-pool") {
    prefix = "pg ls";
    string poolstr;
    cmd_getval(cmdmap, "poolstr", poolstr);
    int64_t pool = osdmap.lookup_pg_pool_name(poolstr.c_str());
    if (pool < 0) {
      *ss << "pool " << poolstr << " does not exist";
      return -ENOENT;
    }
    cmd_putval(g_ceph_context, cmdmap, "pool", pool);
  }

  stringstream ds;
  if (prefix == "pg stat") {
    // One-line pg summary (json object when a formatter was supplied).
    if (f) {
      f->open_object_section("pg_summary");
      pg_map.print_oneline_summary(f, NULL);
      f->close_section();
      f->flush(ds);
    } else {
      ds << pg_map;
    }
    odata->append(ds);
    return 0;
  }

  if (prefix == "pg getmap") {
    // Return the raw encoded PGMap.
    pg_map.encode(*odata);
    *ss << "got pgmap version " << pg_map.version;
    return 0;
  }

  if (prefix == "pg dump") {
    // "dumpcontents" selects which sections to emit; defaults to "all".
    string val;
    vector<string> dumpcontents;
    set<string> what;
    if (cmd_getval(cmdmap, "dumpcontents", dumpcontents)) {
      copy(dumpcontents.begin(), dumpcontents.end(),
           inserter(what, what.end()));
    }
    if (what.empty())
      what.insert("all");
    if (f) {
      if (what.count("all")) {
	f->open_object_section("pg_map");
	pg_map.dump(f);
	f->close_section();
      } else if (what.count("summary") || what.count("sum")) {
	f->open_object_section("pg_map");
	pg_map.dump_basic(f);
	f->close_section();
      } else {
	if (what.count("pools")) {
	  pg_map.dump_pool_stats(f);
	}
	if (what.count("osds")) {
	  pg_map.dump_osd_stats(f);
	}
	if (what.count("pgs")) {
	  pg_map.dump_pg_stats(f, false);
	}
	if (what.count("pgs_brief")) {
	  pg_map.dump_pg_stats(f, true);
	}
	if (what.count("delta")) {
	  f->open_object_section("delta");
	  pg_map.dump_delta(f);
	  f->close_section();
	}
      }
      f->flush(*odata);
    } else {
      // Plain-text dumps; sections that report omap figures trigger the
      // stale-omap-stats footnote.
      if (what.count("all")) {
	pg_map.dump(ds);
	omap_stats_note_required = true;
      } else if (what.count("summary") || what.count("sum")) {
	pg_map.dump_basic(ds);
	pg_map.dump_pg_sum_stats(ds, true);
	pg_map.dump_osd_sum_stats(ds);
	omap_stats_note_required = true;
      } else {
	if (what.count("pgs_brief")) {
	  pg_map.dump_pg_stats(ds, true);
	}
	bool header = true;
	if (what.count("pgs")) {
	  pg_map.dump_pg_stats(ds, false);
	  header = false;  // pg stats already printed the shared header
	  omap_stats_note_required = true;
	}
	if (what.count("pools")) {
	  pg_map.dump_pool_stats(ds, header);
	  omap_stats_note_required = true;
	}
	if (what.count("osds")) {
	  pg_map.dump_osd_stats(ds);
	}
      }
      odata->append(ds);
      if (omap_stats_note_required) {
	odata->append(omap_stats_note);
      }
    }
    *ss << "dumped " << what;
    return 0;
  }

  if (prefix == "pg ls") {
    // List pgs filtered by pool, osd, and/or pg state.  "primary" (set
    // by the ls-by-primary alias) restricts the osd filter to the
    // acting primary.
    int64_t osd = -1;
    int64_t pool = -1;
    vector<string>states;
    set<pg_t> pgs;
    cmd_getval(cmdmap, "pool", pool);
    cmd_getval(cmdmap, "osd", osd);
    cmd_getval(cmdmap, "states", states);
    if (pool >= 0 && !osdmap.have_pg_pool(pool)) {
      *ss << "pool " << pool << " does not exist";
      return -ENOENT;
    }
    if (osd >= 0 && !osdmap.is_up(osd)) {
      *ss << "osd " << osd << " is not up";
      return -EAGAIN;
    }
    if (states.empty())
      states.push_back("all");

    // Fold the requested state names into a single bitmask; "all"
    // short-circuits to every state.
    uint64_t state = 0;
    while (!states.empty()) {
      string state_str = states.back();
      if (state_str == "all") {
        state = -1;
        break;
      } else {
        auto filter = pg_string_state(state_str);
        if (!filter) {
          *ss << "'" << state_str << "' is not a valid pg state,"
              << " available choices: " << pg_state_string(0xFFFFFFFF);
          return -EINVAL;
        }
        state |= *filter;
      }
      states.pop_back();
    }

    pg_map.get_filtered_pg_stats(state, pool, osd, primary, pgs);

    if (f && !pgs.empty()) {
      pg_map.dump_filtered_pg_stats(f, pgs);
      f->flush(*odata);
    } else if (!pgs.empty()) {
      pg_map.dump_filtered_pg_stats(ds, pgs);
      odata->append(ds);
      odata->append(omap_stats_note);
    }
    return 0;
  }

  if (prefix == "pg dump_stuck") {
    // Dump pgs stuck in the requested state(s) longer than "threshold"
    // seconds (default from mon_pg_stuck_threshold).
    vector<string> stuckop_vec;
    cmd_getval(cmdmap, "stuckops", stuckop_vec);
    if (stuckop_vec.empty())
      stuckop_vec.push_back("unclean");
    const int64_t threshold = cmd_getval_or<int64_t>(
      cmdmap, "threshold",
      g_conf().get_val<int64_t>("mon_pg_stuck_threshold"));

    if (pg_map.dump_stuck_pg_stats(ds, f, (int)threshold, stuckop_vec) < 0) {
      *ss << "failed";
    } else {
      *ss << "ok";
    }
    odata->append(ds);
    return 0;
  }

  if (prefix == "pg debug") {
    // Debug predicates; answer is the literal string TRUE or FALSE.
    const string debugop = cmd_getval_or<string>(
      cmdmap, "debugop",
      "unfound_objects_exist");
    if (debugop == "unfound_objects_exist") {
      bool unfound_objects_exist = false;
      for (const auto& p : pg_map.pg_stat) {
	if (p.second.stats.sum.num_objects_unfound > 0) {
	  unfound_objects_exist = true;
	  break;
	}
      }
      if (unfound_objects_exist)
	ds << "TRUE";
      else
	ds << "FALSE";
      odata->append(ds);
      return 0;
    }
    if (debugop == "degraded_pgs_exist") {
      bool degraded_pgs_exist = false;
      for (const auto& p : pg_map.pg_stat) {
	if (p.second.stats.sum.num_objects_degraded > 0) {
	  degraded_pgs_exist = true;
	  break;
	}
      }
      if (degraded_pgs_exist)
	ds << "TRUE";
      else
	ds << "FALSE";
      odata->append(ds);
      return 0;
    }
  }

  if (prefix == "osd perf") {
    // Per-osd commit/apply latency statistics.
    if (f) {
      f->open_object_section("osdstats");
      pg_map.dump_osd_perf_stats(f);
      f->close_section();
      f->flush(ds);
    } else {
      pg_map.print_osd_perf_stats(&ds);
    }
    odata->append(ds);
    return 0;
  }

  if (prefix == "osd blocked-by") {
    // Histogram of which osds are blocking peering for others.
    if (f) {
      f->open_object_section("osd_blocked_by");
      pg_map.dump_osd_blocked_by_stats(f);
      f->close_section();
      f->flush(ds);
    } else {
      pg_map.print_osd_blocked_by_stats(&ds);
    }
    odata->append(ds);
    return 0;
  }

  // Not a command this function knows about.
  return -EOPNOTSUPP;
}
// Reconcile the current PGMap with a (possibly newer) OSDMap, queueing
// every required adjustment in pending_inc:
//  - drop or zero stats for osds that were removed, marked out, or down;
//  - remove pgs belonging to deleted pools;
//  - create stub pg_stat entries for newly created/split pgs;
//  - remove pgs (and stale pending updates) that disappeared via merge.
void PGMapUpdater::check_osd_map(
  CephContext *cct,
  const OSDMap& osdmap,
  const PGMap& pgmap,
  PGMap::Incremental *pending_inc)
{
  for (auto& p : pgmap.osd_stat) {
    if (!osdmap.exists(p.first)) {
      // remove osd_stat
      pending_inc->rm_stat(p.first);
    } else if (osdmap.is_out(p.first)) {
      // zero osd_stat
      if (p.second.statfs.total != 0) {
	pending_inc->stat_osd_out(p.first);
      }
    } else if (!osdmap.is_up(p.first)) {
      // zero the op_queue_age_hist
      if (!p.second.op_queue_age_hist.empty()) {
	pending_inc->stat_osd_down_up(p.first, pgmap);
      }
    }
  }

  // deleted pgs (pools)?
  for (auto& p : pgmap.pg_pool_sum) {
    if (!osdmap.have_pg_pool(p.first)) {
      ldout(cct, 10) << __func__ << " pool " << p.first << " gone, removing pgs"
		     << dendl;
      // queue removal of every pg in the deleted pool...
      for (auto& q : pgmap.pg_stat) {
	if (q.first.pool() == p.first) {
	  pending_inc->pg_remove.insert(q.first);
	}
      }
      // ...and drop any pending stat updates that reference them.
      auto q = pending_inc->pg_stat_updates.begin();
      while (q != pending_inc->pg_stat_updates.end()) {
	if (q->first.pool() == p.first) {
	  q = pending_inc->pg_stat_updates.erase(q);
	} else {
	  ++q;
	}
      }
    }
  }

  // new (split or new pool) or merged pgs?
  map<int64_t,unsigned> new_pg_num;
  for (auto& p : osdmap.get_pools()) {
    int64_t poolid = p.first;
    const pg_pool_t& pi = p.second;
    auto q = pgmap.num_pg_by_pool.find(poolid);
    // my_pg_num is how many pgs this pool has in the current PGMap
    // (0 if the pool is brand new).
    unsigned my_pg_num = 0;
    if (q != pgmap.num_pg_by_pool.end())
      my_pg_num = q->second;
    unsigned pg_num = pi.get_pg_num();
    new_pg_num[poolid] = pg_num;
    if (my_pg_num < pg_num) {
      // Pool grew (split/new): seed stub stats for the new pgs so they
      // show up immediately; timestamps start at the map's mtime.
      ldout(cct,10) << __func__ << " pool " << poolid << " pg_num " << pg_num
		    << " > my pg_num " << my_pg_num << dendl;
      for (unsigned ps = my_pg_num; ps < pg_num; ++ps) {
	pg_t pgid(ps, poolid);
	if (pending_inc->pg_stat_updates.count(pgid) == 0) {
	  ldout(cct,20) << __func__ << " adding " << pgid << dendl;
	  pg_stat_t &stats = pending_inc->pg_stat_updates[pgid];
	  stats.last_fresh = osdmap.get_modified();
	  stats.last_active = osdmap.get_modified();
	  stats.last_change = osdmap.get_modified();
	  stats.last_peered = osdmap.get_modified();
	  stats.last_clean = osdmap.get_modified();
	  stats.last_unstale = osdmap.get_modified();
	  stats.last_undegraded = osdmap.get_modified();
	  stats.last_fullsized = osdmap.get_modified();
	  stats.last_scrub_stamp = osdmap.get_modified();
	  stats.last_deep_scrub_stamp = osdmap.get_modified();
	  stats.last_clean_scrub_stamp = osdmap.get_modified();
	}
      }
    } else if (my_pg_num > pg_num) {
      // Pool shrank (merge): remove the pgs above the new pg_num.
      ldout(cct,10) << __func__ << " pool " << poolid << " pg_num " << pg_num
		    << " < my pg_num " << my_pg_num << dendl;
      for (unsigned i = pg_num; i < my_pg_num; ++i) {
	pg_t pgid(i, poolid);
	ldout(cct,20) << __func__ << " removing merged " << pgid << dendl;
	if (pgmap.pg_stat.count(pgid)) {
	  pending_inc->pg_remove.insert(pgid);
	}
	pending_inc->pg_stat_updates.erase(pgid);
      }
    }
  }

  // Finally, prune pending updates that target pgs which no longer
  // exist under the new per-pool pg_num (or whose pool is gone).
  auto i = pending_inc->pg_stat_updates.begin();
  while (i != pending_inc->pg_stat_updates.end()) {
    auto j = new_pg_num.find(i->first.pool());
    if (j == new_pg_num.end() ||
	i->first.ps() >= j->second) {
      ldout(cct,20) << __func__ << " removing pending update to old "
		    << i->first << dendl;
      i = pending_inc->pg_stat_updates.erase(i);
    } else {
      ++i;
    }
  }
}
// Mark @p pgid stale in @p pending_inc if its acting primary OSD is down.
// @p cur is the current (committed) stat for the pg; an already-pending
// update for the same pg takes precedence when deciding whether the pg
// still qualifies as stale.
static void _try_mark_pg_stale(
  const OSDMap& osdmap,
  pg_t pgid,
  const pg_stat_t& cur,
  PGMap::Incremental *pending_inc)
{
  // only act if the pg is not already stale and its primary is known down
  if ((cur.state & PG_STATE_STALE) == 0 &&
      cur.acting_primary != -1 &&
      osdmap.is_down(cur.acting_primary)) {
    pg_stat_t *newstat;
    auto q = pending_inc->pg_stat_updates.find(pgid);
    if (q != pending_inc->pg_stat_updates.end()) {
      // a pending update already exists for this pg; reuse it only if it
      // refers to the same (down) primary, or if its own primary is also
      // down and it has not been marked stale yet
      if ((q->second.acting_primary == cur.acting_primary) ||
	  ((q->second.state & PG_STATE_STALE) == 0 &&
	   q->second.acting_primary != -1 &&
	   osdmap.is_down(q->second.acting_primary))) {
	newstat = &q->second;
      } else {
	// pending update is no longer down or already stale
	return;
      }
    } else {
      // no pending update yet; seed one from the current stat
      newstat = &pending_inc->pg_stat_updates[pgid];
      *newstat = cur;
    }
    dout(10) << __func__ << " marking pg " << pgid
	     << " stale (acting_primary " << newstat->acting_primary
	     << ")" << dendl;
    newstat->state |= PG_STATE_STALE;
    newstat->last_unstale = ceph_clock_now();
  }
}
// Mark as stale any PG whose acting primary OSD is down.
//
// When the set of state-changed OSDs is large relative to the cluster
// (per mon_pg_check_down_all_threshold) we scan every PG; otherwise we
// only walk the PGs mapped to the affected OSDs.
void PGMapUpdater::check_down_pgs(
    const OSDMap &osdmap,
    const PGMap &pg_map,
    bool check_all,
    const set<int>& need_check_down_pg_osds,
    PGMap::Incremental *pending_inc)
{
  const double threshold =
    g_conf().get_val<double>("mon_pg_check_down_all_threshold");
  // if a large number of osds changed state, just iterate over the whole
  // pg map.
  if (need_check_down_pg_osds.size() >
      (unsigned)osdmap.get_num_osds() * threshold) {
    check_all = true;
  }

  if (check_all) {
    for (const auto& [pgid, stat] : pg_map.pg_stat) {
      _try_mark_pg_stale(osdmap, pgid, stat, pending_inc);
    }
    return;
  }

  for (int osd : need_check_down_pg_osds) {
    if (!osdmap.is_down(osd)) {
      continue;
    }
    auto it = pg_map.pg_by_osd.find(osd);
    if (it == pg_map.pg_by_osd.end()) {
      continue;
    }
    for (const auto& pgid : it->second) {
      const pg_stat_t &stat = pg_map.pg_stat.at(pgid);
      ceph_assert(stat.acting_primary == osd);
      _try_mark_pg_stale(osdmap, pgid, stat, pending_inc);
    }
  }
}
int reweight::by_utilization(
const OSDMap &osdmap,
const PGMap &pgm,
int oload,
double max_changef,
int max_osds,
bool by_pg, const set<int64_t> *pools,
bool no_increasing,
mempool::osdmap::map<int32_t, uint32_t>* new_weights,
std::stringstream *ss,
std::string *out_str,
ceph::Formatter *f)
{
if (oload <= 100) {
*ss << "You must give a percentage higher than 100. "
"The reweighting threshold will be calculated as <average-utilization> "
"times <input-percentage>. For example, an argument of 200 would "
"reweight OSDs which are twice as utilized as the average OSD.\n";
return -EINVAL;
}
vector<int> pgs_by_osd(osdmap.get_max_osd());
// Avoid putting a small number (or 0) in the denominator when calculating
// average_util
double average_util;
if (by_pg) {
// by pg mapping
double weight_sum = 0.0; // sum up the crush weights
unsigned num_pg_copies = 0;
int num_osds = 0;
for (const auto& pg : pgm.pg_stat) {
if (pools && pools->count(pg.first.pool()) == 0)
continue;
for (const auto acting : pg.second.acting) {
if (!osdmap.exists(acting)) {
continue;
}
if (acting >= (int)pgs_by_osd.size())
pgs_by_osd.resize(acting);
if (pgs_by_osd[acting] == 0) {
if (osdmap.crush->get_item_weightf(acting) <= 0) {
//skip if we currently can not identify item
continue;
}
weight_sum += osdmap.crush->get_item_weightf(acting);
++num_osds;
}
++pgs_by_osd[acting];
++num_pg_copies;
}
}
if (!num_osds || (num_pg_copies / num_osds < g_conf()->mon_reweight_min_pgs_per_osd)) {
*ss << "Refusing to reweight: we only have " << num_pg_copies
<< " PGs across " << num_osds << " osds!\n";
return -EDOM;
}
average_util = (double)num_pg_copies / weight_sum;
} else {
// by osd utilization
int num_osd = std::max<size_t>(1, pgm.osd_stat.size());
if ((uint64_t)pgm.osd_sum.statfs.total / num_osd
< g_conf()->mon_reweight_min_bytes_per_osd) {
*ss << "Refusing to reweight: we only have " << pgm.osd_sum.statfs.kb()
<< " kb across all osds!\n";
return -EDOM;
}
if ((uint64_t)pgm.osd_sum.statfs.get_used_raw() / num_osd
< g_conf()->mon_reweight_min_bytes_per_osd) {
*ss << "Refusing to reweight: we only have "
<< pgm.osd_sum.statfs.kb_used_raw()
<< " kb used across all osds!\n";
return -EDOM;
}
average_util = (double)pgm.osd_sum.statfs.get_used_raw() /
(double)pgm.osd_sum.statfs.total;
}
// adjust down only if we are above the threshold
const double overload_util = average_util * (double)oload / 100.0;
// but aggressively adjust weights up whenever possible.
const double underload_util = average_util;
const unsigned max_change = (unsigned)(max_changef * (double)CEPH_OSD_IN);
ostringstream oss;
if (f) {
f->open_object_section("reweight_by_utilization");
f->dump_int("overload_min", oload);
f->dump_float("max_change", max_changef);
f->dump_int("max_change_osds", max_osds);
f->dump_float("average_utilization", average_util);
f->dump_float("overload_utilization", overload_util);
} else {
oss << "oload " << oload << "\n";
oss << "max_change " << max_changef << "\n";
oss << "max_change_osds " << max_osds << "\n";
oss.precision(4);
oss << "average_utilization " << std::fixed << average_util << "\n";
oss << "overload_utilization " << overload_util << "\n";
}
int num_changed = 0;
// precompute util for each OSD
std::vector<std::pair<int, float> > util_by_osd;
for (const auto& p : pgm.osd_stat) {
std::pair<int, float> osd_util;
osd_util.first = p.first;
if (by_pg) {
if (p.first >= (int)pgs_by_osd.size() ||
pgs_by_osd[p.first] == 0) {
// skip if this OSD does not contain any pg
// belonging to the specified pool(s).
continue;
}
if (osdmap.crush->get_item_weightf(p.first) <= 0) {
// skip if we are unable to locate item.
continue;
}
osd_util.second =
pgs_by_osd[p.first] / osdmap.crush->get_item_weightf(p.first);
} else {
osd_util.second =
(double)p.second.statfs.get_used_raw() / (double)p.second.statfs.total;
}
util_by_osd.push_back(osd_util);
}
// sort by absolute deviation from the mean utilization,
// in descending order.
std::sort(util_by_osd.begin(), util_by_osd.end(),
[average_util](std::pair<int, float> l, std::pair<int, float> r) {
return abs(l.second - average_util) > abs(r.second - average_util);
}
);
if (f)
f->open_array_section("reweights");
for (const auto& p : util_by_osd) {
unsigned weight = osdmap.get_weight(p.first);
if (weight == 0) {
// skip if OSD is currently out
continue;
}
float util = p.second;
if (util >= overload_util) {
// Assign a lower weight to overloaded OSDs. The current weight
// is a factor to take into account the original weights,
// to represent e.g. differing storage capacities
unsigned new_weight = (unsigned)((average_util / util) * (float)weight);
if (weight > max_change)
new_weight = std::max(new_weight, weight - max_change);
new_weights->insert({p.first, new_weight});
if (f) {
f->open_object_section("osd");
f->dump_int("osd", p.first);
f->dump_float("weight", (float)weight / (float)CEPH_OSD_IN);
f->dump_float("new_weight", (float)new_weight / (float)CEPH_OSD_IN);
f->close_section();
} else {
oss << "osd." << p.first << " weight "
<< (float)weight / (float)CEPH_OSD_IN << " -> "
<< (float)new_weight / (float)CEPH_OSD_IN << "\n";
}
if (++num_changed >= max_osds)
break;
}
if (!no_increasing && util <= underload_util) {
// assign a higher weight.. if we can.
unsigned new_weight = (unsigned)((average_util / util) * (float)weight);
new_weight = std::min(new_weight, weight + max_change);
if (new_weight > CEPH_OSD_IN)
new_weight = CEPH_OSD_IN;
if (new_weight > weight) {
new_weights->insert({p.first, new_weight});
oss << "osd." << p.first << " weight "
<< (float)weight / (float)CEPH_OSD_IN << " -> "
<< (float)new_weight / (float)CEPH_OSD_IN << "\n";
if (++num_changed >= max_osds)
break;
}
}
}
if (f) {
f->close_section();
}
OSDMap newmap;
newmap.deepish_copy_from(osdmap);
OSDMap::Incremental newinc;
newinc.fsid = newmap.get_fsid();
newinc.epoch = newmap.get_epoch() + 1;
newinc.new_weight = *new_weights;
newmap.apply_incremental(newinc);
osdmap.summarize_mapping_stats(&newmap, pools, out_str, f);
if (f) {
f->close_section();
} else {
*out_str += "\n";
*out_str += oss.str();
}
return num_changed;
}
| 132,123 | 31.201804 | 130 |
cc
|
null |
ceph-main/src/mon/PGMap.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
/*
* Placement Group Map. Placement Groups are logical sets of objects
* that are replicated by the same set of devices. pgid=(r,hash(o)&m)
* where & is a bit-wise AND and m=2^k-1
*/
#ifndef CEPH_PGMAP_H
#define CEPH_PGMAP_H
#include "include/health.h"
#include "common/debug.h"
#include "common/TextTable.h"
#include "osd/osd_types.h"
#include "include/mempool.h"
#include "mon/health_check.h"
#include <sstream>
namespace ceph { class Formatter; }
/**
 * PGMapDigest is the serializable digest of PG/OSD statistics.  The
 * aggregate fields below are populated by the PGMap subclass; this class
 * provides the summary/dump/rate-reporting helpers over that data.
 */
class PGMapDigest {
public:
  MEMPOOL_CLASS_HELPERS();
  virtual ~PGMapDigest() {}
  mempool::pgmap::vector<uint64_t> osd_last_seq;
  // cached per-rule available space; refreshed by get_rules_avail() from
  // the const dump path, hence mutable
  mutable std::map<int, int64_t> avail_space_by_rule;
  // aggregate state, populated by PGMap child
  int64_t num_pg = 0, num_osd = 0;
  int64_t num_pg_active = 0;
  int64_t num_pg_unknown = 0;
  mempool::pgmap::unordered_map<int32_t,pool_stat_t> pg_pool_sum;
  mempool::pgmap::map<int64_t,int64_t> num_pg_by_pool;
  pool_stat_t pg_sum;
  osd_stat_t osd_sum;
  mempool::pgmap::map<std::string,osd_stat_t> osd_sum_by_class;
  mempool::pgmap::unordered_map<uint64_t,int32_t> num_pg_by_state;
  // per-osd counts of acting/up/primary pg roles
  struct pg_count {
    int32_t acting = 0;
    int32_t up_not_acting = 0;
    int32_t primary = 0;
    void encode(ceph::buffer::list& bl) const {
      using ceph::encode;
      encode(acting, bl);
      encode(up_not_acting, bl);
      encode(primary, bl);
    }
    void decode(ceph::buffer::list::const_iterator& p) {
      using ceph::decode;
      decode(acting, p);
      decode(up_not_acting, p);
      decode(primary, p);
    }
  };
  mempool::pgmap::unordered_map<int32_t,pg_count> num_pg_by_osd;
  mempool::pgmap::map<int64_t,interval_set<snapid_t>> purged_snaps;
  bool use_per_pool_stats() const {
    return osd_sum.num_osds == osd_sum.num_per_pool_osds;
  }
  bool use_per_pool_omap_stats() const {
    return osd_sum.num_osds == osd_sum.num_per_pool_omap_osds;
  }
  // recent deltas, and summation
  /**
   * keep track of last deltas for each pool, calculated using
   * @p pg_pool_sum as baseline.
   */
  mempool::pgmap::unordered_map<int64_t, mempool::pgmap::list<std::pair<pool_stat_t, utime_t> > > per_pool_sum_deltas;
  /**
   * keep track of per-pool timestamp deltas, according to last update on
   * each pool.
   */
  mempool::pgmap::unordered_map<int64_t, utime_t> per_pool_sum_deltas_stamps;
  /**
   * keep track of sum deltas, per-pool, taking into account any previous
   * deltas existing in @p per_pool_sum_deltas.  The utime_t as second member
   * of the pair is the timestamp referring to the last update (i.e., the first
   * member of the pair) for a given pool.
   */
  mempool::pgmap::unordered_map<int64_t, std::pair<pool_stat_t,utime_t> > per_pool_sum_delta;
  pool_stat_t pg_sum_delta;
  utime_t stamp_delta;
  void get_recovery_stats(
    double *misplaced_ratio,
    double *degraded_ratio,
    double *inactive_ratio,
    double *unknown_pgs_ratio) const;
  void print_summary(ceph::Formatter *f, std::ostream *out) const;
  void print_oneline_summary(ceph::Formatter *f, std::ostream *out) const;
  void recovery_summary(ceph::Formatter *f, std::list<std::string> *psl,
                        const pool_stat_t& pool_sum) const;
  void overall_recovery_summary(ceph::Formatter *f, std::list<std::string> *psl) const;
  void pool_recovery_summary(ceph::Formatter *f, std::list<std::string> *psl,
                             uint64_t poolid) const;
  void recovery_rate_summary(ceph::Formatter *f, std::ostream *out,
                             const pool_stat_t& delta_sum,
                             utime_t delta_stamp) const;
  void overall_recovery_rate_summary(ceph::Formatter *f, std::ostream *out) const;
  void pool_recovery_rate_summary(ceph::Formatter *f, std::ostream *out,
                                  uint64_t poolid) const;
  /**
   * Obtain a formatted/plain output for client I/O, source from stats for a
   * given @p delta_sum pool over a given @p delta_stamp period of time.
   */
  void client_io_rate_summary(ceph::Formatter *f, std::ostream *out,
                              const pool_stat_t& delta_sum,
                              utime_t delta_stamp) const;
  /**
   * Obtain a formatted/plain output for the overall client I/O, which is
   * calculated resorting to @p pg_sum_delta and @p stamp_delta.
   */
  void overall_client_io_rate_summary(ceph::Formatter *f, std::ostream *out) const;
  /**
   * Obtain a formatted/plain output for client I/O over a given pool
   * with id @p pool_id.  We will then obtain pool-specific data
   * from @p per_pool_sum_delta.
   */
  void pool_client_io_rate_summary(ceph::Formatter *f, std::ostream *out,
                                   uint64_t poolid) const;
  /**
   * Obtain a formatted/plain output for cache tier IO, source from stats for a
   * given @p delta_sum pool over a given @p delta_stamp period of time.
   */
  void cache_io_rate_summary(ceph::Formatter *f, std::ostream *out,
                             const pool_stat_t& delta_sum,
                             utime_t delta_stamp) const;
  /**
   * Obtain a formatted/plain output for the overall cache tier IO, which is
   * calculated resorting to @p pg_sum_delta and @p stamp_delta.
   */
  void overall_cache_io_rate_summary(ceph::Formatter *f, std::ostream *out) const;
  /**
   * Obtain a formatted/plain output for cache tier IO over a given pool
   * with id @p pool_id.  We will then obtain pool-specific data
   * from @p per_pool_sum_delta.
   */
  void pool_cache_io_rate_summary(ceph::Formatter *f, std::ostream *out,
                                  uint64_t poolid) const;
  /**
   * Return the number of additional bytes that can be stored in this
   * pool before the first OSD fills up, accounting for PG overhead.
   */
  int64_t get_pool_free_space(const OSDMap &osd_map, int64_t poolid) const;
  /**
   * Dump pool usage and io ops/bytes, used by "ceph df" command
   */
  virtual void dump_pool_stats_full(const OSDMap &osd_map, std::stringstream *ss,
				    ceph::Formatter *f, bool verbose) const;
  void dump_cluster_stats(std::stringstream *ss, ceph::Formatter *f, bool verbose) const;
  static void dump_object_stat_sum(TextTable &tbl, ceph::Formatter *f,
				   const pool_stat_t &pool_stat,
				   uint64_t avail,
				   float raw_used_rate,
				   bool verbose,
				   bool per_pool,
				   bool per_pool_omap,
				   const pg_pool_t *pool);
  size_t get_num_pg_by_osd(int osd) const {
    auto p = num_pg_by_osd.find(osd);
    if (p == num_pg_by_osd.end())
      return 0;
    else
      return p->second.acting;
  }
  int get_num_primary_pg_by_osd(int osd) const {
    auto p = num_pg_by_osd.find(osd);
    if (p == num_pg_by_osd.end())
      return 0;
    else
      return p->second.primary;
  }
  ceph_statfs get_statfs(OSDMap &osdmap,
                         std::optional<int64_t> data_pool) const;
  int64_t get_rule_avail(int ruleno) const {
    auto i = avail_space_by_rule.find(ruleno);
    if (i != avail_space_by_rule.end())
      return i->second; // reuse the iterator; operator[] would redo the lookup
    else
      return 0;
  }
  // kill me post-mimic or -nautilus
  bool definitely_converted_snapsets() const {
    // false negative is okay; false positive is not!
    return
      num_pg &&
      num_pg_unknown == 0 &&
      pg_sum.stats.sum.num_legacy_snapsets == 0;
  }
  uint64_t get_last_osd_stat_seq(int osd) {
    if (osd < (int)osd_last_seq.size())
      return osd_last_seq[osd];
    return 0;
  }
  void encode(ceph::buffer::list& bl, uint64_t features) const;
  void decode(ceph::buffer::list::const_iterator& p);
  void dump(ceph::Formatter *f) const;
  static void generate_test_instances(std::list<PGMapDigest*>& ls);
};
WRITE_CLASS_ENCODER(PGMapDigest::pg_count);
WRITE_CLASS_ENCODER_FEATURES(PGMapDigest);
/**
 * Full in-memory PG map: per-pg and per-osd stats plus the aggregate
 * digest (inherited).  Updated via apply_incremental(); aggregates are
 * recomputed by calc_stats().
 */
class PGMap : public PGMapDigest {
public:
  MEMPOOL_CLASS_HELPERS();
  // the map
  version_t version;
  epoch_t last_osdmap_epoch;   // last osdmap epoch i applied to the pgmap
  epoch_t last_pg_scan;  // osdmap epoch
  mempool::pgmap::unordered_map<int32_t,osd_stat_t> osd_stat;
  mempool::pgmap::unordered_map<pg_t,pg_stat_t> pg_stat;
  typedef mempool::pgmap::map<
    std::pair<int64_t, int>,  // <pool, osd>
    store_statfs_t>
      per_osd_pool_statfs_t;
  per_osd_pool_statfs_t pool_statfs;
  // A batch of updates (and removals) to apply to a PGMap via
  // apply_incremental().
  class Incremental {
  public:
    MEMPOOL_CLASS_HELPERS();
    version_t version;
    mempool::pgmap::map<pg_t,pg_stat_t> pg_stat_updates;
    epoch_t osdmap_epoch;
    epoch_t pg_scan;  // osdmap epoch
    mempool::pgmap::set<pg_t> pg_remove;
    utime_t stamp;
    per_osd_pool_statfs_t pool_statfs_updates;
  private:
    mempool::pgmap::map<int32_t,osd_stat_t> osd_stat_updates;
    mempool::pgmap::set<int32_t> osd_stat_rm;
  public:
    const mempool::pgmap::map<int32_t, osd_stat_t> &get_osd_stat_updates() const {
      return osd_stat_updates;
    }
    const mempool::pgmap::set<int32_t> &get_osd_stat_rm() const {
      return osd_stat_rm;
    }
    template<typename OsdStat>
    void update_stat(int32_t osd, OsdStat&& stat) {
      osd_stat_updates[osd] = std::forward<OsdStat>(stat);
    }
    // record an osd as out: replace its stats with an empty osd_stat_t
    void stat_osd_out(int32_t osd) {
      osd_stat_updates[osd] = osd_stat_t();
    }
    void stat_osd_down_up(int32_t osd, const PGMap& pg_map) {
      // 0 the op_queue_age_hist for this osd
      auto p = osd_stat_updates.find(osd);
      if (p != osd_stat_updates.end()) {
	p->second.op_queue_age_hist.clear();
	return;
      }
      auto q = pg_map.osd_stat.find(osd);
      if (q != pg_map.osd_stat.end()) {
	osd_stat_t& t = osd_stat_updates[osd] = q->second;
	t.op_queue_age_hist.clear();
      }
    }
    // schedule removal of an osd's stats (and drop any pending update)
    void rm_stat(int32_t osd) {
      osd_stat_rm.insert(osd);
      osd_stat_updates.erase(osd);
    }
    void dump(ceph::Formatter *f) const;
    static void generate_test_instances(std::list<Incremental*>& o);
    Incremental() : version(0), osdmap_epoch(0), pg_scan(0) {}
  };
  // aggregate stats (soft state), generated by calc_stats()
  mempool::pgmap::unordered_map<int,std::set<pg_t> > pg_by_osd;
  mempool::pgmap::unordered_map<int,int> blocked_by_sum;
  mempool::pgmap::list<std::pair<pool_stat_t, utime_t> > pg_sum_deltas;
  mempool::pgmap::unordered_map<int64_t,mempool::pgmap::unordered_map<uint64_t,int32_t>> num_pg_by_pool_state;
  utime_t stamp;
  void update_pool_deltas(
    CephContext *cct,
    const utime_t ts,
    const mempool::pgmap::unordered_map<int32_t, pool_stat_t>& pg_pool_sum_old);
  void clear_delta();
  // purge all per-pool bookkeeping for a deleted pool
  void deleted_pool(int64_t pool) {
    for (auto i = pool_statfs.begin();  i != pool_statfs.end();) {
      if (i->first.first == pool) {
	i = pool_statfs.erase(i);
      } else {
        ++i;
      }
    }
    pg_pool_sum.erase(pool);
    num_pg_by_pool_state.erase(pool);
    num_pg_by_pool.erase(pool);
    per_pool_sum_deltas.erase(pool);
    per_pool_sum_deltas_stamps.erase(pool);
    per_pool_sum_delta.erase(pool);
  }
 private:
  void update_delta(
    CephContext *cct,
    const utime_t ts,
    const pool_stat_t& old_pool_sum,
    utime_t *last_ts,
    const pool_stat_t& current_pool_sum,
    pool_stat_t *result_pool_delta,
    utime_t *result_ts_delta,
    mempool::pgmap::list<std::pair<pool_stat_t,utime_t> > *delta_avg_list);
  void update_one_pool_delta(CephContext *cct,
                             const utime_t ts,
                             const int64_t pool,
                             const pool_stat_t& old_pool_sum);
 public:
  mempool::pgmap::set<pg_t> creating_pgs;
  mempool::pgmap::map<int,std::map<epoch_t,std::set<pg_t> > > creating_pgs_by_osd_epoch;
  // Bits that use to be enum StuckPG
  static const int STUCK_INACTIVE = (1<<0);
  static const int STUCK_UNCLEAN = (1<<1);
  static const int STUCK_UNDERSIZED = (1<<2);
  static const int STUCK_DEGRADED = (1<<3);
  static const int STUCK_STALE = (1<<4);
  PGMap()
    : version(0),
      last_osdmap_epoch(0), last_pg_scan(0)
  {}
  version_t get_version() const {
    return version;
  }
  void set_version(version_t v) {
    version = v;
  }
  epoch_t get_last_osdmap_epoch() const {
    return last_osdmap_epoch;
  }
  void set_last_osdmap_epoch(epoch_t e) {
    last_osdmap_epoch = e;
  }
  epoch_t get_last_pg_scan() const {
    return last_pg_scan;
  }
  void set_last_pg_scan(epoch_t e) {
    last_pg_scan = e;
  }
  utime_t get_stamp() const {
    return stamp;
  }
  void set_stamp(utime_t s) {
    stamp = s;
  }
  pool_stat_t get_pg_pool_sum_stat(int64_t pool) const {
    auto p = pg_pool_sum.find(pool);
    if (p != pg_pool_sum.end())
      return p->second;
    return pool_stat_t();
  }
  // sum of stats over @p osds; an empty set means all osds
  osd_stat_t get_osd_sum(const std::set<int>& osds) const {
    if (osds.empty()) // all
      return osd_sum;
    osd_stat_t sum;
    for (auto i : osds) {
      auto os = get_osd_stat(i);
      if (os)
	sum.add(*os);
    }
    return sum;
  }
  const osd_stat_t *get_osd_stat(int osd) const {
    auto i = osd_stat.find(osd);
    if (i == osd_stat.end()) {
      return nullptr;
    }
    return &i->second;
  }
  void apply_incremental(CephContext *cct, const Incremental& inc);
  void calc_stats();
  void stat_pg_add(const pg_t &pgid, const pg_stat_t &s,
		   bool sameosds=false);
  bool stat_pg_sub(const pg_t &pgid, const pg_stat_t &s,
		   bool sameosds=false);
  void calc_purged_snaps();
  void calc_osd_sum_by_class(const OSDMap& osdmap);
  void stat_osd_add(int osd, const osd_stat_t &s);
  void stat_osd_sub(int osd, const osd_stat_t &s);
  void encode(ceph::buffer::list &bl, uint64_t features=-1) const;
  void decode(ceph::buffer::list::const_iterator &bl);
  /// encode subset of our data to a PGMapDigest
  void encode_digest(const OSDMap& osdmap,
		     ceph::buffer::list& bl, uint64_t features);
  int64_t get_rule_avail(const OSDMap& osdmap, int ruleno) const;
  void get_rules_avail(const OSDMap& osdmap,
		       std::map<int,int64_t> *avail_map) const;
  void dump(ceph::Formatter *f, bool with_net = true) const;
  void dump_basic(ceph::Formatter *f) const;
  void dump_pg_stats(ceph::Formatter *f, bool brief) const;
  void dump_pg_progress(ceph::Formatter *f) const;
  void dump_pool_stats(ceph::Formatter *f) const;
  void dump_osd_stats(ceph::Formatter *f, bool with_net = true) const;
  void dump_osd_ping_times(ceph::Formatter *f) const;
  void dump_delta(ceph::Formatter *f) const;
  void dump_filtered_pg_stats(ceph::Formatter *f, std::set<pg_t>& pgs) const;
  void dump_pool_stats_full(const OSDMap &osd_map, std::stringstream *ss,
			    ceph::Formatter *f, bool verbose) const override {
    get_rules_avail(osd_map, &avail_space_by_rule);
    PGMapDigest::dump_pool_stats_full(osd_map, ss, f, verbose);
  }
  /*
   * Dump client io rate, recovery io rate, cache io rate and recovery information.
   * this function is used by "ceph osd pool stats" command
   */
  void dump_pool_stats_and_io_rate(int64_t poolid, const OSDMap &osd_map, ceph::Formatter *f,
				   std::stringstream *ss) const;
  static void dump_pg_stats_plain(
    std::ostream& ss,
    const mempool::pgmap::unordered_map<pg_t, pg_stat_t>& pg_stats,
    bool brief);
  void get_stuck_stats(
    int types, const utime_t cutoff,
    mempool::pgmap::unordered_map<pg_t, pg_stat_t>& stuck_pgs) const;
  void dump_stuck(ceph::Formatter *f, int types, utime_t cutoff) const;
  void dump_stuck_plain(std::ostream& ss, int types, utime_t cutoff) const;
  int dump_stuck_pg_stats(std::stringstream &ds,
			  ceph::Formatter *f,
			  int threshold,
			  std::vector<std::string>& args) const;
  void dump(std::ostream& ss) const;
  void dump_basic(std::ostream& ss) const;
  void dump_pg_stats(std::ostream& ss, bool brief) const;
  void dump_pg_sum_stats(std::ostream& ss, bool header) const;
  void dump_pool_stats(std::ostream& ss, bool header) const;
  void dump_osd_stats(std::ostream& ss) const;
  void dump_osd_sum_stats(std::ostream& ss) const;
  void dump_filtered_pg_stats(std::ostream& ss, std::set<pg_t>& pgs) const;
  void dump_osd_perf_stats(ceph::Formatter *f) const;
  void print_osd_perf_stats(std::ostream *ss) const;
  void dump_osd_blocked_by_stats(ceph::Formatter *f) const;
  void print_osd_blocked_by_stats(std::ostream *ss) const;
  void get_filtered_pg_stats(uint64_t state, int64_t poolid, int64_t osdid,
                             bool primary, std::set<pg_t>& pgs) const;
  std::set<std::string> osd_parentage(const OSDMap& osdmap, int id) const;
  void get_health_checks(
    CephContext *cct,
    const OSDMap& osdmap,
    health_check_map_t *checks) const;
  void print_summary(ceph::Formatter *f, std::ostream *out) const;
  static void generate_test_instances(std::list<PGMap*>& o);
};
WRITE_CLASS_ENCODER_FEATURES(PGMap)
/// Stream the digest's one-line human-readable summary.
inline std::ostream& operator<<(std::ostream& out, const PGMapDigest& m) {
  m.print_oneline_summary(nullptr, &out);
  return out;
}
// Dispatch a pg-related monitor command (selected by @p prefix) against
// the given pg/osd maps, producing text in @p ss and/or bulk payload in
// @p odata.  (Return convention defined at the implementation —
// presumably 0 or -errno; confirm in PGMap.cc.)
int process_pg_map_command(
  const std::string& prefix,
  const cmdmap_t& cmdmap,
  const PGMap& pg_map,
  const OSDMap& osdmap,
  ceph::Formatter *f,
  std::stringstream *ss,
  ceph::buffer::list *odata);
// Static helpers that derive PGMap::Incremental updates from osdmap
// changes (deleted pools, pg splits/merges, down osds).
class PGMapUpdater
{
public:
  static void check_osd_map(
    CephContext *cct,
    const OSDMap &osdmap,
    const PGMap& pg_map,
    PGMap::Incremental *pending_inc);
  // mark pg's state stale if its acting primary osd is down
  static void check_down_pgs(
    const OSDMap &osd_map,
    const PGMap &pg_map,
    bool check_all,
    const std::set<int>& need_check_down_pg_osds,
    PGMap::Incremental *pending_inc);
};
namespace reweight {
/* Assign a lower weight to overloaded OSDs.
 *
 * The osds that will get a lower weight are those with a utilization
 * percentage 'oload' percent greater than the average utilization.
 */
  int by_utilization(const OSDMap &osd_map,
		     const PGMap &pg_map,
		     int oload,
		     double max_changef,
		     int max_osds,
		     bool by_pg, const std::set<int64_t> *pools,
		     bool no_increasing,
		     mempool::osdmap::map<int32_t, uint32_t>* new_weights,
		     std::stringstream *ss,
		     std::string *out_str,
		     ceph::Formatter *f);
}
#endif
| 18,391 | 31.960573 | 118 |
h
|
null |
ceph-main/src/mon/Paxos.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <sstream>
#include "Paxos.h"
#include "Monitor.h"
#include "messages/MMonPaxos.h"
#include "mon/mon_types.h"
#include "common/config.h"
#include "include/ceph_assert.h"
#include "include/stringify.h"
#include "common/Timer.h"
#include "messages/PaxosServiceMessage.h"
using std::string;
using std::unique_lock;
using ceph::bufferlist;
using ceph::Formatter;
using ceph::JSONFormatter;
using ceph::to_timespan;
#define dout_subsys ceph_subsys_paxos
#undef dout_prefix
#define dout_prefix _prefix(_dout, mon, mon.name, mon.rank, paxos_name, state, first_committed, last_committed)
// Build the standard debug-log prefix for this paxos instance (invoked
// via the dout_prefix macro above).
static std::ostream& _prefix(std::ostream *_dout, Monitor &mon, const string& name,
			     int rank, const string& paxos_name, int state,
			     version_t first_committed, version_t last_committed)
{
  std::ostream& out = *_dout;
  out << "mon." << name << "@" << rank
      << "(" << mon.get_state_name() << ")"
      << ".paxos(" << paxos_name << " " << Paxos::get_statename(state)
      << " c " << first_committed << ".." << last_committed
      << ") ";
  return out;
}
// Completion context fired when a trim transaction finishes; clears the
// in-flight 'trimming' flag on the owning Paxos instance.
class Paxos::C_Trimmed : public Context {
  Paxos *paxos;
public:
  explicit C_Trimmed(Paxos *p) : paxos(p) { }
  void finish(int r) override {
    paxos->trimming = false;
  }
};
// Accessor for the monitor's backing store used to persist paxos state.
MonitorDBStore *Paxos::get_store()
{
  return mon.store;
}
// Read the stored transactions for versions [first, last] of this paxos
// store and append each one onto @p tx.  Every version in the range must
// exist and be non-empty.
void Paxos::read_and_prepare_transactions(MonitorDBStore::TransactionRef tx,
					  version_t first, version_t last)
{
  dout(10) << __func__ << " first " << first << " last " << last << dendl;
  version_t ver = first;
  while (ver <= last) {
    dout(30) << __func__ << " apply version " << ver << dendl;
    bufferlist bl;
    const int r = get_store()->get(get_name(), ver, bl);
    ceph_assert(r == 0);
    ceph_assert(bl.length());
    decode_append_transaction(tx, bl);
    ++ver;
  }
  dout(15) << __func__ << " total versions " << (last-first) << dendl;
}
// Load persistent paxos state (proposal numbers and the committed
// version range) from the backing store into memory.
void Paxos::init()
{
  // load paxos variables from stable storage
  last_pn = get_store()->get(get_name(), "last_pn");
  accepted_pn = get_store()->get(get_name(), "accepted_pn");
  last_committed = get_store()->get(get_name(), "last_committed");
  first_committed = get_store()->get(get_name(), "first_committed");
  dout(10) << __func__ << " last_pn: " << last_pn << " accepted_pn: "
	   << accepted_pn << " last_committed: " << last_committed
	   << " first_committed: " << first_committed << dendl;
  dout(10) << "init" << dendl;
  // loaded state must satisfy the paxos invariants
  ceph_assert(is_consistent());
}
// Create and register the "paxos" perf counters with the global
// perf-counter collection.
void Paxos::init_logger()
{
  PerfCountersBuilder pcb(g_ceph_context, "paxos", l_paxos_first, l_paxos_last);
  // Because monitors are so few in number, the resource cost of capturing
  // almost all their perf counters at USEFUL is trivial.
  pcb.set_prio_default(PerfCountersBuilder::PRIO_USEFUL);
  pcb.add_u64_counter(l_paxos_start_leader, "start_leader", "Starts in leader role");
  pcb.add_u64_counter(l_paxos_start_peon, "start_peon", "Starts in peon role");
  pcb.add_u64_counter(l_paxos_restart, "restart", "Restarts");
  pcb.add_u64_counter(l_paxos_refresh, "refresh", "Refreshes");
  pcb.add_time_avg(l_paxos_refresh_latency, "refresh_latency", "Refresh latency");
  pcb.add_u64_counter(l_paxos_begin, "begin", "Started and handled begins");
  pcb.add_u64_avg(l_paxos_begin_keys, "begin_keys", "Keys in transaction on begin");
  pcb.add_u64_avg(l_paxos_begin_bytes, "begin_bytes", "Data in transaction on begin", NULL, 0, unit_t(UNIT_BYTES));
  pcb.add_time_avg(l_paxos_begin_latency, "begin_latency", "Latency of begin operation");
  pcb.add_u64_counter(l_paxos_commit, "commit",
      "Commits", "cmt");
  pcb.add_u64_avg(l_paxos_commit_keys, "commit_keys", "Keys in transaction on commit");
  pcb.add_u64_avg(l_paxos_commit_bytes, "commit_bytes", "Data in transaction on commit", NULL, 0, unit_t(UNIT_BYTES));
  pcb.add_time_avg(l_paxos_commit_latency, "commit_latency",
      "Commit latency", "clat");
  pcb.add_u64_counter(l_paxos_collect, "collect", "Peon collects");
  pcb.add_u64_avg(l_paxos_collect_keys, "collect_keys", "Keys in transaction on peon collect");
  pcb.add_u64_avg(l_paxos_collect_bytes, "collect_bytes", "Data in transaction on peon collect", NULL, 0, unit_t(UNIT_BYTES));
  pcb.add_time_avg(l_paxos_collect_latency, "collect_latency", "Peon collect latency");
  pcb.add_u64_counter(l_paxos_collect_uncommitted, "collect_uncommitted", "Uncommitted values in started and handled collects");
  pcb.add_u64_counter(l_paxos_collect_timeout, "collect_timeout", "Collect timeouts");
  pcb.add_u64_counter(l_paxos_accept_timeout, "accept_timeout", "Accept timeouts");
  pcb.add_u64_counter(l_paxos_lease_ack_timeout, "lease_ack_timeout", "Lease acknowledgement timeouts");
  pcb.add_u64_counter(l_paxos_lease_timeout, "lease_timeout", "Lease timeouts");
  pcb.add_u64_counter(l_paxos_store_state, "store_state", "Store a shared state on disk");
  pcb.add_u64_avg(l_paxos_store_state_keys, "store_state_keys", "Keys in transaction in stored state");
  pcb.add_u64_avg(l_paxos_store_state_bytes, "store_state_bytes", "Data in transaction in stored state", NULL, 0, unit_t(UNIT_BYTES));
  pcb.add_time_avg(l_paxos_store_state_latency, "store_state_latency", "Storing state latency");
  pcb.add_u64_counter(l_paxos_share_state, "share_state", "Sharings of state");
  pcb.add_u64_avg(l_paxos_share_state_keys, "share_state_keys", "Keys in shared state");
  pcb.add_u64_avg(l_paxos_share_state_bytes, "share_state_bytes", "Data in shared state", NULL, 0, unit_t(UNIT_BYTES));
  pcb.add_u64_counter(l_paxos_new_pn, "new_pn", "New proposal number queries");
  pcb.add_time_avg(l_paxos_new_pn_latency, "new_pn_latency", "New proposal number getting latency");
  logger = pcb.create_perf_counters();
  g_ceph_context->get_perfcounters_collection()->add(logger);
}
// Dump the committed version bounds and proposal numbers into @p f
// as a "paxos" object section.
void Paxos::dump_info(Formatter *f)
{
  f->open_object_section("paxos");
  f->dump_unsigned("first_committed", first_committed);
  f->dump_unsigned("last_committed", last_committed);
  f->dump_unsigned("last_pn", last_pn);
  f->dump_unsigned("accepted_pn", accepted_pn);
  f->close_section();
}
// ---------------------------------
// PHASE 1

// leader

// Begin the recovery (collect) phase as leader: choose a proposal number
// higher than both our accepted_pn and @p oldpn, recover any locally
// stored uncommitted value, and ask every other quorum member for its
// committed range and any uncommitted value it holds.
void Paxos::collect(version_t oldpn)
{
  // we're recovering, it seems!
  state = STATE_RECOVERING;
  ceph_assert(mon.is_leader());
  // reset the number of lasts received
  uncommitted_v = 0;
  uncommitted_pn = 0;
  uncommitted_value.clear();
  peer_first_committed.clear();
  peer_last_committed.clear();
  // look for uncommitted value
  if (get_store()->exists(get_name(), last_committed+1)) {
    version_t v = get_store()->get(get_name(), "pending_v");
    version_t pn = get_store()->get(get_name(), "pending_pn");
    if (v && pn && v == last_committed + 1) {
      uncommitted_pn = pn;
    } else {
      dout(10) << "WARNING: no pending_pn on disk, using previous accepted_pn " << accepted_pn
	       << " and crossing our fingers" << dendl;
      uncommitted_pn = accepted_pn;
    }
    uncommitted_v = last_committed+1;
    get_store()->get(get_name(), last_committed+1, uncommitted_value);
    ceph_assert(uncommitted_value.length());
    dout(10) << "learned uncommitted " << (last_committed+1)
	     << " pn " << uncommitted_pn
	     << " (" << uncommitted_value.length() << " bytes) from myself"
	     << dendl;
    logger->inc(l_paxos_collect_uncommitted);
  }
  // pick new pn
  accepted_pn = get_new_proposal_number(std::max(accepted_pn, oldpn));
  accepted_pn_from = last_committed;
  num_last = 1;   // our own answer counts as the first "last"
  dout(10) << "collect with pn " << accepted_pn << dendl;
  // send collect to every other quorum member
  for (auto p = mon.get_quorum().begin();
       p != mon.get_quorum().end();
       ++p) {
    if (*p == mon.rank) continue;
    MMonPaxos *collect = new MMonPaxos(mon.get_epoch(), MMonPaxos::OP_COLLECT,
				       ceph_clock_now());
    collect->last_committed = last_committed;
    collect->first_committed = first_committed;
    collect->pn = accepted_pn;
    mon.send_mon_message(collect, *p);
  }
  // set timeout event
  collect_timeout_event = mon.timer.add_event_after(
    g_conf()->mon_accept_timeout_factor *
    g_conf()->mon_lease,
    new C_MonContext{&mon, [this](int r) {
	if (r == -ECANCELED)
	  return;
	collect_timeout();
      }});
}
// peon
// Peon: handle the leader's phase-1 OP_COLLECT message.
// Accepts the leader's pn if it is higher than anything we've accepted
// (persisting that fact before replying), and answers with an OP_LAST
// carrying our committed range, any values the leader is missing, and
// any accepted-but-uncommitted value we hold.
void Paxos::handle_collect(MonOpRequestRef op)
{
  op->mark_paxos_event("handle_collect");

  auto collect = op->get_req<MMonPaxos>();
  dout(10) << "handle_collect " << *collect << dendl;

  ceph_assert(mon.is_peon()); // mon epoch filter should catch strays

  // we're recovering, it seems!
  state = STATE_RECOVERING;

  //update the peon recovery timeout
  reset_lease_timeout();

  if (collect->first_committed > last_committed+1) {
    // the leader has already trimmed past what we have: we cannot catch
    // up incrementally, so fall back to a full sync via bootstrap.
    dout(2) << __func__
            << " leader's lowest version is too high for our last committed"
            << " (theirs: " << collect->first_committed
            << "; ours: " << last_committed << ") -- bootstrap!" << dendl;
    op->mark_paxos_event("need to bootstrap");
    mon.bootstrap();
    return;
  }

  // reply
  MMonPaxos *last = new MMonPaxos(mon.get_epoch(), MMonPaxos::OP_LAST,
				  ceph_clock_now());
  last->last_committed = last_committed;
  last->first_committed = first_committed;

  // remember the pn we held before (possibly) accepting the new one; we
  // may need it below if pending_pn is missing on disk.
  version_t previous_pn = accepted_pn;

  // can we accept this pn?
  if (collect->pn > accepted_pn) {
    // ok, accept it
    accepted_pn = collect->pn;
    accepted_pn_from = collect->pn_from;
    dout(10) << "accepting pn " << accepted_pn << " from "
	     << accepted_pn_from << dendl;

    // persist the accepted pn BEFORE replying, so a restart cannot make
    // us break our promise.
    auto t(std::make_shared<MonitorDBStore::Transaction>());
    t->put(get_name(), "accepted_pn", accepted_pn);

    dout(30) << __func__ << " transaction dump:\n";
    JSONFormatter f(true);
    t->dump(&f);
    f.flush(*_dout);
    *_dout << dendl;

    logger->inc(l_paxos_collect);
    logger->inc(l_paxos_collect_keys, t->get_keys());
    logger->inc(l_paxos_collect_bytes, t->get_bytes());

    auto start = ceph::coarse_mono_clock::now();
    get_store()->apply_transaction(t);
    auto end = ceph::coarse_mono_clock::now();

    logger->tinc(l_paxos_collect_latency, to_timespan(end - start));
  } else {
    // don't accept!
    dout(10) << "NOT accepting pn " << collect->pn << " from " << collect->pn_from
	     << ", we already accepted " << accepted_pn
	     << " from " << accepted_pn_from << dendl;
  }
  last->pn = accepted_pn;
  last->pn_from = accepted_pn_from;

  // share whatever committed values we have that the leader is missing
  if (collect->last_committed < last_committed)
    share_state(last, collect->first_committed, collect->last_committed);

  // do we have an accepted but uncommitted value?
  //  (it'll be at last_committed+1)
  bufferlist bl;
  if (collect->last_committed <= last_committed &&
      get_store()->exists(get_name(), last_committed+1)) {
    get_store()->get(get_name(), last_committed+1, bl);
    ceph_assert(bl.length() > 0);
    dout(10) << " sharing our accepted but uncommitted value for "
	     << last_committed+1 << " (" << bl.length() << " bytes)" << dendl;
    last->values[last_committed+1] = bl;

    version_t v = get_store()->get(get_name(), "pending_v");
    version_t pn = get_store()->get(get_name(), "pending_pn");
    if (v && pn && v == last_committed + 1) {
      last->uncommitted_pn = pn;
    } else {
      // previously we didn't record which pn a value was accepted
      // under!  use the pn value we just had... :(
      dout(10) << "WARNING: no pending_pn on disk, using previous accepted_pn " << previous_pn
	       << " and crossing our fingers" << dendl;
      last->uncommitted_pn = previous_pn;
    }

    logger->inc(l_paxos_collect_uncommitted);
  }

  // send reply
  collect->get_connection()->send_message(last);
}
/**
* @note This is Okay. We share our versions between peer_last_committed and
* our last_committed (inclusive), and add their bufferlists to the
* message. It will be the peer's job to apply them to its store, as
* these bufferlists will contain raw transactions.
* This function is called by both the Peon and the Leader. The Peon will
* share the state with the Leader during handle_collect(), sharing any
* values the leader may be missing (i.e., the leader's last_committed is
* lower than the peon's last_committed). The Leader will share the state
* with the Peon during handle_last(), if the peon's last_committed is
* lower than the leader's last_committed.
*/
// Copy our committed values in (peer_last_committed, last_committed] into
// message @p m so the peer can catch up; also stamps m->last_committed.
// The values are the raw transaction bufferlists stored per version.
void Paxos::share_state(MMonPaxos *m, version_t peer_first_committed,
			version_t peer_last_committed)
{
  ceph_assert(peer_last_committed < last_committed);

  dout(10) << "share_state peer has fc " << peer_first_committed
	   << " lc " << peer_last_committed << dendl;
  version_t v = peer_last_committed + 1;

  // include incrementals
  uint64_t bytes = 0;
  for ( ; v <= last_committed; v++) {
    if (get_store()->exists(get_name(), v)) {
      get_store()->get(get_name(), v, m->values[v]);
      ceph_assert(m->values[v].length());
      dout(10) << " sharing " << v << " ("
	       << m->values[v].length() << " bytes)" << dendl;
      // account approximate on-disk key overhead as well as the value
      bytes += m->values[v].length() + 16;  // paxos_ + 10 digits = 16
    }
  }
  logger->inc(l_paxos_share_state);
  logger->inc(l_paxos_share_state_keys, m->values.size());
  logger->inc(l_paxos_share_state_bytes, bytes);

  m->last_committed = last_committed;
}
/**
* Store on disk a state that was shared with us
*
* Basically, we received a set of version. Or just one. It doesn't matter.
* What matters is that we have to stash it in the store. So, we will simply
* write every single bufferlist into their own versions on our side (i.e.,
* onto paxos-related keys), and then we will decode those same bufferlists
* we just wrote and apply the transactions they hold. We will also update
* our first and last committed values to point to the new values, if need
* be. All all this is done tightly wrapped in a transaction to ensure we
* enjoy the atomicity guarantees given by our awesome k/v store.
*/
// Apply committed state shared with us in message @p m.
// Writes each shared version's raw value under our paxos keys AND decodes
// and appends each value's transaction, all in one atomic store
// transaction; advances last_committed accordingly.
// Returns true if anything was actually applied (callers then refresh).
bool Paxos::store_state(MMonPaxos *m)
{
  auto t(std::make_shared<MonitorDBStore::Transaction>());
  auto start = m->values.begin();
  bool changed = false;

  // build map of values to store
  // we want to write the range [last_committed, m->last_committed] only.
  if (start != m->values.end() &&
      start->first > last_committed + 1) {
    // ignore everything if values start in the future.
    dout(10) << "store_state ignoring all values, they start at " << start->first
	     << " > last_committed+1" << dendl;
    return false;
  }

  // push forward the start position on the message's values iterator, up until
  // we run out of positions or we find a position matching 'last_committed'.
  while (start != m->values.end() && start->first <= last_committed) {
    ++start;
  }

  // make sure we get the right interval of values to apply by pushing forward
  // the 'end' iterator until it matches the message's 'last_committed'.
  auto end = start;
  while (end != m->values.end() && end->first <= m->last_committed) {
    last_committed = end->first;
    ++end;
  }

  if (start == end) {
    dout(10) << "store_state nothing to commit" << dendl;
  } else {
    dout(10) << "store_state [" << start->first << ".."
	     << last_committed << "]" << dendl;
    t->put(get_name(), "last_committed", last_committed);

    // we should apply the state here -- decode every single bufferlist in the
    // map and append the transactions to 't'.
    for (auto it = start; it != end; ++it) {
      // write the bufferlist as the version's value
      t->put(get_name(), it->first, it->second);
      // decode the bufferlist and append it to the transaction we will shortly
      // apply.
      decode_append_transaction(t, it->second);
    }

    // discard obsolete uncommitted value?  (it is superseded once a value
    // for that version has been committed)
    if (uncommitted_v && uncommitted_v <= last_committed) {
      dout(10) << " forgetting obsolete uncommitted value " << uncommitted_v
	       << " pn " << uncommitted_pn << dendl;
      uncommitted_v = 0;
      uncommitted_pn = 0;
      uncommitted_value.clear();
    }
  }

  if (!t->empty()) {
    dout(30) << __func__ << " transaction dump:\n";
    JSONFormatter f(true);
    t->dump(&f);
    f.flush(*_dout);
    *_dout << dendl;

    logger->inc(l_paxos_store_state);
    logger->inc(l_paxos_store_state_bytes, t->get_bytes());
    logger->inc(l_paxos_store_state_keys, t->get_keys());

    auto start = ceph::coarse_mono_clock::now();
    get_store()->apply_transaction(t);
    auto end = ceph::coarse_mono_clock::now();

    logger->tinc(l_paxos_store_state_latency, to_timespan(end-start));

    // refresh first_committed; this txn may have trimmed.
    first_committed = get_store()->get(get_name(), "first_committed");

    _sanity_check_store();
    changed = true;
  }

  return changed;
}
void Paxos::_sanity_check_store()
{
version_t lc = get_store()->get(get_name(), "last_committed");
ceph_assert(lc == last_committed);
}
// leader
// Leader: handle a peon's OP_LAST reply to our collect.
// Stores any committed values the peon shared, pushes commits to peons
// that are behind, tracks any uncommitted value we must re-propose, and
// once every quorum member has accepted our pn either re-proposes the
// learned uncommitted value or goes active and extends the lease.
void Paxos::handle_last(MonOpRequestRef op)
{
  op->mark_paxos_event("handle_last");
  auto last = op->get_req<MMonPaxos>();
  bool need_refresh = false;
  int from = last->get_source().num();

  dout(10) << "handle_last " << *last << dendl;

  if (!mon.is_leader()) {
    dout(10) << "not leader, dropping" << dendl;
    return;
  }

  // note peer's first_ and last_committed, in case we learn a new
  // commit and need to push it to them.
  peer_first_committed[from] = last->first_committed;
  peer_last_committed[from] = last->last_committed;

  if (last->first_committed > last_committed + 1) {
    // the peon has trimmed past what we have; we can't catch up
    // incrementally, so do a full sync via bootstrap.
    dout(5) << __func__
            << " mon." << from
	    << " lowest version is too high for our last committed"
            << " (theirs: " << last->first_committed
            << "; ours: " << last_committed << ") -- bootstrap!" << dendl;
    op->mark_paxos_event("need to bootstrap");
    mon.bootstrap();
    return;
  }

  ceph_assert(g_conf()->paxos_kill_at != 1);

  // store any committed values if any are specified in the message
  need_refresh = store_state(last);

  ceph_assert(g_conf()->paxos_kill_at != 2);

  // is everyone contiguous and up to date?
  for (auto p = peer_last_committed.begin();
       p != peer_last_committed.end();
       ++p) {
    if (p->second + 1 < first_committed && first_committed > 1) {
      // a peon fell behind our trim horizon; it needs a full sync.
      dout(5) << __func__
	      << " peon " << p->first
	      << " last_committed (" << p->second
	      << ") is too low for our first_committed (" << first_committed
	      << ") -- bootstrap!" << dendl;
      op->mark_paxos_event("need to bootstrap");
      mon.bootstrap();
      return;
    }
    if (p->second < last_committed) {
      // share committed values
      dout(10) << " sending commit to mon." << p->first << dendl;
      MMonPaxos *commit = new MMonPaxos(mon.get_epoch(),
					MMonPaxos::OP_COMMIT,
					ceph_clock_now());
      share_state(commit, peer_first_committed[p->first], p->second);
      mon.send_mon_message(commit, p->first);
    }
  }

  // do they accept your pn?
  if (last->pn > accepted_pn) {
    // no, try again.
    dout(10) << " they had a higher pn than us, picking a new one." << dendl;

    // cancel timeout event
    mon.timer.cancel_event(collect_timeout_event);
    collect_timeout_event = 0;

    collect(last->pn);
  } else if (last->pn == accepted_pn) {
    // yes, they accepted our pn.  great.
    num_last++;
    dout(10) << " they accepted our pn, we now have "
	     << num_last << " peons" << dendl;

    // did this person send back an accepted but uncommitted value?
    // keep the one with the highest pn from the most up-to-date peon.
    if (last->uncommitted_pn) {
      if (last->uncommitted_pn >= uncommitted_pn &&
	  last->last_committed >= last_committed &&
	  last->last_committed + 1 >= uncommitted_v) {
	uncommitted_v = last->last_committed+1;
	uncommitted_pn = last->uncommitted_pn;
	uncommitted_value = last->values[uncommitted_v];
	dout(10) << "we learned an uncommitted value for " << uncommitted_v
		 << " pn " << uncommitted_pn
		 << " " << uncommitted_value.length() << " bytes"
		 << dendl;
      } else {
	dout(10) << "ignoring uncommitted value for " << (last->last_committed+1)
		 << " pn " << last->uncommitted_pn
		 << " " << last->values[last->last_committed+1].length() << " bytes"
		 << dendl;
      }
    }

    // is that everyone?
    if (num_last == mon.get_quorum().size()) {
      // cancel timeout event
      mon.timer.cancel_event(collect_timeout_event);
      collect_timeout_event = 0;
      peer_first_committed.clear();
      peer_last_committed.clear();

      // almost...

      // did we learn an old value?  if so, we must re-propose it before
      // anything new (classic paxos recovery).
      if (uncommitted_v == last_committed+1 &&
	  uncommitted_value.length()) {
	dout(10) << "that's everyone.  begin on old learned value" << dendl;
	state = STATE_UPDATING_PREVIOUS;
	begin(uncommitted_value);
      } else {
	// active!
	dout(10) << "that's everyone.  active!" << dendl;
	extend_lease();

	need_refresh = false;
	if (do_refresh()) {
	  finish_round();
	}
      }
    }
  } else {
    // no, this is an old message, discard
    dout(10) << "old pn, ignoring" << dendl;
  }

  if (need_refresh)
    (void)do_refresh();
}
// Leader: fired when the quorum failed to answer our collect in time;
// give up on this round and call a fresh election via bootstrap.
void Paxos::collect_timeout()
{
  dout(1) << "collect timeout, calling fresh election" << dendl;
  collect_timeout_event = 0;
  logger->inc(l_paxos_collect_timeout);
  ceph_assert(mon.is_leader());
  mon.bootstrap();
}
// leader
// Leader: start phase 2 by proposing value @p v for version
// last_committed+1.  Persists the pending value (with its pn) locally,
// then sends OP_BEGIN to every peon; accepts arrive in handle_accept().
// With a quorum of one we commit immediately.
void Paxos::begin(bufferlist& v)
{
  dout(10) << "begin for " << last_committed+1 << " "
	   << v.length() << " bytes"
	   << dendl;

  ceph_assert(mon.is_leader());
  ceph_assert(is_updating() || is_updating_previous());

  // we must already have a majority for this to work.
  ceph_assert(mon.get_quorum().size() == 1 ||
	      num_last > (unsigned)mon.monmap->size()/2);

  // and no value, yet.
  ceph_assert(new_value.length() == 0);

  // accept it ourselves
  accepted.clear();
  accepted.insert(mon.rank);
  new_value = v;

  if (last_committed == 0) {
    auto t(std::make_shared<MonitorDBStore::Transaction>());
    // initial base case; set first_committed too, and fold it into the
    // proposed value so peons apply the same transaction.
    t->put(get_name(), "first_committed", 1);
    decode_append_transaction(t, new_value);

    bufferlist tx_bl;
    t->encode(tx_bl);

    new_value = tx_bl;
  }

  // store the proposed value in the store. IF it is accepted, we will then
  // have to decode it into a transaction and apply it.
  auto t(std::make_shared<MonitorDBStore::Transaction>());
  t->put(get_name(), last_committed+1, new_value);

  // note which pn this pending value is for.
  t->put(get_name(), "pending_v", last_committed + 1);
  t->put(get_name(), "pending_pn", accepted_pn);

  dout(30) << __func__ << " transaction dump:\n";
  JSONFormatter f(true);
  t->dump(&f);
  f.flush(*_dout);
  // also dump the decoded proposed transaction itself, for debugging
  auto debug_tx(std::make_shared<MonitorDBStore::Transaction>());
  auto new_value_it = new_value.cbegin();
  debug_tx->decode(new_value_it);
  debug_tx->dump(&f);
  *_dout << "\nbl dump:\n";
  f.flush(*_dout);
  *_dout << dendl;

  logger->inc(l_paxos_begin);
  logger->inc(l_paxos_begin_keys, t->get_keys());
  logger->inc(l_paxos_begin_bytes, t->get_bytes());

  auto start = ceph::coarse_mono_clock::now();
  get_store()->apply_transaction(t);
  auto end = ceph::coarse_mono_clock::now();

  logger->tinc(l_paxos_begin_latency, to_timespan(end - start));

  ceph_assert(g_conf()->paxos_kill_at != 3);

  if (mon.get_quorum().size() == 1) {
    // we're alone, take it easy
    commit_start();
    return;
  }

  // ask others to accept it too!
  for (auto p = mon.get_quorum().begin();
       p != mon.get_quorum().end();
       ++p) {
    if (*p == mon.rank) continue;

    dout(10) << " sending begin to mon." << *p << dendl;
    MMonPaxos *begin = new MMonPaxos(mon.get_epoch(), MMonPaxos::OP_BEGIN,
				     ceph_clock_now());
    begin->values[last_committed+1] = new_value;
    begin->last_committed = last_committed;
    begin->pn = accepted_pn;

    mon.send_mon_message(begin, *p);
  }

  // set timeout event; if accepts don't all arrive in time we bootstrap
  // (see accept_timeout()).
  accept_timeout_event = mon.timer.add_event_after(
    g_conf()->mon_accept_timeout_factor * g_conf()->mon_lease,
    new C_MonContext{&mon, [this](int r) {
	if (r == -ECANCELED)
	  return;
	accept_timeout();
      }});
}
// peon
// Peon: handle the leader's phase-2 OP_BEGIN.
// If the pn matches what we promised, persist the proposed value (with
// its pn) and reply OP_ACCEPT; a stale pn is ignored.
void Paxos::handle_begin(MonOpRequestRef op)
{
  op->mark_paxos_event("handle_begin");
  auto begin = op->get_req<MMonPaxos>();
  dout(10) << "handle_begin " << *begin << dendl;

  // can we accept this?
  if (begin->pn < accepted_pn) {
    dout(10) << " we accepted a higher pn " << accepted_pn << ", ignoring" << dendl;
    op->mark_paxos_event("have higher pn, ignore");
    return;
  }
  ceph_assert(begin->pn == accepted_pn);
  ceph_assert(begin->last_committed == last_committed);

  ceph_assert(g_conf()->paxos_kill_at != 4);

  logger->inc(l_paxos_begin);

  // set state.
  state = STATE_UPDATING;
  lease_expire = {};  // cancel lease

  // yes.
  version_t v = last_committed+1;
  dout(10) << "accepting value for " << v << " pn " << accepted_pn << dendl;

  // store the accepted value onto our store. We will have to decode it and
  // apply its transaction once we receive permission to commit.
  // persist BEFORE replying, so a restart cannot lose an accepted value.
  auto t(std::make_shared<MonitorDBStore::Transaction>());
  t->put(get_name(), v, begin->values[v]);

  // note which pn this pending value is for.
  t->put(get_name(), "pending_v", v);
  t->put(get_name(), "pending_pn", accepted_pn);

  dout(30) << __func__ << " transaction dump:\n";
  JSONFormatter f(true);
  t->dump(&f);
  f.flush(*_dout);
  *_dout << dendl;

  logger->inc(l_paxos_begin_bytes, t->get_bytes());

  auto start = ceph::coarse_mono_clock::now();
  get_store()->apply_transaction(t);
  auto end = ceph::coarse_mono_clock::now();

  logger->tinc(l_paxos_begin_latency, to_timespan(end - start));

  ceph_assert(g_conf()->paxos_kill_at != 5);

  // reply
  MMonPaxos *accept = new MMonPaxos(mon.get_epoch(), MMonPaxos::OP_ACCEPT,
				    ceph_clock_now());
  accept->pn = accepted_pn;
  accept->last_committed = last_committed;
  begin->get_connection()->send_message(accept);
}
// leader
// Leader: handle a peon's OP_ACCEPT for the value we proposed.
// Tracks which peers accepted; once the whole quorum has accepted,
// start committing.  Stale-pn and old-round replies are dropped.
void Paxos::handle_accept(MonOpRequestRef op)
{
  op->mark_paxos_event("handle_accept");
  auto accept = op->get_req<MMonPaxos>();
  dout(10) << "handle_accept " << *accept << dendl;
  int from = accept->get_source().num();

  if (accept->pn != accepted_pn) {
    // we accepted a higher pn, from some other leader
    dout(10) << " we accepted a higher pn " << accepted_pn << ", ignoring" << dendl;
    op->mark_paxos_event("have higher pn, ignore");
    return;
  }
  if (last_committed > 0 &&
      accept->last_committed < last_committed-1) {
    dout(10) << " this is from an old round, ignoring" << dendl;
    op->mark_paxos_event("old round, ignore");
    return;
  }
  ceph_assert(accept->last_committed == last_committed ||   // not committed
	      accept->last_committed == last_committed-1);  // committed

  ceph_assert(is_updating() || is_updating_previous());
  ceph_assert(accepted.count(from) == 0);
  accepted.insert(from);
  dout(10) << " now " << accepted << " have accepted" << dendl;

  ceph_assert(g_conf()->paxos_kill_at != 6);

  // only commit (and expose committed state) when we get *all* quorum
  // members to accept.  otherwise, they may still be sharing the now
  // stale state.
  // FIXME: we can improve this with an additional lease revocation message
  // that doesn't block for the persist.
  if (accepted == mon.get_quorum()) {
    // yay, commit!
    dout(10) << " got majority, committing, done with update" << dendl;
    op->mark_paxos_event("commit_start");
    commit_start();
  }
}
// Leader: fired when not all peons accepted our proposal in time;
// abandon the round and call a fresh election via bootstrap.
void Paxos::accept_timeout()
{
  dout(1) << "accept timeout, calling fresh election" << dendl;
  accept_timeout_event = 0;
  ceph_assert(mon.is_leader());
  ceph_assert(is_updating() || is_updating_previous() || is_writing() ||
	      is_writing_previous());
  logger->inc(l_paxos_accept_timeout);
  mon.bootstrap();
}
// Completion context queued with the store by commit_start(); runs in the
// store's async completion thread once the commit transaction is durable.
struct C_Committed : public Context {
  Paxos *paxos;
  explicit C_Committed(Paxos *p) : paxos(p) {}
  void finish(int r) override {
    ceph_assert(r >= 0);
    // commit_finish() touches paxos/monitor state, so take the mon lock.
    std::lock_guard l(paxos->mon.lock);
    if (paxos->is_shutdown()) {
      // shutdown() is draining in-flight commits; just account for ours.
      paxos->abort_commit();
      return;
    }
    paxos->commit_finish();
  }
};
// Account for an in-flight commit that completed while we are shutting
// down; wake shutdown() once the last one has drained.
void Paxos::abort_commit()
{
  ceph_assert(commits_started > 0);
  if (--commits_started == 0)
    shutdown_cond.notify_all();
}
// Leader: begin committing the accepted value for last_committed+1.
// Queues (asynchronously) a transaction that bumps last_committed and
// applies the value's decoded transaction; C_Committed fires
// commit_finish() when it is durable.  Transitions UPDATING* -> WRITING*.
void Paxos::commit_start()
{
  dout(10) << __func__ << " " << (last_committed+1) << dendl;

  ceph_assert(g_conf()->paxos_kill_at != 7);

  auto t(std::make_shared<MonitorDBStore::Transaction>());

  // commit locally
  t->put(get_name(), "last_committed", last_committed + 1);

  // decode the value and apply its transaction to the store.
  // this value can now be read from last_committed.
  decode_append_transaction(t, new_value);

  dout(30) << __func__ << " transaction dump:\n";
  JSONFormatter f(true);
  t->dump(&f);
  f.flush(*_dout);
  *_dout << dendl;

  logger->inc(l_paxos_commit);
  logger->inc(l_paxos_commit_keys, t->get_keys());
  logger->inc(l_paxos_commit_bytes, t->get_bytes());
  commit_start_stamp = ceph_clock_now();

  // async: commit_finish() runs from the store's completion thread.
  get_store()->queue_transaction(t, new C_Committed(this));

  if (is_updating_previous())
    state = STATE_WRITING_PREVIOUS;
  else if (is_updating())
    state = STATE_WRITING;
  else
    ceph_abort();
  ++commits_started;

  if (mon.get_quorum().size() > 1) {
    // cancel timeout event
    mon.timer.cancel_event(accept_timeout_event);
    accept_timeout_event = 0;
  }
}
// Leader: called (under the mon lock, from the store completion thread)
// once the commit transaction is durable.  Advances last_committed,
// broadcasts OP_COMMIT to the quorum, transitions WRITING* -> REFRESH,
// then refreshes subsystems, finishes the proposal, and extends the lease.
void Paxos::commit_finish()
{
  dout(20) << __func__ << " " << (last_committed+1) << dendl;
  utime_t end = ceph_clock_now();
  logger->tinc(l_paxos_commit_latency, end - commit_start_stamp);

  ceph_assert(g_conf()->paxos_kill_at != 8);

  // cancel lease - it was for the old value.
  //  (this would only happen if message layer lost the 'begin', but
  //   leader still got a majority and committed with out us.)
  lease_expire = {};  // cancel lease

  last_committed++;
  last_commit_time = ceph_clock_now();

  // refresh first_committed; this txn may have trimmed.
  first_committed = get_store()->get(get_name(), "first_committed");

  _sanity_check_store();

  // tell everyone
  for (auto p = mon.get_quorum().begin();
       p != mon.get_quorum().end();
       ++p) {
    if (*p == mon.rank) continue;

    dout(10) << " sending commit to mon." << *p << dendl;
    MMonPaxos *commit = new MMonPaxos(mon.get_epoch(), MMonPaxos::OP_COMMIT,
				      ceph_clock_now());
    commit->values[last_committed] = new_value;
    commit->pn = accepted_pn;
    commit->last_committed = last_committed;

    mon.send_mon_message(commit, *p);
  }

  ceph_assert(g_conf()->paxos_kill_at != 9);

  // get ready for a new round.
  new_value.clear();

  // WRITING -> REFRESH
  // among other things, this lets do_refresh() -> mon.bootstrap() ->
  // wait_for_paxos_write() know that it doesn't need to flush the store
  // queue. and it should not, as we are in the async completion thread now!
  ceph_assert(is_writing() || is_writing_previous());
  state = STATE_REFRESH;
  ceph_assert(commits_started > 0);
  --commits_started;

  if (do_refresh()) {
    commit_proposal();
    if (mon.get_quorum().size() > 1) {
      extend_lease();
    }

    ceph_assert(g_conf()->paxos_kill_at != 10);

    finish_round();
  }
}
// Peon: handle the leader's OP_COMMIT by storing/applying the shared
// committed values and refreshing our subsystems.
void Paxos::handle_commit(MonOpRequestRef op)
{
  op->mark_paxos_event("handle_commit");
  auto commit = op->get_req<MMonPaxos>();
  dout(10) << "handle_commit on " << commit->last_committed << dendl;

  logger->inc(l_paxos_commit);

  if (!mon.is_peon()) {
    // only peons should ever receive OP_COMMIT; anything else is a bug.
    dout(10) << "not a peon, dropping" << dendl;
    ceph_abort();
    return;
  }

  op->mark_paxos_event("store_state");
  store_state(commit);

  (void)do_refresh();
}
// Leader: grant a fresh read lease to the quorum.
// Sets lease_expire = now + mon_lease, broadcasts OP_LEASE, and arms two
// timers: an ack timeout (missing acks trigger a new election) and a
// renew event that fires before the lease runs out.
void Paxos::extend_lease()
{
  ceph_assert(mon.is_leader());
  //assert(is_active());

  lease_expire = ceph::real_clock::now();
  lease_expire += ceph::make_timespan(g_conf()->mon_lease);
  acked_lease.clear();
  acked_lease.insert(mon.rank);  // we trivially ack our own lease

  dout(7) << "extend_lease now+" << g_conf()->mon_lease
	  << " (" << lease_expire << ")" << dendl;

  // bcast
  for (auto p = mon.get_quorum().begin();
       p != mon.get_quorum().end(); ++p) {

    if (*p == mon.rank) continue;
    MMonPaxos *lease = new MMonPaxos(mon.get_epoch(), MMonPaxos::OP_LEASE,
				     ceph_clock_now());
    lease->last_committed = last_committed;
    lease->lease_timestamp = utime_t{lease_expire};
    lease->first_committed = first_committed;
    mon.send_mon_message(lease, *p);
  }

  // set timeout event.
  //  if old timeout is still in place, leave it.
  if (!lease_ack_timeout_event) {
    lease_ack_timeout_event = mon.timer.add_event_after(
      g_conf()->mon_lease_ack_timeout_factor * g_conf()->mon_lease,
      new C_MonContext{&mon, [this](int r) {
	  if (r == -ECANCELED)
	    return;
	  lease_ack_timeout();
	}});
  }

  // set renew event: fire a fraction of the way into the lease interval
  // so the lease is refreshed well before it expires.
  auto at = lease_expire;
  at -= ceph::make_timespan(g_conf()->mon_lease);
  at += ceph::make_timespan(g_conf()->mon_lease_renew_interval_factor *
			    g_conf()->mon_lease);
  lease_renew_event = mon.timer.add_event_at(
    at, new C_MonContext{&mon, [this](int r) {
	if (r == -ECANCELED)
	  return;
	lease_renew_timeout();
    }});
}
// Emit a cluster-log warning when a peer's message timestamp @p t is
// further in the future than the allowed clock drift -- a sign of
// unsynchronized clocks.  Warnings are rate-limited with an exponential
// backoff keyed on how many we have already issued.
void Paxos::warn_on_future_time(utime_t t, entity_name_t from)
{
  const utime_t now = ceph_clock_now();
  if (!(t > now))
    return;  // stamp is not in the future; nothing to do

  const utime_t ahead = t - now;
  if (!(ahead > g_conf()->mon_clock_drift_allowed))
    return;  // within tolerance

  // back off exponentially between successive warnings
  const utime_t since_last_warn = now - last_clock_drift_warn;
  if (!(since_last_warn >
	pow(g_conf()->mon_clock_drift_warn_backoff, clock_drift_warned)))
    return;

  mon.clog->warn() << "message from " << from << " was stamped " << ahead
		   << "s in the future, clocks not synchronized";
  last_clock_drift_warn = ceph_clock_now();
  ++clock_drift_warned;
}
// Reload the monitor's subsystems from the freshly-committed paxos state.
// Returns false (after triggering bootstrap) if a subsystem requested a
// bootstrap during refresh; true otherwise.
bool Paxos::do_refresh()
{
  bool need_bootstrap = false;

  // make sure we have the latest state loaded up
  auto start = ceph::coarse_mono_clock::now();
  mon.refresh_from_paxos(&need_bootstrap);
  auto end = ceph::coarse_mono_clock::now();

  logger->inc(l_paxos_refresh);
  logger->tinc(l_paxos_refresh_latency, to_timespan(end - start));

  if (need_bootstrap) {
    dout(10) << " doing requested bootstrap" << dendl;
    mon.bootstrap();
    return false;
  }

  return true;
}
// Leader: run the completion callbacks of the proposal that just
// committed (queued in committing_finishers by the proposal machinery).
void Paxos::commit_proposal()
{
  dout(10) << __func__ << dendl;
  ceph_assert(mon.is_leader());
  ceph_assert(is_refresh());

  finish_contexts(g_ceph_context, committing_finishers);
}
// Leader: conclude the current round -- go ACTIVE, wake every waiter
// (active/readable/writeable), trim old paxos versions if due, and kick
// off the next pending proposal if one accumulated meanwhile.
void Paxos::finish_round()
{
  dout(10) << __func__ << dendl;
  ceph_assert(mon.is_leader());

  // ok, now go active!
  state = STATE_ACTIVE;

  dout(20) << __func__ << " waiting_for_acting" << dendl;
  finish_contexts(g_ceph_context, waiting_for_active);
  dout(20) << __func__ << " waiting_for_readable" << dendl;
  finish_contexts(g_ceph_context, waiting_for_readable);
  dout(20) << __func__ << " waiting_for_writeable" << dendl;
  finish_contexts(g_ceph_context, waiting_for_writeable);

  dout(10) << __func__ << " done w/ waiters, state " << get_statename(state) << dendl;

  if (should_trim()) {
    trim();
  }

  // a waiter above may have queued a proposal; start it now that we are
  // active again.
  if (is_active() && pending_proposal) {
    propose_pending();
  }
}
// peon
// Peon: handle the leader's OP_LEASE -- extend our read lease (never
// shrink it), go ACTIVE, ack with our feature map, and wake readers.
void Paxos::handle_lease(MonOpRequestRef op)
{
  op->mark_paxos_event("handle_lease");
  auto lease = op->get_req<MMonPaxos>();
  // sanity
  if (!mon.is_peon() ||
      last_committed != lease->last_committed) {
    dout(10) << "handle_lease i'm not a peon, or they're not the leader,"
	     << " or the last_committed doesn't match, dropping" << dendl;
    op->mark_paxos_event("invalid lease, ignore");
    return;
  }

  warn_on_future_time(lease->sent_timestamp, lease->get_source());

  // extend lease -- but only forward; a reordered older lease message
  // must not shorten the lease we already hold.
  if (auto new_expire = lease->lease_timestamp.to_real_time();
      lease_expire < new_expire) {
    lease_expire = new_expire;

    auto now = ceph::real_clock::now();
    if (lease_expire < now) {
      auto diff = now - lease_expire;
      derr << "lease_expire from " << lease->get_source_inst() << " is " << diff << " seconds in the past; mons are probably laggy (or possibly clocks are too skewed)" << dendl;
    }
  }

  state = STATE_ACTIVE;

  dout(10) << "handle_lease on " << lease->last_committed
	   << " now " << lease_expire << dendl;

  // ack
  MMonPaxos *ack = new MMonPaxos(mon.get_epoch(), MMonPaxos::OP_LEASE_ACK,
				 ceph_clock_now());
  ack->last_committed = last_committed;
  ack->first_committed = first_committed;
  ack->lease_timestamp = ceph_clock_now();
  encode(mon.session_map.feature_map, ack->feature_map);
  lease->get_connection()->send_message(ack);

  // (re)set timeout event.
  reset_lease_timeout();

  // kick waiters
  finish_contexts(g_ceph_context, waiting_for_active);
  if (is_readable())
    finish_contexts(g_ceph_context, waiting_for_readable);
}
// Leader: handle a peon's OP_LEASE_ACK.  Records the ack (and the peon's
// feature map); once the whole quorum has acked, cancel the ack-timeout.
// Stray and duplicate acks are logged and ignored.
void Paxos::handle_lease_ack(MonOpRequestRef op)
{
  op->mark_paxos_event("handle_lease_ack");
  auto ack = op->get_req<MMonPaxos>();
  int from = ack->get_source().num();

  if (!lease_ack_timeout_event) {
    // no timeout pending: this lease was already fully acked or revoked
    dout(10) << "handle_lease_ack from " << ack->get_source()
	     << " -- stray (probably since revoked)" << dendl;

  } else if (acked_lease.count(from) == 0) {
    acked_lease.insert(from);
    if (ack->feature_map.length()) {
      auto p = ack->feature_map.cbegin();
      FeatureMap& t = mon.quorum_feature_map[from];
      decode(t, p);
    }
    if (acked_lease == mon.get_quorum()) {
      // yay!
      dout(10) << "handle_lease_ack from " << ack->get_source()
	       << " -- got everyone" << dendl;
      mon.timer.cancel_event(lease_ack_timeout_event);
      lease_ack_timeout_event = 0;

    } else {
      dout(10) << "handle_lease_ack from " << ack->get_source()
	       << " -- still need "
	       << mon.get_quorum().size() - acked_lease.size()
	       << " more" << dendl;
    }
  } else {
    dout(10) << "handle_lease_ack from " << ack->get_source()
	     << " dup (lagging!), ignoring" << dendl;
  }

  warn_on_future_time(ack->sent_timestamp, ack->get_source());
}
// Leader: fired when some peon failed to ack the lease in time; call a
// new election via bootstrap.
void Paxos::lease_ack_timeout()
{
  dout(1) << "lease_ack_timeout -- calling new election" << dendl;
  ceph_assert(mon.is_leader());
  ceph_assert(is_active());
  logger->inc(l_paxos_lease_ack_timeout);
  lease_ack_timeout_event = 0;
  mon.bootstrap();
}
// Peon: (re)arm the timer that fires if the leader stops renewing our
// lease; any previously-armed timeout is cancelled first.
void Paxos::reset_lease_timeout()
{
  dout(20) << "reset_lease_timeout - setting timeout event" << dendl;
  if (lease_timeout_event)
    mon.timer.cancel_event(lease_timeout_event);
  lease_timeout_event = mon.timer.add_event_after(
    g_conf()->mon_lease_ack_timeout_factor * g_conf()->mon_lease,
    new C_MonContext{&mon, [this](int r) {
	if (r == -ECANCELED)
	  return;
	lease_timeout();
      }});
}
// Peon: fired when the leader failed to renew our lease in time; call a
// new election via bootstrap.
void Paxos::lease_timeout()
{
  dout(1) << "lease_timeout -- calling new election" << dendl;
  ceph_assert(mon.is_peon());
  logger->inc(l_paxos_lease_timeout);
  lease_timeout_event = 0;
  mon.bootstrap();
}
// Leader: periodic renew event -- re-extend the quorum's lease before it
// expires (timer armed by extend_lease()).
void Paxos::lease_renew_timeout()
{
  lease_renew_event = 0;
  extend_lease();
}
/*
* trim old states
*/
// Queue a trim of old paxos versions onto the pending transaction:
// erase versions below the new floor (bounded by paxos_min kept and at
// most paxos_trim_max per round), bump first_committed, and optionally
// compact the trimmed key range.  C_Trimmed clears `trimming` when the
// proposal commits.
void Paxos::trim()
{
  ceph_assert(should_trim());
  version_t end = std::min(get_version() - g_conf()->paxos_min,
			   get_first_committed() + g_conf()->paxos_trim_max);

  if (first_committed >= end)
    return;

  dout(10) << "trim to " << end << " (was " << first_committed << ")" << dendl;

  MonitorDBStore::TransactionRef t = get_pending_transaction();

  for (version_t v = first_committed; v < end; ++v) {
    dout(10) << "trim " << v << dendl;
    t->erase(get_name(), v);
  }
  t->put(get_name(), "first_committed", end);
  if (g_conf()->mon_compact_on_trim) {
    dout(10) << " compacting trimmed range" << dendl;
    t->compact_range(get_name(), stringify(first_committed - 1), stringify(end));
  }

  trimming = true;
  queue_pending_finisher(new C_Trimmed(this));
}
/*
* return a globally unique, monotonically increasing proposal number
*/
// Return a globally unique, monotonically increasing proposal number
// greater than @p gt.  The low two decimal digits carry our monitor
// rank, so no two monitors can ever generate the same pn; the new pn is
// persisted before being returned.
version_t Paxos::get_new_proposal_number(version_t gt)
{
  if (last_pn < gt)
    last_pn = gt;

  // bump to the next multiple of 100 and stamp in our rank.
  last_pn = (last_pn / 100 + 1) * 100 + (version_t)mon.rank;

  // persist it
  auto txn(std::make_shared<MonitorDBStore::Transaction>());
  txn->put(get_name(), "last_pn", last_pn);

  dout(30) << __func__ << " transaction dump:\n";
  JSONFormatter jf(true);
  txn->dump(&jf);
  jf.flush(*_dout);
  *_dout << dendl;

  logger->inc(l_paxos_new_pn);

  auto t_begin = ceph::coarse_mono_clock::now();
  get_store()->apply_transaction(txn);
  auto t_end = ceph::coarse_mono_clock::now();

  logger->tinc(l_paxos_new_pn_latency, to_timespan(t_end - t_begin));

  dout(10) << "get_new_proposal_number = " << last_pn << dendl;
  return last_pn;
}
// Cancel every outstanding paxos timer event (collect/accept timeouts,
// lease renew, lease ack timeout, lease timeout) and clear its handle.
void Paxos::cancel_events()
{
  if (collect_timeout_event) {
    mon.timer.cancel_event(collect_timeout_event);
    collect_timeout_event = 0;
  }
  if (accept_timeout_event) {
    mon.timer.cancel_event(accept_timeout_event);
    accept_timeout_event = 0;
  }
  if (lease_renew_event) {
    mon.timer.cancel_event(lease_renew_event);
    lease_renew_event = 0;
  }
  if (lease_ack_timeout_event) {
    mon.timer.cancel_event(lease_ack_timeout_event);
    lease_ack_timeout_event = 0;
  }
  if (lease_timeout_event) {
    mon.timer.cancel_event(lease_timeout_event);
    lease_timeout_event = 0;
  }
}
// Tear down paxos: enter SHUTDOWN state, drop the pending proposal, wait
// (on the mon lock) for in-flight store commits to drain, cancel every
// queued waiter/finisher with -ECANCELED, and unregister perf counters.
void Paxos::shutdown()
{
  dout(10) << __func__ << " cancel all contexts" << dendl;

  state = STATE_SHUTDOWN;

  // discard pending transaction
  pending_proposal.reset();

  // Let store finish commits in progress
  // XXX: I assume I can't use finish_contexts() because the store
  // is going to trigger
  // (C_Committed::finish() calls abort_commit(), which signals this cond
  // once commits_started drains to zero.)
  unique_lock l{mon.lock, std::adopt_lock};
  shutdown_cond.wait(l, [this] { return commits_started <= 0; });
  // Monitor::shutdown() will unlock it
  l.release();

  finish_contexts(g_ceph_context, waiting_for_writeable, -ECANCELED);
  finish_contexts(g_ceph_context, waiting_for_readable, -ECANCELED);
  finish_contexts(g_ceph_context, waiting_for_active, -ECANCELED);
  finish_contexts(g_ceph_context, pending_finishers, -ECANCELED);
  finish_contexts(g_ceph_context, committing_finishers, -ECANCELED);
  if (logger)
    g_ceph_context->get_perfcounters_collection()->remove(logger);
}
// (Re)initialize paxos after winning an election as leader: drop all
// in-flight state and, unless we are a quorum of one (then we can go
// straight to ACTIVE), start phase-1 recovery via collect(0).
void Paxos::leader_init()
{
  cancel_events();
  new_value.clear();

  // discard pending transaction
  pending_proposal.reset();

  reset_pending_committing_finishers();

  logger->inc(l_paxos_start_leader);

  if (mon.get_quorum().size() == 1) {
    state = STATE_ACTIVE;
    return;
  }

  state = STATE_RECOVERING;
  lease_expire = {};
  dout(10) << "leader_init -- starting paxos recovery" << dendl;
  collect(0);
}
// Initialize paxos state after an election we did not win: become a
// peon in the recovering state and wait for the leader to drive
// recovery and issue a lease.
void Paxos::peon_init()
{
  cancel_events();
  new_value.clear();
  state = STATE_RECOVERING;
  // Invalidate any lease carried over from a previous quorum.
  lease_expire = {};
  dout(10) << "peon_init -- i am a peon" << dendl;
  // start a timer, in case the leader never manages to issue a lease
  reset_lease_timeout();
  // discard pending transaction
  pending_proposal.reset();
  // no chance to write now!
  reset_pending_committing_finishers();
  finish_contexts(g_ceph_context, waiting_for_writeable, -EAGAIN);
  logger->inc(l_paxos_start_peon);
}
// Reset paxos when a new election is called: cancel timers, wait out
// any in-flight disk write, and drop the in-progress proposal.
void Paxos::restart()
{
  dout(10) << "restart -- canceling timeouts" << dendl;
  cancel_events();
  new_value.clear();
  if (is_writing() || is_writing_previous()) {
    dout(10) << __func__ << " flushing" << dendl;
    // Drop the monitor lock around the (potentially blocking) store
    // flush so other monitor threads are not stalled while it runs.
    mon.lock.unlock();
    mon.store->flush();
    mon.lock.lock();
    dout(10) << __func__ << " flushed" << dendl;
  }
  state = STATE_RECOVERING;
  // discard pending transaction
  pending_proposal.reset();
  // Re-trigger queued finishers with -EAGAIN (committing first).
  reset_pending_committing_finishers();
  finish_contexts(g_ceph_context, waiting_for_active, -EAGAIN);
  logger->inc(l_paxos_restart);
}
// Safely re-trigger both finisher lists with -EAGAIN. Pending finishers
// are spliced onto the end of committing_finishers first, so committing
// callbacks fire before pending ones and pending_finishers is already
// empty before any callback runs (see the invariant in Paxos.h).
void Paxos::reset_pending_committing_finishers()
{
  committing_finishers.splice(committing_finishers.end(), pending_finishers);
  finish_contexts(g_ceph_context, committing_finishers, -EAGAIN);
}
// Route an incoming MMonPaxos message to the matching handler.
// Messages are dropped while an election is in progress; anything that
// is not a paxos message, or violates the leader/peon protocol
// invariants, aborts the process.
void Paxos::dispatch(MonOpRequestRef op)
{
  ceph_assert(op->is_type_paxos());
  op->mark_paxos_event("dispatch");
  if (op->get_req()->get_type() != MSG_MON_PAXOS) {
    dout(0) << "Got unexpected message type " << op->get_req()->get_type()
	    << " in Paxos::dispatch, aborting!" << dendl;
    ceph_abort();
  }
  auto *req = op->get_req<MMonPaxos>();
  // election in progress?
  if (!mon.is_leader() && !mon.is_peon()) {
    dout(5) << "election in progress, dropping " << *req << dendl;
    return;
  }
  // check sanity
  // A peon must only be addressed by the current leader; anything else
  // is a protocol violation.
  ceph_assert(mon.is_leader() ||
	      (mon.is_peon() && req->get_source().num() == mon.get_leader()));
  // NOTE: these ops are defined in messages/MMonPaxos.h
  switch (req->op) {
    // learner
  case MMonPaxos::OP_COLLECT:
    handle_collect(op);
    break;
  case MMonPaxos::OP_LAST:
    handle_last(op);
    break;
  case MMonPaxos::OP_BEGIN:
    handle_begin(op);
    break;
  case MMonPaxos::OP_ACCEPT:
    handle_accept(op);
    break;
  case MMonPaxos::OP_COMMIT:
    handle_commit(op);
    break;
  case MMonPaxos::OP_LEASE:
    handle_lease(op);
    break;
  case MMonPaxos::OP_LEASE_ACK:
    handle_lease_ack(op);
    break;
  default:
    ceph_abort();
  }
}
// -----------------
// service interface
// -- READ --
// Whether version v can be served to a reader right now: it must have
// been committed, and we must be in a state (and hold a lease) that
// guarantees we are not serving stale data.
bool Paxos::is_readable(version_t v)
{
  bool readable = false;
  if (v <= last_committed) {
    readable =
      (mon.is_peon() || mon.is_leader()) &&
      (is_active() || is_updating() || is_writing()) &&
      last_committed > 0 && is_lease_valid(); // must have a value alone, or have lease
  }
  dout(5) << __func__ << " = " << (int)readable
	  << " - now=" << ceph_clock_now()
	  << " lease_expire=" << lease_expire
	  << " has v" << v << " lc " << last_committed
	  << dendl;
  return readable;
}
// Fetch version v of our state machine from the backing store into bl.
// Returns the store lookup result converted to bool (presumably true
// when the version exists -- TODO confirm against MonitorDBStore::get).
bool Paxos::read(version_t v, bufferlist &bl)
{
  return get_store()->get(get_name(), v, bl);
}
// Read the most recently committed value into bl. Returns its version
// number on success, or 0 if it could not be read.
version_t Paxos::read_current(bufferlist &bl)
{
  return read(last_committed, bl) ? last_committed : 0;
}
// A quorum of one never needs a read lease; otherwise the lease must
// not have expired yet.
bool Paxos::is_lease_valid()
{
  if (mon.get_quorum().size() == 1)
    return true;
  return ceph::real_clock::now() < lease_expire;
}
// -- WRITE --
// Only an active leader holding a valid lease may accept writes.
bool Paxos::is_writeable()
{
  if (!mon.is_leader())
    return false;
  return is_active() && is_lease_valid();
}
// Kick off a paxos round for the accumulated pending transaction:
// encode it, hand the finishers over to the committing set, and begin()
// proposing the encoded value to the quorum.
void Paxos::propose_pending()
{
  ceph_assert(is_active());
  ceph_assert(pending_proposal);
  cancel_events();
  // Serialize the transaction; this encoded form is the value that
  // gets proposed (and later decoded/applied) by the quorum.
  bufferlist bl;
  pending_proposal->encode(bl);
  dout(10) << __func__ << " " << (last_committed + 1)
	   << " " << bl.length() << " bytes" << dendl;
  dout(30) << __func__ << " transaction dump:\n";
  JSONFormatter f(true);
  pending_proposal->dump(&f);
  f.flush(*_dout);
  *_dout << dendl;
  // The transaction is fully captured in bl; drop our reference so a
  // new pending transaction can be built while this one is in flight.
  pending_proposal.reset();
  // Finishers registered against this proposal now belong to the
  // committing set; pending_finishers is left empty by the swap.
  committing_finishers.swap(pending_finishers);
  state = STATE_UPDATING;
  begin(bl);
}
// Register a callback to be completed once the currently pending
// proposal commits (or is retried with -EAGAIN on a restart).
void Paxos::queue_pending_finisher(Context *onfinished)
{
  dout(5) << __func__ << " " << onfinished << dendl;
  ceph_assert(onfinished);
  pending_finishers.emplace_back(onfinished);
}
// Return the transaction accumulating updates for the next proposal,
// creating it lazily on first use. Leader only.
MonitorDBStore::TransactionRef Paxos::get_pending_transaction()
{
  ceph_assert(mon.is_leader());
  if (pending_proposal)
    return pending_proposal;
  // Start a fresh proposal; no finishers may be queued against it yet.
  pending_proposal.reset(new MonitorDBStore::Transaction);
  ceph_assert(pending_finishers.empty());
  return pending_proposal;
}
// Try to start a proposal for the pending transaction right now.
// Returns true if one was started; false if we are plugged or not yet
// active (in which case it will be proposed later).
bool Paxos::trigger_propose()
{
  if (plugged) {
    dout(10) << __func__ << " plugged, not proposing now" << dendl;
    return false;
  }
  if (!is_active()) {
    dout(10) << __func__ << " not active, will propose later" << dendl;
    return false;
  }
  dout(10) << __func__ << " active, proposing now" << dendl;
  propose_pending();
  return true;
}
// The stored state is consistent as long as the committed version
// range [first_committed, last_committed] is non-empty.
bool Paxos::is_consistent()
{
  return first_committed <= last_committed;
}
| 47,542 | 28.863693 | 177 |
cc
|
null |
ceph-main/src/mon/Paxos.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
/*
time---->
cccccccccccccccccca????????????????????????????????????????
cccccccccccccccccca????????????????????????????????????????
cccccccccccccccccca???????????????????????????????????????? leader
cccccccccccccccccc?????????????????????????????????????????
ccccc??????????????????????????????????????????????????????
last_committed
pn_from
pn
a 12v
b 12v
c 14v
d
e 12v
*/
/**
* Paxos storage layout and behavior
*
* Currently, we use a key/value store to hold all the Paxos-related data, but
* it can logically be depicted as this:
*
* paxos:
* first_committed -> 1
* last_committed -> 4
* 1 -> value_1
* 2 -> value_2
* 3 -> value_3
* 4 -> value_4
*
* Since we are relying on a k/v store supporting atomic transactions, we can
* guarantee that if 'last_committed' has a value of '4', then we have up to
* version 4 on the store, and no more than that; the same applies to
 * 'first_committed', which holding '1' strictly means that our lowest
 * version is 1.
*
* Each version's value (value_1, value_2, ..., value_n) is a blob of data,
* incomprehensible to the Paxos. These values are proposed to the Paxos on
* propose_new_value() and each one is a transaction encoded in a ceph::buffer::list.
*
* The Paxos will write the value to disk, associating it with its version,
* but will take a step further: the value shall be decoded, and the operations
* on that transaction shall be applied during the same transaction that will
* write the value's encoded ceph::buffer::list to disk. This behavior ensures that
* whatever is being proposed will only be available on the store when it is
* applied by Paxos, which will then be aware of such new values, guaranteeing
* the store state is always consistent without requiring shady workarounds.
*
* So, let's say that FooMonitor proposes the following transaction, neatly
* encoded on a ceph::buffer::list of course:
*
* Tx_Foo
* put(foo, last_committed, 3)
* put(foo, 3, foo_value_3)
* erase(foo, 2)
* erase(foo, 1)
* put(foo, first_committed, 3)
*
 * And given that Tx_Foo is proposed to Paxos as a ceph::buffer::list, once it is
* ready to commit, and assuming we are now committing version 5 of the Paxos,
* we will do something along the lines of:
*
* Tx proposed_tx;
* proposed_tx.decode(Tx_foo_ceph::buffer::list);
*
* Tx our_tx;
* our_tx.put(paxos, last_committed, 5);
* our_tx.put(paxos, 5, Tx_foo_ceph::buffer::list);
* our_tx.append(proposed_tx);
*
* store_apply(our_tx);
*
* And the store should look like this after we apply 'our_tx':
*
* paxos:
* first_committed -> 1
* last_committed -> 5
* 1 -> value_1
* 2 -> value_2
* 3 -> value_3
* 4 -> value_4
* 5 -> Tx_foo_ceph::buffer::list
* foo:
* first_committed -> 3
* last_committed -> 3
* 3 -> foo_value_3
*
*/
#ifndef CEPH_MON_PAXOS_H
#define CEPH_MON_PAXOS_H
#include "include/types.h"
#include "mon_types.h"
#include "include/buffer.h"
#include "msg/msg_types.h"
#include "include/Context.h"
#include "common/perf_counters.h"
#include <errno.h>
#include "MonitorDBStore.h"
#include "mon/MonOpRequest.h"
class Monitor;
class MMonPaxos;
// Perf-counter indices for paxos instrumentation; l_paxos_first and
// l_paxos_last bound the id range. Counters are bumped from Paxos.cc
// via logger->inc()/tinc().
enum {
  l_paxos_first = 45800,
  l_paxos_start_leader,     // incremented by leader_init()
  l_paxos_start_peon,       // incremented by peon_init()
  l_paxos_restart,          // incremented by restart()
  l_paxos_refresh,
  l_paxos_refresh_latency,
  l_paxos_begin,
  l_paxos_begin_keys,
  l_paxos_begin_bytes,
  l_paxos_begin_latency,
  l_paxos_commit,
  l_paxos_commit_keys,
  l_paxos_commit_bytes,
  l_paxos_commit_latency,
  l_paxos_collect,
  l_paxos_collect_keys,
  l_paxos_collect_bytes,
  l_paxos_collect_latency,
  l_paxos_collect_uncommitted,
  l_paxos_collect_timeout,
  l_paxos_accept_timeout,
  l_paxos_lease_ack_timeout,
  l_paxos_lease_timeout,
  l_paxos_store_state,
  l_paxos_store_state_keys,
  l_paxos_store_state_bytes,
  l_paxos_store_state_latency,
  l_paxos_share_state,
  l_paxos_share_state_keys,
  l_paxos_share_state_bytes,
  l_paxos_new_pn,
  l_paxos_new_pn_latency,   // timed in get_new_proposal_number()
  l_paxos_last,
};
// i am one state machine.
/**
* This library is based on the Paxos algorithm, but varies in a few key ways:
* 1- Only a single new value is generated at a time, simplifying the recovery logic.
* 2- Nodes track "committed" values, and share them generously (and trustingly)
* 3- A 'leasing' mechanism is built-in, allowing nodes to determine when it is
* safe to "read" their copy of the last committed value.
*
* This provides a simple replication substrate that services can be built on top of.
* See PaxosService.h
*/
class Paxos {
/**
* @defgroup Paxos_h_class Paxos
* @{
*/
/**
* The Monitor to which this Paxos class is associated with.
*/
Monitor &mon;
/// perf counter for internal instrumentations
PerfCounters *logger;
void init_logger();
// my state machine info
const std::string paxos_name;
friend class Monitor;
friend class PaxosService;
std::list<std::string> extra_state_dirs;
// LEADER+PEON
// -- generic state --
public:
/**
* @defgroup Paxos_h_states States on which the leader/peon may be.
* @{
*/
enum {
/**
* Leader/Peon is in Paxos' Recovery state
*/
STATE_RECOVERING,
/**
* Leader/Peon is idle, and the Peon may or may not have a valid lease.
*/
STATE_ACTIVE,
/**
* Leader/Peon is updating to a new value.
*/
STATE_UPDATING,
/*
* Leader proposing an old value
*/
STATE_UPDATING_PREVIOUS,
/*
* Leader/Peon is writing a new commit. readable, but not
* writeable.
*/
STATE_WRITING,
/*
* Leader/Peon is writing a new commit from a previous round.
*/
STATE_WRITING_PREVIOUS,
// leader: refresh following a commit
STATE_REFRESH,
// Shutdown after WRITING or WRITING_PREVIOUS
STATE_SHUTDOWN
};
/**
* Obtain state name from constant value.
*
* @note This function will raise a fatal error if @p s is not
* a valid state value.
*
* @param s State value.
* @return The state's name.
*/
static const std::string get_statename(int s) {
switch (s) {
case STATE_RECOVERING:
return "recovering";
case STATE_ACTIVE:
return "active";
case STATE_UPDATING:
return "updating";
case STATE_UPDATING_PREVIOUS:
return "updating-previous";
case STATE_WRITING:
return "writing";
case STATE_WRITING_PREVIOUS:
return "writing-previous";
case STATE_REFRESH:
return "refresh";
case STATE_SHUTDOWN:
return "shutdown";
default:
return "UNKNOWN";
}
}
private:
/**
* The state we are in.
*/
int state;
/**
* @}
*/
int commits_started = 0;
ceph::condition_variable shutdown_cond;
public:
/**
* Check if we are recovering.
*
* @return 'true' if we are on the Recovering state; 'false' otherwise.
*/
bool is_recovering() const { return (state == STATE_RECOVERING); }
/**
* Check if we are active.
*
* @return 'true' if we are on the Active state; 'false' otherwise.
*/
bool is_active() const { return state == STATE_ACTIVE; }
/**
* Check if we are updating.
*
* @return 'true' if we are on the Updating state; 'false' otherwise.
*/
bool is_updating() const { return state == STATE_UPDATING; }
/**
* Check if we are updating/proposing a previous value from a
* previous quorum
*/
bool is_updating_previous() const { return state == STATE_UPDATING_PREVIOUS; }
/// @return 'true' if we are writing an update to disk
bool is_writing() const { return state == STATE_WRITING; }
/// @return 'true' if we are writing an update-previous to disk
bool is_writing_previous() const { return state == STATE_WRITING_PREVIOUS; }
/// @return 'true' if we are refreshing an update just committed
bool is_refresh() const { return state == STATE_REFRESH; }
/// @return 'true' if we are in the process of shutting down
bool is_shutdown() const { return state == STATE_SHUTDOWN; }
private:
/**
* @defgroup Paxos_h_recovery_vars Common recovery-related member variables
* @note These variables are common to both the Leader and the Peons.
* @{
*/
/**
*
*/
version_t first_committed;
/**
* Last Proposal Number
*
* @todo Expand description
*/
version_t last_pn;
/**
* Last committed value's version.
*
* On both the Leader and the Peons, this is the last value's version that
* was accepted by a given quorum and thus committed, that this instance
* knows about.
*
* @note It may not be the last committed value's version throughout the
* system. If we are a Peon, we may have not been part of the quorum
* that accepted the value, and for this very same reason we may still
* be a (couple of) version(s) behind, until we learn about the most
* recent version. This should only happen if we are not active (i.e.,
* part of the quorum), which should not happen if we are up, running
* and able to communicate with others -- thus able to be part of the
* monmap and trigger new elections.
*/
version_t last_committed;
/**
* Last committed value's time.
*
* When the commit finished.
*/
utime_t last_commit_time;
/**
* The last Proposal Number we have accepted.
*
* On the Leader, it will be the Proposal Number picked by the Leader
* itself. On the Peon, however, it will be the proposal sent by the Leader
* and it will only be updated if its value is higher than the one
* already known by the Peon.
*/
version_t accepted_pn;
/**
* The last_committed epoch of the leader at the time we accepted the last pn.
*
* This has NO SEMANTIC MEANING, and is there only for the debug output.
*/
version_t accepted_pn_from;
/**
* Map holding the first committed version by each quorum member.
*
* The versions kept in this map are updated during the collect phase.
* When the Leader starts the collect phase, each Peon will reply with its
* first committed version, which will then be kept in this map.
*/
std::map<int,version_t> peer_first_committed;
/**
* Map holding the last committed version by each quorum member.
*
* The versions kept in this map are updated during the collect phase.
* When the Leader starts the collect phase, each Peon will reply with its
* last committed version, which will then be kept in this map.
*/
std::map<int,version_t> peer_last_committed;
/**
* @}
*/
// active (phase 2)
/**
* @defgroup Paxos_h_active_vars Common active-related member variables
* @{
*/
/**
* When does our read lease expires.
*
* Instead of performing a full commit each time a read is requested, we
* keep leases. Each lease will have an expiration date, which may or may
* not be extended.
*/
ceph::real_clock::time_point lease_expire;
/**
* List of callbacks waiting for our state to change into STATE_ACTIVE.
*/
std::list<Context*> waiting_for_active;
/**
* List of callbacks waiting for the chance to read a version from us.
*
* Each entry on the list may result from an attempt to read a version that
* wasn't available at the time, or an attempt made during a period during
* which we could not satisfy the read request. The first case happens if
* the requested version is greater than our last committed version. The
* second scenario may happen if we are recovering, or if we don't have a
* valid lease.
*
* The list will be woken up once we change to STATE_ACTIVE with an extended
* lease -- which can be achieved if we have everyone on the quorum on board
* with the latest proposal, or if we don't really care about the remaining
* uncommitted values --, or if we're on a quorum of one.
*/
std::list<Context*> waiting_for_readable;
/**
* @}
*/
// -- leader --
// recovery (paxos phase 1)
/**
* @defgroup Paxos_h_leader_recovery Leader-specific Recovery-related vars
* @{
*/
/**
* Number of replies to the collect phase we've received so far.
*
* This variable is reset to 1 each time we start a collect phase; it is
* incremented each time we receive a reply to the collect message, and
* is used to determine whether or not we have received replies from the
* whole quorum.
*/
unsigned num_last;
/**
* Uncommitted value's version.
*
* If we have, or end up knowing about, an uncommitted value, then its
* version will be kept in this variable.
*
* @note If this version equals @p last_committed+1 when we reach the final
* steps of recovery, then the algorithm will assume this is a value
 *       the Leader does not know about, and the Leader will trustingly
* propose this version's value.
*/
version_t uncommitted_v;
/**
* Uncommitted value's Proposal Number.
*
* We use this variable to assess if the Leader should take into consideration
* an uncommitted value sent by a Peon. Given that the Peon will send back to
* the Leader the last Proposal Number it accepted, the Leader will be able
* to infer if this value is more recent than the one the Leader has, thus
* more relevant.
*/
version_t uncommitted_pn;
/**
* Uncommitted Value.
*
* If the system fails in-between the accept replies from the Peons and the
* instruction to commit from the Leader, then we may end up with accepted
* but yet-uncommitted values. During the Leader's recovery, it will attempt
* to bring the whole system to the latest state, and that means committing
* past accepted but uncommitted values.
*
* This variable will hold an uncommitted value, which may originate either
* on the Leader, or learnt by the Leader from a Peon during the collect
* phase.
*/
ceph::buffer::list uncommitted_value;
/**
* Used to specify when an on-going collect phase times out.
*/
Context *collect_timeout_event;
/**
* @}
*/
// active
/**
* @defgroup Paxos_h_leader_active Leader-specific Active-related vars
* @{
*/
/**
* Set of participants (Leader & Peons) that have acked a lease extension.
*
* Each Peon that acknowledges a lease extension will have its place in this
* set, which will be used to account for all the acks from all the quorum
* members, guaranteeing that we trigger new elections if some don't ack in
* the expected timeframe.
*/
std::set<int> acked_lease;
/**
* Callback responsible for extending the lease periodically.
*/
Context *lease_renew_event;
/**
* Callback to trigger new elections once the time for acks is out.
*/
Context *lease_ack_timeout_event;
/**
* @}
*/
/**
* @defgroup Paxos_h_peon_active Peon-specific Active-related vars
* @{
*/
/**
* Callback to trigger new elections when the Peon's lease times out.
*
* If the Peon's lease is extended, this callback will be reset (i.e.,
* we cancel the event and reschedule a new one with starting from the
* beginning).
*/
Context *lease_timeout_event;
/**
* @}
*/
// updating (paxos phase 2)
/**
* @defgroup Paxos_h_leader_updating Leader-specific Updating-related vars
* @{
*/
/**
* New Value being proposed to the Peons.
*
* This ceph::buffer::list holds the value the Leader is proposing to the Peons, and
* that will be committed if the Peons do accept the proposal.
*/
ceph::buffer::list new_value;
/**
* Set of participants (Leader & Peons) that accepted the new proposed value.
*
* This set is used to keep track of those who have accepted the proposed
* value, so the leader may know when to issue a commit (when a majority of
* participants has accepted the proposal), and when to extend the lease
* (when all the quorum members have accepted the proposal).
*/
std::set<int> accepted;
/**
* Callback to trigger a new election if the proposal is not accepted by the
* full quorum within a given timeframe.
*
* If the full quorum does not accept the proposal, then it means that the
* Leader may no longer be recognized as the leader, or that the quorum has
* changed, and the value may have not reached all the participants. Thus,
* the leader must call new elections, and go through a recovery phase in
* order to propagate the new value throughout the system.
*
* This does not mean that we won't commit. We will commit as soon as we
* have a majority of acceptances. But if we do not have full acceptance
* from the quorum, then we cannot extend the lease, as some participants
* may not have the latest committed value.
*/
Context *accept_timeout_event;
/**
* List of callbacks waiting for it to be possible to write again.
*
* @remarks It is not possible to write if we are not the Leader, or we are
* not on the active state, or if the lease has expired.
*/
std::list<Context*> waiting_for_writeable;
/**
* Pending proposal transaction
*
* This is the transaction that is under construction and pending
* proposal. We will add operations to it until we decide it is
* time to start a paxos round.
*/
MonitorDBStore::TransactionRef pending_proposal;
/**
* Finishers for pending transaction
*
* These are waiting for updates in the pending proposal/transaction
* to be committed.
*/
std::list<Context*> pending_finishers;
/**
* Finishers for committing transaction
*
* When the pending_proposal is submitted, pending_finishers move to
* this list. When it commits, these finishers are notified.
*/
std::list<Context*> committing_finishers;
/**
* This function re-triggers pending_ and committing_finishers
* safely, so as to maintain existing system invariants. In particular
* we maintain ordering by triggering committing before pending, and
* we clear out pending_finishers prior to any triggers so that
* we don't trigger asserts on them being empty. You should
* use it instead of sending -EAGAIN to them with finish_contexts.
*/
void reset_pending_committing_finishers();
/**
* @defgroup Paxos_h_sync_warns Synchronization warnings
* @todo Describe these variables
* @{
*/
utime_t last_clock_drift_warn;
int clock_drift_warned;
/**
* @}
*/
/**
* Should be true if we have proposed to trim, or are in the middle of
* trimming; false otherwise.
*/
bool trimming;
/**
* true if we want trigger_propose to *not* propose (yet)
*/
bool plugged = false;
/**
* @defgroup Paxos_h_callbacks Callback classes.
* @{
*/
/**
* Callback class responsible for handling a Collect Timeout.
*/
class C_CollectTimeout;
/**
* Callback class responsible for handling an Accept Timeout.
*/
class C_AcceptTimeout;
/**
* Callback class responsible for handling a Lease Ack Timeout.
*/
class C_LeaseAckTimeout;
/**
* Callback class responsible for handling a Lease Timeout.
*/
class C_LeaseTimeout;
/**
* Callback class responsible for handling a Lease Renew Timeout.
*/
class C_LeaseRenew;
class C_Trimmed;
/**
*
*/
public:
  // Wraps a caller-supplied completion context together with the
  // encoded value being proposed; completes that context at most once
  // when finish() runs.
  class C_Proposal : public Context {
    Context *proposer_context;  // caller's completion; fired once, then dropped
  public:
    ceph::buffer::list bl;      // the encoded proposed value
    // for debug purposes. Will go away. Soon.
    bool proposed;
    utime_t proposal_time;      // when this proposal was created
    C_Proposal(Context *c, ceph::buffer::list& proposal_bl) :
	proposer_context(c),
	bl(proposal_bl),
	proposed(false),
	proposal_time(ceph_clock_now())
    { }
    void finish(int r) override {
      // Complete the wrapped context exactly once, forwarding r.
      if (proposer_context) {
	proposer_context->complete(r);
	proposer_context = NULL;
      }
    }
  };
/**
* @}
*/
private:
/**
* @defgroup Paxos_h_election_triggered Steps triggered by an election.
*
* @note All these functions play a significant role in the Recovery Phase,
* which is triggered right after an election once someone becomes
* the Leader.
* @{
*/
/**
* Create a new Proposal Number and propose it to the Peons.
*
* This function starts the Recovery Phase, which can be directly mapped
* onto the original Paxos' Prepare phase. Basically, we'll generate a
* Proposal Number, taking @p oldpn into consideration, and we will send
* it to a quorum, along with our first and last committed versions. By
* sending these information in a message to the quorum, we expect to
* obtain acceptances from a majority, allowing us to commit, or be
* informed of a higher Proposal Number known by one or more of the Peons
* in the quorum.
*
* @pre We are the Leader.
* @post Recovery Phase initiated by sending messages to the quorum.
*
* @param oldpn A proposal number taken as the highest known so far, that
* should be taken into consideration when generating a new
* Proposal Number for the Recovery Phase.
*/
void collect(version_t oldpn);
/**
* Handle the reception of a collect message from the Leader and reply
* accordingly.
*
* Once a Peon receives a collect message from the Leader it will reply
* with its first and last committed versions, as well as information so
* the Leader may know if its Proposal Number was, or was not, accepted by
* the Peon. The Peon will accept the Leader's Proposal Number if it is
* higher than the Peon's currently accepted Proposal Number. The Peon may
* also inform the Leader of accepted but uncommitted values.
*
* @invariant The message is an operation of type OP_COLLECT.
* @pre We are a Peon.
* @post Replied to the Leader, accepting or not accepting its PN.
*
* @param collect The collect message sent by the Leader to the Peon.
*/
void handle_collect(MonOpRequestRef op);
/**
* Handle a response from a Peon to the Leader's collect phase.
*
* The received message will state the Peon's last committed version, as
* well as its last proposal number. This will lead to one of the following
* scenarios: if the replied Proposal Number is equal to the one we proposed,
* then the Peon has accepted our proposal, and if all the Peons do accept
* our Proposal Number, then we are allowed to proceed with the commit;
* however, if a Peon replies with a higher Proposal Number, we assume he
* knows something we don't and the Leader will have to abort the current
* proposal in order to retry with the Proposal Number specified by the Peon.
* It may also occur that the Peon replied with a lower Proposal Number, in
* which case we assume it is a reply to an older value and we'll simply
* drop it.
* This function will also check if the Peon replied with an accepted but
* yet uncommitted value. In this case, if its version is higher than our
* last committed value by one, we assume that the Peon knows a value from a
* previous proposal that has never been committed, and we should try to
* commit that value by proposing it next. On the other hand, if that is
* not the case, we'll assume it is an old, uncommitted value, we do not
* care about and we'll consider the system active by extending the leases.
*
* @invariant The message is an operation of type OP_LAST.
* @pre We are the Leader.
* @post We initiate a commit, or we retry with a higher Proposal Number,
* or we drop the message.
* @post We move from STATE_RECOVERING to STATE_ACTIVE.
*
* @param last The message sent by the Peon to the Leader.
*/
void handle_last(MonOpRequestRef op);
/**
* The Recovery Phase timed out, meaning that a significant part of the
* quorum does not believe we are the Leader, and we thus should trigger new
* elections.
*
* @pre We believe to be the Leader.
* @post Trigger new elections.
*/
void collect_timeout();
/**
* @}
*/
/**
* @defgroup Paxos_h_updating_funcs Functions used during the Updating State
*
* These functions may easily be mapped to the original Paxos Algorithm's
* phases.
*
* Taking into account the algorithm can be divided in 4 phases (Prepare,
* Promise, Accept Request and Accepted), we can easily map Paxos::begin to
* both the Prepare and Accept Request phases; the Paxos::handle_begin to
* the Promise phase; and the Paxos::handle_accept to the Accepted phase.
* @{
*/
/**
* Start a new proposal with the intent of committing @p value.
*
* If we are alone on the system (i.e., a quorum of one), then we will
* simply commit the value, but if we are not alone, then we need to propose
* the value to the quorum.
*
* @pre We are the Leader
* @pre We are on STATE_ACTIVE
* @post We commit, if we are alone, or we send a message to each quorum
* member
* @post We are on STATE_ACTIVE, if we are alone, or on
* STATE_UPDATING otherwise
*
* @param value The value being proposed to the quorum
*/
void begin(ceph::buffer::list& value);
/**
* Accept or decline (by ignoring) a proposal from the Leader.
*
* We will decline the proposal (by ignoring it) if we have promised to
* accept a higher numbered proposal. If that is not the case, we will
* accept it and accordingly reply to the Leader.
*
* @pre We are a Peon
* @pre We are on STATE_ACTIVE
* @post We are on STATE_UPDATING if we accept the Leader's proposal
* @post We send a reply message to the Leader if we accept its proposal
*
* @invariant The received message is an operation of type OP_BEGIN
*
* @param begin The message sent by the Leader to the Peon during the
* Paxos::begin function
*
*/
void handle_begin(MonOpRequestRef op);
/**
* Handle an Accept message sent by a Peon.
*
* In order to commit, the Leader has to receive accepts from a majority of
* the quorum. If that does happen, then the Leader may proceed with the
* commit. However, the Leader needs the accepts from all the quorum members
* in order to extend the lease and move on to STATE_ACTIVE.
*
* This function handles these two situations, accounting for the amount of
* received accepts.
*
* @pre We are the Leader
* @pre We are on STATE_UPDATING
* @post We are on STATE_ACTIVE if we received accepts from the full quorum
* @post We extended the lease if we moved on to STATE_ACTIVE
 * @post We are on STATE_UPDATING if we didn't receive accepts from the
* full quorum
* @post We have committed if we received accepts from a majority
*
* @invariant The received message is an operation of type OP_ACCEPT
*
* @param accept The message sent by the Peons to the Leader during the
* Paxos::handle_begin function
*/
void handle_accept(MonOpRequestRef op);
/**
* Trigger a fresh election.
*
* During Paxos::begin we set a Callback of type Paxos::C_AcceptTimeout in
* order to limit the amount of time we spend waiting for Accept replies.
* This callback will call Paxos::accept_timeout when it is fired.
*
* This is essential to the algorithm because there may be the chance that
* we are no longer the Leader (i.e., others don't believe in us) and we
* are getting ignored, or we dropped out of the quorum and haven't realised
* it. So, our only option is to trigger fresh elections.
*
* @pre We are the Leader
* @pre We are on STATE_UPDATING
* @post Triggered fresh elections
*/
void accept_timeout();
/**
* @}
*/
utime_t commit_start_stamp;
friend struct C_Committed;
/**
* Commit a value throughout the system.
*
* The Leader will cancel the current lease (as it was for the old value),
* and will store the committed value locally. It will then instruct every
* quorum member to do so as well.
*
* @pre We are the Leader
* @pre We are on STATE_UPDATING
* @pre A majority of quorum members accepted our proposal
* @post Value locally stored
* @post Quorum members instructed to commit the new value.
*/
void commit_start();
void commit_finish(); ///< finish a commit after txn becomes durable
void abort_commit(); ///< Handle commit finish after shutdown started
/**
* Commit the new value to stable storage as being the latest available
* version.
*
* @pre We are a Peon
* @post The new value is locally stored
* @post Fire up the callbacks waiting on waiting_for_commit
*
* @invariant The received message is an operation of type OP_COMMIT
*
* @param commit The message sent by the Leader to the Peon during
* Paxos::commit
*/
void handle_commit(MonOpRequestRef op);
/**
* Extend the system's lease.
*
* This means that the Leader considers that it should now safe to read from
* any node on the system, since every quorum member is now in possession of
* the latest version. Therefore, the Leader will send a message stating just
* this to each quorum member, and will impose a limited timeframe during
* which acks will be accepted. If there aren't as many acks as expected
* (i.e, if at least one quorum member does not ack the lease) during this
* timeframe, then we will force fresh elections.
*
* @pre We are the Leader
* @pre We are on STATE_ACTIVE
* @post A message extending the lease is sent to each quorum member
* @post A timeout callback is set to limit the amount of time we will wait
* for lease acks.
* @post A timer is set in order to renew the lease after a certain amount
* of time.
*/
void extend_lease();
/**
* Update the lease on the Peon's side of things.
*
* Once a Peon receives a Lease message, it will update its lease_expire
* variable, reply to the Leader acknowledging the lease update and set a
* timeout callback to be fired upon the lease's expiration. Finally, the
* Peon will fire up all the callbacks waiting for it to become active,
* which it just did, and all those waiting for it to become readable,
* which should be true if the Peon's lease didn't expire in the mean time.
*
* @pre We are a Peon
* @post We update the lease accordingly
* @post A lease timeout callback is set
* @post Move to STATE_ACTIVE
* @post Fire up all the callbacks waiting for STATE_ACTIVE
* @post Fire up all the callbacks waiting for readable if we are readable
* @post Ack the lease to the Leader
*
* @invariant The received message is an operation of type OP_LEASE
*
* @param lease The message sent by the Leader to the Peon during the
* Paxos::extend_lease function
*/
void handle_lease(MonOpRequestRef op);
/**
* Account for all the Lease Acks the Leader receives from the Peons.
*
* Once the Leader receives all the Lease Acks from the Peons, it will be
* able to cancel the Lease Ack timeout callback, thus avoiding calling
* fresh elections.
*
* @pre We are the Leader
* @post Cancel the Lease Ack timeout callback if we receive acks from all
* the quorum members
*
* @invariant The received message is an operation of type OP_LEASE_ACK
*
* @param ack The message sent by a Peon to the Leader during the
* Paxos::handle_lease function
*/
void handle_lease_ack(MonOpRequestRef op);
/**
   * Call fresh elections because at least one Peon didn't ack our lease.
*
* @pre We are the Leader
* @pre We are on STATE_ACTIVE
* @post Trigger fresh elections
*/
void lease_ack_timeout();
/**
* Extend lease since we haven't had new committed values meanwhile.
*
* @pre We are the Leader
* @pre We are on STATE_ACTIVE
* @post Go through with Paxos::extend_lease
*/
void lease_renew_timeout();
/**
* Call fresh elections because the Peon's lease expired without being
* renewed or receiving a fresh lease.
*
* This means that the Peon is no longer assumed as being in the quorum
* (or there is no Leader to speak of), so just trigger fresh elections
* to circumvent this issue.
*
* @pre We are a Peon
* @post Trigger fresh elections
*/
void lease_timeout(); // on peon, if lease isn't extended
/// restart the lease timeout timer
void reset_lease_timeout();
/**
* Cancel all of Paxos' timeout/renew events.
*/
void cancel_events();
/**
* Shutdown this Paxos machine
*/
void shutdown();
/**
* Generate a new Proposal Number based on @p gt
*
* @todo Check what @p gt actually means and what its usage entails
   * @param gt A hint for the generation of the Proposal Number
* @return A globally unique, monotonically increasing Proposal Number
*/
version_t get_new_proposal_number(version_t gt=0);
/**
* @todo document sync function
*/
void warn_on_future_time(utime_t t, entity_name_t from);
/**
* Begin proposing the pending_proposal.
*/
void propose_pending();
/**
* refresh state from store
*
* Called when we have new state for the mon to consume. If we return false,
* abort (we triggered a bootstrap).
*
* @returns true on success, false if we are now bootstrapping
*/
bool do_refresh();
void commit_proposal();
void finish_round();
public:
/**
* @param m A monitor
* @param name A name for the paxos service. It serves as the naming space
* of the underlying persistent storage for this service.
*/
  Paxos(Monitor &m, const std::string &name)
    : mon(m),
      logger(NULL),
      paxos_name(name),
      state(STATE_RECOVERING),  // stay in recovery until a round completes
      first_committed(0),
      last_pn(0),
      last_committed(0),
      accepted_pn(0),
      accepted_pn_from(0),
      num_last(0),
      uncommitted_v(0), uncommitted_pn(0),
      collect_timeout_event(0),
      lease_renew_event(0),
      lease_ack_timeout_event(0),
      lease_timeout_event(0),
      accept_timeout_event(0),
      clock_drift_warned(0),
      trimming(false) { }
  // The perf counters (logger) are owned by this Paxos instance; release
  // them on teardown.
  ~Paxos() {
    delete logger;
  }
  /// @return the name of this paxos machine; it also serves as the naming
  ///         space of its underlying persistent storage.
  const std::string get_name() const {
    return paxos_name;
  }
void dispatch(MonOpRequestRef op);
void read_and_prepare_transactions(MonitorDBStore::TransactionRef tx,
version_t from, version_t last);
void init();
/**
* dump state info to a formatter
*/
void dump_info(ceph::Formatter *f);
/**
* This function runs basic consistency checks. Importantly, if
* it is inconsistent and shouldn't be, it asserts out.
*
* @return True if consistent, false if not.
*/
bool is_consistent();
void restart();
/**
* Initiate the Leader after it wins an election.
*
* Once an election is won, the Leader will be initiated and there are two
* possible outcomes of this method: the Leader directly jumps to the active
* state (STATE_ACTIVE) if it believes to be the only one in the quorum, or
* will start recovering (STATE_RECOVERING) by initiating the collect phase.
*
* @pre Our monitor is the Leader.
* @post We are either on STATE_ACTIVE if we're the only one in the quorum,
* or on STATE_RECOVERING otherwise.
*/
void leader_init();
/**
* Initiate a Peon after it loses an election.
*
* If we are a Peon, then there must be a Leader and we are not alone in the
* quorum, thus automatically assume we are on STATE_RECOVERING, which means
* we will soon be enrolled into the Leader's collect phase.
*
   * @pre There is a Leader, and it's about to start the collect phase.
* @post We are on STATE_RECOVERING and will soon receive collect phase's
* messages.
*/
void peon_init();
/**
* Include an incremental state of values, ranging from peer_first_committed
* to the last committed value, on the message m
*
* @param m A message
* @param peer_first_committed Lowest version to take into account
* @param peer_last_committed Highest version to take into account
*/
void share_state(MMonPaxos *m, version_t peer_first_committed,
version_t peer_last_committed);
/**
* Store on disk a state that was shared with us
*
   * Basically, we received a set of versions. Or just one. It doesn't matter.
* What matters is that we have to stash it in the store. So, we will simply
* write every single ceph::buffer::list into their own versions on our side (i.e.,
* onto paxos-related keys), and then we will decode those same ceph::buffer::lists
* we just wrote and apply the transactions they hold. We will also update
* our first and last committed values to point to the new values, if need
* be. All this is done tightly wrapped in a transaction to ensure we
* enjoy the atomicity guarantees given by our awesome k/v store.
*
* @param m A message
* @returns true if we stored something new; false otherwise
*/
bool store_state(MMonPaxos *m);
void _sanity_check_store();
/**
* Helper function to decode a ceph::buffer::list into a transaction and append it
* to another transaction.
*
* This function is used during the Leader's commit and during the
* Paxos::store_state in order to apply the ceph::buffer::list's transaction onto
* the store.
*
* @param t The transaction to which we will append the operations
* @param bl A ceph::buffer::list containing an encoded transaction
*/
static void decode_append_transaction(MonitorDBStore::TransactionRef t,
ceph::buffer::list& bl) {
auto vt(std::make_shared<MonitorDBStore::Transaction>());
auto it = bl.cbegin();
vt->decode(it);
t->append(vt);
}
/**
* @todo This appears to be used only by the OSDMonitor, and I would say
* its objective is to allow a third-party to have a "private"
* state dir. -JL
*/
  // Register an extra store prefix for this paxos machine.
  // NOTE(review): s is a by-value sink parameter; it could be std::move'd
  // into the vector to save a copy.
  void add_extra_state_dir(std::string s) {
    extra_state_dirs.push_back(s);
  }
// -- service interface --
/**
* Add c to the list of callbacks waiting for us to become active.
*
* @param c A callback
*/
void wait_for_active(MonOpRequestRef op, Context *c) {
if (op)
op->mark_event("paxos:wait_for_active");
waiting_for_active.push_back(c);
}
void wait_for_active(Context *c) {
MonOpRequestRef o;
wait_for_active(o, c);
}
/**
* Trim the Paxos state as much as we can.
*/
void trim();
/**
* Check if we should trim.
*
* If trimming is disabled, we must take that into consideration and only
* return true if we are positively sure that we should trim soon.
*
* @returns true if we should trim; false otherwise.
*/
bool should_trim() {
int available_versions = get_version() - get_first_committed();
int maximum_versions = g_conf()->paxos_min + g_conf()->paxos_trim_min;
if (trimming || (available_versions <= maximum_versions))
return false;
return true;
}
  /// @return true while this paxos machine is plugged (see plug()/unplug())
  bool is_plugged() const {
    return plugged;
  }
  /// Raise the plugged flag; must not already be plugged.
  void plug() {
    ceph_assert(plugged == false);
    plugged = true;
  }
  /// Clear the plugged flag; must currently be plugged.
  void unplug() {
    ceph_assert(plugged == true);
    plugged = false;
  }
// read
/**
* @defgroup Paxos_h_read_funcs Read-related functions
* @{
*/
/**
* Get latest committed version
*
* @return latest committed version
*/
version_t get_version() { return last_committed; }
/**
* Get first committed version
*
* @return the first committed version
*/
version_t get_first_committed() { return first_committed; }
/**
* Check if a given version is readable.
*
* A version may not be readable for a myriad of reasons:
* @li the version @e v is higher that the last committed version
* @li we are not the Leader nor a Peon (election may be on-going)
* @li we do not have a committed value yet
* @li we do not have a valid lease
*
* @param seen The version we want to check if it is readable.
* @return 'true' if the version is readable; 'false' otherwise.
*/
bool is_readable(version_t seen=0);
/**
* Read version @e v and store its value in @e bl
*
* @param[in] v The version we want to read
* @param[out] bl The version's value
* @return 'true' if we successfully read the value; 'false' otherwise
*/
bool read(version_t v, ceph::buffer::list &bl);
/**
* Read the latest committed version
*
* @param[out] bl The version's value
* @return the latest committed version if we successfully read the value;
* or 0 (zero) otherwise.
*/
version_t read_current(ceph::buffer::list &bl);
/**
* Add onreadable to the list of callbacks waiting for us to become readable.
*
* @param onreadable A callback
*/
void wait_for_readable(MonOpRequestRef op, Context *onreadable) {
ceph_assert(!is_readable());
if (op)
op->mark_event("paxos:wait_for_readable");
waiting_for_readable.push_back(onreadable);
}
void wait_for_readable(Context *onreadable) {
MonOpRequestRef o;
wait_for_readable(o, onreadable);
}
/**
* @}
*/
/**
* Check if we have a valid lease.
*
* @returns true if the lease is still valid; false otherwise.
*/
bool is_lease_valid();
// write
/**
* @defgroup Paxos_h_write_funcs Write-related functions
* @{
*/
/**
* Check if we are writeable.
*
* We are writeable if we are alone (i.e., a quorum of one), or if we match
* all the following conditions:
* @li We are the Leader
* @li We are on STATE_ACTIVE
* @li We have a valid lease
*
* @return 'true' if we are writeable; 'false' otherwise.
*/
bool is_writeable();
/**
* Add c to the list of callbacks waiting for us to become writeable.
*
* @param c A callback
*/
void wait_for_writeable(MonOpRequestRef op, Context *c) {
ceph_assert(!is_writeable());
if (op)
op->mark_event("paxos:wait_for_writeable");
waiting_for_writeable.push_back(c);
}
void wait_for_writeable(Context *c) {
MonOpRequestRef o;
wait_for_writeable(o, c);
}
/**
* Get a transaction to submit operations to propose against
*
* Apply operations to this transaction. It will eventually be proposed
* to paxos.
*/
MonitorDBStore::TransactionRef get_pending_transaction();
/**
* Queue a completion for the pending proposal
*
* This completion will get triggered when the pending proposal
* transaction commits.
*/
void queue_pending_finisher(Context *onfinished);
/**
* (try to) trigger a proposal
*
* Tell paxos that it should submit the pending proposal. Note that if it
* is not active (e.g., because it is already in the midst of committing
* something) that will be deferred (e.g., until the current round finishes).
*/
bool trigger_propose();
/**
* @}
*/
/**
* @}
*/
protected:
MonitorDBStore *get_store();
};
inline std::ostream& operator<<(std::ostream& out, Paxos::C_Proposal& p)
{
std::string proposed = (p.proposed ? "proposed" : "unproposed");
out << " " << proposed
<< " queued " << (ceph_clock_now() - p.proposal_time)
<< " tx dump:\n";
auto t(std::make_shared<MonitorDBStore::Transaction>());
auto p_it = p.bl.cbegin();
t->decode(p_it);
ceph::JSONFormatter f(true);
t->dump(&f);
f.flush(out);
return out;
}
#endif
| 43,642 | 30.511191 | 86 |
h
|
null |
ceph-main/src/mon/PaxosFSMap.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_PAXOS_FSMAP_H
#define CEPH_PAXOS_FSMAP_H
#include "mds/FSMap.h"
#include "mds/MDSMap.h"
#include "include/ceph_assert.h"
/**
 * Mixin that keeps both the committed FSMap and the leader's in-progress
 * (pending) copy, guarding pending access behind is_leader().
 */
class PaxosFSMap {
public:
  virtual ~PaxosFSMap() {}
  /// @return the pending (next-epoch) map; only valid on the leader
  const FSMap &get_pending_fsmap() const { ceph_assert(is_leader()); return pending_fsmap; }
  /// @return the current committed map
  const FSMap &get_fsmap() const { return fsmap; }
  /// implemented by the deriving service; gates pending-map access
  virtual bool is_leader() const = 0;
protected:
  /// mutable access to the pending map; only valid on the leader
  FSMap &get_pending_fsmap_writeable() { ceph_assert(is_leader()); return pending_fsmap; }
  /// start a fresh pending map: a copy of the current one with epoch bumped
  FSMap &create_pending() {
    ceph_assert(is_leader());
    pending_fsmap = fsmap;
    pending_fsmap.epoch++;
    return pending_fsmap;
  }
  /// replace the committed map by decoding bl; resets pending
  void decode(ceph::buffer::list &bl) {
    fsmap.decode(bl);
    pending_fsmap = FSMap(); /* nuke it to catch invalid access */
  }
private:
  /* Keep these PRIVATE to prevent unprotected manipulation. */
  FSMap fsmap; /* the current epoch */
  FSMap pending_fsmap; /* the next epoch */
};
#endif
| 1,350 | 23.563636 | 92 |
h
|
null |
ceph-main/src/mon/PaxosService.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "PaxosService.h"
#include "common/Clock.h"
#include "common/config.h"
#include "include/stringify.h"
#include "include/ceph_assert.h"
#include "mon/MonOpRequest.h"
using std::ostream;
using std::string;
using ceph::bufferlist;
#define dout_subsys ceph_subsys_paxos
#undef dout_prefix
#define dout_prefix _prefix(_dout, mon, paxos, service_name, get_first_committed(), get_last_committed())
// Build the debug-log prefix:
//   "mon.<name>@<rank>(<state>).paxosservice(<service> <fc>..<lc>) "
static ostream& _prefix(std::ostream *_dout, Monitor &mon, Paxos &paxos, string service_name,
			version_t fc, version_t lc) {
  ostream &out = *_dout;
  out << "mon." << mon.name << "@" << mon.rank
      << "(" << mon.get_state_name()
      << ").paxosservice(" << service_name << " " << fc << ".." << lc << ") ";
  return out;
}
/**
 * Generic dispatch pipeline for a service message: drop stale or
 * disconnected requests, wait until the map is readable, let the service
 * preprocess, forward to the leader if we are not it, prepare an update,
 * and finally decide whether to propose now, later, or not at all.
 * Always returns true (the message was consumed or queued).
 */
bool PaxosService::dispatch(MonOpRequestRef op)
{
  ceph_assert(op->is_type_service() || op->is_type_command());
  auto m = op->get_req<PaxosServiceMessage>();
  op->mark_event("psvc:dispatch");
  dout(10) << __func__ << " " << m << " " << *m
	   << " from " << m->get_orig_source_inst()
	   << " con " << m->get_connection() << dendl;
  if (mon.is_shutdown()) {
    return true;
  }
  // make sure this message isn't forwarded from a previous election epoch
  if (m->rx_election_epoch &&
      m->rx_election_epoch < mon.get_epoch()) {
    dout(10) << " discarding forwarded message from previous election epoch "
	     << m->rx_election_epoch << " < " << mon.get_epoch() << dendl;
    return true;
  }
  // make sure the client is still connected.  note that a proxied
  // connection will be disconnected with a null message; don't drop
  // those.  also ignore loopback (e.g., log) messages.
  if (m->get_connection() &&
      !m->get_connection()->is_connected() &&
      m->get_connection() != mon.con_self &&
      m->get_connection()->get_messenger() != NULL) {
    dout(10) << " discarding message from disconnected client "
	     << m->get_source_inst() << " " << *m << dendl;
    return true;
  }
  // make sure our map is readable and up to date
  if (!is_readable(m->version)) {
    dout(10) << " waiting for paxos -> readable (v" << m->version << ")" << dendl;
    wait_for_readable(op, new C_RetryMessage(this, op), m->version);
    return true;
  }
  // preprocess
  if (preprocess_query(op))
    return true;  // easy!
  // leader?
  if (!mon.is_leader()) {
    mon.forward_request_leader(op);
    return true;
  }
  // writeable?
  if (!is_writeable()) {
    dout(10) << " waiting for paxos -> writeable" << dendl;
    wait_for_writeable(op, new C_RetryMessage(this, op));
    return true;
  }
  // update
  if (!prepare_update(op)) {
    // no changes made.
    return true;
  }
  // the service may demand an immediate proposal (e.g. during prepare_update)
  if (need_immediate_propose) {
    dout(10) << __func__ << " forced immediate propose" << dendl;
    propose_pending();
    return true;
  }
  // ask the (possibly overridden) policy whether and when to propose
  double delay = 0.0;
  if (!should_propose(delay)) {
    dout(10) << " not proposing" << dendl;
    return true;
  }
  if (delay == 0.0) {
    propose_pending();
    return true;
  }
  // delay a bit
  if (!proposal_timer) {
    /**
     * Callback class used to propose the pending value once the proposal_timer
     * fires up.
     */
    auto do_propose = new C_MonContext{&mon, [this](int r) {
        proposal_timer = 0;
        if (r >= 0) {
          propose_pending();
        } else if (r == -ECANCELED || r == -EAGAIN) {
          return;
        } else {
          ceph_abort_msg("bad return value for proposal_timer");
        }
    }};
    dout(10) << " setting proposal_timer " << do_propose
	     << " with delay of " << delay << dendl;
    proposal_timer = mon.timer.add_event_after(delay, do_propose);
  } else {
    // an earlier message already scheduled a proposal; piggy-back on it
    dout(10) << " proposal_timer already set" << dendl;
  }
  return true;
}
/**
 * Refresh our view after paxos committed new state: re-read the cached
 * first/last committed versions, run the on-disk format upgrade hook if the
 * stored format changed, then let the service consume the new state.
 */
void PaxosService::refresh(bool *need_bootstrap)
{
  // update cached versions
  cached_first_committed = mon.store->get(get_service_name(), first_committed_name);
  cached_last_committed = mon.store->get(get_service_name(), last_committed_name);
  version_t new_format = get_value("format_version");
  if (new_format != format_version) {
    dout(1) << __func__ << " upgraded, format " << format_version << " -> " << new_format << dendl;
    // give the implementation a chance to react to the format change
    on_upgrade();
  }
  format_version = new_format;
  dout(10) << __func__ << dendl;
  update_from_paxos(need_bootstrap);
}
void PaxosService::post_refresh()
{
  dout(10) << __func__ << dendl;
  // Let the service do any cross-service follow-up work first.
  post_paxos_update();
  // A peon cannot drive proposals itself: anything that queued up behind a
  // proposal has to be retried (-EAGAIN) now that state has moved on.
  if (!mon.is_peon())
    return;
  if (!waiting_for_finished_proposal.empty())
    finish_contexts(g_ceph_context, waiting_for_finished_proposal, -EAGAIN);
}
/**
 * Default proposal damping policy: propose immediately while bootstrapping
 * (little or nothing committed yet), otherwise rate-limit proposals to
 * roughly one per paxos_propose_interval.
 *
 * @param[out] delay seconds to wait before proposing
 * @return always true with this default policy
 */
bool PaxosService::should_propose(double& delay)
{
  if (get_last_committed() <= 1) {
    // quick startup: no damping at all
    delay = 0.0;
    return true;
  }
  utime_t now = ceph_clock_now();
  if ((now - paxos.last_commit_time) > g_conf()->paxos_propose_interval) {
    // past the interval already; wait only the minimum settle time
    delay = (double)g_conf()->paxos_min_wait;
  } else {
    // wait out the remainder of the interval
    delay = (double)(g_conf()->paxos_propose_interval + paxos.last_commit_time
		     - now);
  }
  return true;
}
/**
 * Encode the service's pending state into the shared paxos transaction and
 * trigger a proposal. Must only run on an active leader that actually has a
 * pending value.
 */
void PaxosService::propose_pending()
{
  dout(10) << __func__ << dendl;
  ceph_assert(have_pending);
  ceph_assert(!proposing);
  ceph_assert(mon.is_leader());
  ceph_assert(is_active());
  // a proposal is happening right now; any delayed-proposal timer is moot
  if (proposal_timer) {
    dout(10) << " canceling proposal_timer " << proposal_timer << dendl;
    mon.timer.cancel_event(proposal_timer);
    proposal_timer = NULL;
  }
  /**
   * @note What we contribute to the pending Paxos transaction is
   *	   obtained by calling a function that must be implemented by
   *	   the class implementing us. I.e., the function
   *	   encode_pending will be the one responsible to encode
   *	   whatever is pending on the implementation class into a
   *	   bufferlist, so we can then propose that as a value through
   *	   Paxos.
   */
  MonitorDBStore::TransactionRef t = paxos.get_pending_transaction();
  // periodically stash a full copy so old incrementals can be trimmed
  if (should_stash_full())
    encode_full(t);
  encode_pending(t);
  have_pending = false;
  if (format_version > 0) {
    t->put(get_service_name(), "format_version", format_version);
  }
  // apply to paxos
  proposing = true;
  need_immediate_propose = false; /* reset whenever we propose */
  /**
   * Callback class used to mark us as active once a proposal finishes going
   * through Paxos.
   *
   * We should wake people up *only* *after* we inform the service we
   * just went active. And we should wake people up only once we finish
   * going active. This is why we first go active, avoiding to wake up the
   * wrong people at the wrong time, such as waking up a C_RetryMessage
   * before waking up a C_Active, thus ending up without a pending value.
   */
  class C_Committed : public Context {
    PaxosService *ps;
  public:
    explicit C_Committed(PaxosService *p) : ps(p) { }
    void finish(int r) override {
      ps->proposing = false;
      if (r >= 0)
	ps->_active();
      else if (r == -ECANCELED || r == -EAGAIN)
	return;
      else
	ceph_abort_msg("bad return value for C_Committed");
    }
  };
  paxos.queue_pending_finisher(new C_Committed(this));
  paxos.trigger_propose();
}
bool PaxosService::should_stash_full()
{
version_t latest_full = get_version_latest_full();
/* @note The first member of the condition is moot and it is here just for
* clarity's sake. The second member would end up returing true
* nonetheless because, in that event,
* latest_full == get_trim_to() == 0.
*/
return (!latest_full ||
(latest_full <= get_trim_to()) ||
(get_last_committed() - latest_full > (version_t)g_conf()->paxos_stash_full_interval));
}
/**
 * Reset transient state after a restart: cancel any delayed proposal,
 * fail waiters with -EAGAIN so they retry, drop the pending value, and
 * notify the service.
 */
void PaxosService::restart()
{
  dout(10) << __func__ << dendl;
  if (proposal_timer) {
    dout(10) << " canceling proposal_timer " << proposal_timer << dendl;
    mon.timer.cancel_event(proposal_timer);
    proposal_timer = 0;
  }
  // anyone queued behind a proposal must retry from scratch
  finish_contexts(g_ceph_context, waiting_for_finished_proposal, -EAGAIN);
  if (have_pending) {
    discard_pending();
    have_pending = false;
  }
  proposing = false;
  on_restart();
}
/**
 * Post-election hook: any proposal in flight before the election is dead,
 * so tell its waiters to retry, then refresh our state via _active().
 */
void PaxosService::election_finished()
{
  dout(10) << __func__ << dendl;
  finish_contexts(g_ceph_context, waiting_for_finished_proposal, -EAGAIN);
  // make sure we update our state
  _active();
}
/**
 * Bring the service in line with an active paxos: wait (via callback) if
 * paxos is still proposing or not active yet; otherwise, on the leader,
 * create a pending value (and the initial state if nothing was ever
 * committed), then wake any queued waiters and run the on_active hook.
 */
void PaxosService::_active()
{
  if (is_proposing()) {
    dout(10) << __func__ << " - proposing" << dendl;
    return;
  }
  if (!is_active()) {
    dout(10) << __func__ << " - not active" << dendl;
    /**
     * Callback used to make sure we call the PaxosService::_active function
     * whenever a condition is fulfilled.
     *
     * This is used in multiple situations, from waiting for the Paxos to commit
     * our proposed value, to waiting for the Paxos to become active once an
     * election is finished.
     */
    class C_Active : public Context {
      PaxosService *svc;
    public:
      explicit C_Active(PaxosService *s) : svc(s) {}
      void finish(int r) override {
	if (r >= 0)
	  svc->_active();
      }
    };
    wait_for_active_ctx(new C_Active(this));
    return;
  }
  dout(10) << __func__ << dendl;
  // create pending state?
  if (mon.is_leader()) {
    dout(7) << __func__ << " creating new pending" << dendl;
    if (!have_pending) {
      create_pending();
      have_pending = true;
    }
    if (get_last_committed() == 0) {
      // create initial state
      create_initial();
      propose_pending();
      return;
    }
  } else {
    dout(7) << __func__ << " we are not the leader, hence we propose nothing!" << dendl;
  }
  // wake up anyone who came in while we were proposing.  note that
  // anyone waiting for the previous proposal to commit is no longer
  // on this list; it is on Paxos's.
  finish_contexts(g_ceph_context, waiting_for_finished_proposal, 0);
  if (mon.is_leader())
    upgrade_format();
  // NOTE: it's possible that this will get called twice if we commit
  // an old paxos value.  Implementations should be mindful of that.
  on_active();
}
/**
 * Tear the service down: cancel outstanding events, fail queued waiters
 * with -EAGAIN, and notify the service implementation.
 */
void PaxosService::shutdown()
{
  cancel_events();
  if (proposal_timer) {
    dout(10) << " canceling proposal_timer " << proposal_timer << dendl;
    mon.timer.cancel_event(proposal_timer);
    proposal_timer = 0;
  }
  finish_contexts(g_ceph_context, waiting_for_finished_proposal, -EAGAIN);
  on_shutdown();
}
/**
 * Trim old committed versions if the service allows it: honor the
 * paxos_service_trim_min floor, clamp a single trim's size around
 * paxos_service_trim_max, then queue the trim on the pending transaction
 * and trigger a proposal.
 */
void PaxosService::maybe_trim()
{
  if (!is_writeable())
    return;
  const version_t first_committed = get_first_committed();
  version_t trim_to = get_trim_to();
  dout(20) << __func__ << " " << first_committed << "~" << trim_to << dendl;
  if (trim_to < first_committed) {
    dout(10) << __func__ << " trim_to " << trim_to << " < first_committed "
	     << first_committed << dendl;
    return;
  }
  version_t to_remove = trim_to - first_committed;
  // don't bother proposing a trim smaller than the configured minimum batch
  const version_t trim_min = g_conf().get_val<version_t>("paxos_service_trim_min");
  if (trim_min > 0 &&
      to_remove < trim_min) {
    dout(10) << __func__ << " trim_to " << trim_to << " would only trim " << to_remove
	     << " < paxos_service_trim_min " << trim_min << dendl;
    return;
  }
  // clamp how much we trim in one go around paxos_service_trim_max:
  // small overshoots (< 1.5x) are cut to trim_max; larger backlogs are
  // halved toward the target, optionally capped by the multiplier.
  to_remove = [to_remove, trim_to, this] {
    const version_t trim_max = g_conf().get_val<version_t>("paxos_service_trim_max");
    if (trim_max == 0 || to_remove < trim_max) {
      return to_remove;
    }
    if (to_remove < trim_max * 1.5) {
      dout(10) << __func__ << " trim to " << trim_to << " would only trim " << to_remove
	       << " > paxos_service_trim_max, limiting to " << trim_max
	       << dendl;
      return trim_max;
    }
    const version_t new_trim_max = (trim_max + to_remove) / 2;
    const uint64_t trim_max_multiplier = g_conf().get_val<uint64_t>("paxos_service_trim_max_multiplier");
    if (trim_max_multiplier) {
      return std::min(new_trim_max, trim_max * trim_max_multiplier);
    } else {
      return new_trim_max;
    }
  }();
  trim_to = first_committed + to_remove;
  dout(10) << __func__ << " trimming to " << trim_to << ", " << to_remove << " states" << dendl;
  MonitorDBStore::TransactionRef t = paxos.get_pending_transaction();
  trim(t, first_committed, trim_to);
  put_first_committed(t, trim_to);
  cached_first_committed = trim_to;
  // let the service add any extra stuff
  encode_trim_extra(t, trim_to);
  paxos.trigger_propose();
}
/**
 * Queue deletion of versions [from, to) — and any matching "full" stashes —
 * onto transaction t; optionally compact the trimmed ranges.
 */
void PaxosService::trim(MonitorDBStore::TransactionRef t,
			version_t from, version_t to)
{
  dout(10) << __func__ << " from " << from << " to " << to << dendl;
  ceph_assert(from != to);
  for (version_t v = from; v < to; ++v) {
    dout(20) << __func__ << " " << v << dendl;
    t->erase(get_service_name(), v);
    // erase any stashed full map for this version too
    string full_key = mon.store->combine_strings("full", v);
    if (mon.store->exists(get_service_name(), full_key)) {
      dout(20) << __func__ << " " << full_key << dendl;
      t->erase(get_service_name(), full_key);
    }
  }
  if (g_conf()->mon_compact_on_trim) {
    // NOTE(review): `from - 1` wraps if from == 0; callers appear to pass
    // from >= 1 (first_committed) — confirm.
    dout(20) << " compacting prefix " << get_service_name() << dendl;
    t->compact_range(get_service_name(), stringify(from - 1), stringify(to));
    t->compact_range(get_service_name(),
		     mon.store->combine_strings(full_prefix_name, from - 1),
		     mon.store->combine_strings(full_prefix_name, to));
  }
}
void PaxosService::load_health()
{
bufferlist bl;
mon.store->get("health", service_name, bl);
if (bl.length()) {
auto p = bl.cbegin();
using ceph::decode;
decode(health_checks, p);
}
}
| 13,664 | 28.261242 | 105 |
cc
|
null |
ceph-main/src/mon/PaxosService.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_PAXOSSERVICE_H
#define CEPH_PAXOSSERVICE_H
#include "include/Context.h"
#include "Paxos.h"
#include "Monitor.h"
#include "MonitorDBStore.h"
/**
* A Paxos Service is an abstraction that easily allows one to obtain an
* association between a Monitor and a Paxos class, in order to implement any
* service.
*/
class PaxosService {
/**
* @defgroup PaxosService_h_class Paxos Service
* @{
*/
public:
/**
* The Monitor to which this class is associated with
*/
Monitor &mon;
/**
* The Paxos instance to which this class is associated with
*/
Paxos &paxos;
/**
* Our name. This will be associated with the class implementing us, and will
* be used mainly for store-related operations.
*/
std::string service_name;
/**
* If we are or have queued anything for proposal, this variable will be true
* until our proposal has been finished.
*/
bool proposing;
bool need_immediate_propose = false;
protected:
/**
* Services implementing us used to depend on the Paxos version, back when
* each service would have a Paxos instance for itself. However, now we only
* have a single Paxos instance, shared by all the services. Each service now
* must keep its own version, if so they wish. This variable should be used
* for that purpose.
*/
version_t service_version;
private:
/**
* Event callback responsible for proposing our pending value once a timer
* runs out and fires.
*/
Context *proposal_timer;
/**
* If the implementation class has anything pending to be proposed to Paxos,
* then have_pending should be true; otherwise, false.
*/
bool have_pending;
/**
* health checks for this service
*
* Child must populate this during encode_pending() by calling encode_health().
*/
health_check_map_t health_checks;
protected:
/**
* format of our state in RocksDB, 0 for default
*/
version_t format_version;
public:
const health_check_map_t& get_health_checks() const {
return health_checks;
}
/**
* @defgroup PaxosService_h_callbacks Callback classes
* @{
*/
/**
* Retry dispatching a given service message
*
* This callback class is used when we had to wait for some condition to
* become true while we were dispatching it.
*
* For instance, if the message's version isn't readable, according to Paxos,
* then we must wait for it to become readable. So, we just queue an
* instance of this class onto the Paxos::wait_for_readable function, and
* we will retry the whole dispatch again once the callback is fired.
*/
class C_RetryMessage : public C_MonOp {
PaxosService *svc;
public:
C_RetryMessage(PaxosService *s, MonOpRequestRef op_) :
C_MonOp(op_), svc(s) { }
void _finish(int r) override {
if (r == -EAGAIN || r >= 0)
svc->dispatch(op);
else if (r == -ECANCELED)
return;
else
ceph_abort_msg("bad C_RetryMessage return value");
}
};
  /**
   * Send a pre-built reply message for an op once the condition it was
   * queued on completes successfully.
   */
  class C_ReplyOp : public C_MonOp {
    Monitor &mon;
    // NOTE(review): this `op` appears to shadow a member inherited from
    // C_MonOp (C_RetryMessage uses `op` without declaring it); both are
    // initialized from the same request — confirm the duplication is
    // intentional.
    MonOpRequestRef op;
    MessageRef reply;
  public:
    C_ReplyOp(PaxosService *s, MonOpRequestRef o, MessageRef r) :
      C_MonOp(o), mon(s->mon), op(o), reply(r) { }
    void _finish(int r) override {
      if (r >= 0) {
	mon.send_reply(op, reply.detach());
      }
    }
  };
/**
* @}
*/
/**
* @param mn A Monitor instance
* @param p A Paxos instance
* @param name Our service's name.
*/
PaxosService(Monitor &mn, Paxos &p, std::string name)
: mon(mn), paxos(p), service_name(name),
proposing(false),
service_version(0), proposal_timer(0), have_pending(false),
format_version(0),
last_committed_name("last_committed"),
first_committed_name("first_committed"),
full_prefix_name("full"), full_latest_name("latest"),
cached_first_committed(0), cached_last_committed(0)
{
}
virtual ~PaxosService() {}
/**
* Get the service's name.
*
* @returns The service's name.
*/
const std::string& get_service_name() const { return service_name; }
/**
* Get the store prefixes we utilize
*/
  virtual void get_store_prefixes(std::set<std::string>& s) const {
    // by default a service only writes under its own service_name prefix;
    // services with extra prefixes override this
    s.insert(service_name);
  }
// i implement and you ignore
/**
* Informs this instance that it should consider itself restarted.
*
* This means that we will cancel our proposal_timer event, if any exists.
*/
void restart();
/**
* Informs this instance that an election has finished.
*
* This means that we will invoke a PaxosService::discard_pending while
* setting have_pending to false (basically, ignore our pending state) and
* we will then make sure we obtain a new state.
*
* Our state shall be updated by PaxosService::_active if the Paxos is
* active; otherwise, we will wait for it to become active by adding a
* PaxosService::C_Active callback to it.
*/
void election_finished();
/**
* Informs this instance that it is supposed to shutdown.
*
* Basically, it will instruct Paxos to cancel all events/callbacks and then
* will cancel the proposal_timer event if any exists.
*/
void shutdown();
private:
/**
* Update our state by updating it from Paxos, and then creating a new
* pending state if need be.
*
   * @remarks We only create a pending state when our Monitor is the Leader.
*
* @pre Paxos is active
* @post have_pending is true if our Monitor is the Leader and Paxos is
* active
*/
void _active();
public:
/**
* Propose a new value through Paxos.
*
* This function should be called by the classes implementing
* PaxosService, in order to propose a new value through Paxos.
*
* @pre The implementation class implements the encode_pending function.
* @pre have_pending is true
* @pre Our monitor is the Leader
* @pre Paxos is active
* @post Cancel the proposal timer, if any
* @post have_pending is false
* @post propose pending value through Paxos
*
* @note This function depends on the implementation of encode_pending on
* the class that is implementing PaxosService
*/
void propose_pending();
/**
* Let others request us to propose.
*
* At the moment, this is just a wrapper to propose_pending() with an
* extra check for is_writeable(), but it's a good practice to dissociate
* requests for proposals from direct usage of propose_pending() for
* future use -- we might want to perform additional checks or put a
* request on hold, for instance.
*/
void request_proposal() {
ceph_assert(is_writeable());
propose_pending();
}
/**
* Request service @p other to perform a proposal.
*
* We could simply use the function above, requesting @p other directly,
* but we might eventually want to do something to the request -- say,
* set a flag stating we're waiting on a cross-proposal to be finished.
*/
void request_proposal(PaxosService *other) {
ceph_assert(other != NULL);
ceph_assert(other->is_writeable());
other->request_proposal();
}
/**
* Dispatch a message by passing it to several different functions that are
* either implemented directly by this service, or that should be implemented
* by the class implementing this service.
*
* @param m A message
* @returns 'true' on successful dispatch; 'false' otherwise.
*/
bool dispatch(MonOpRequestRef op);
void refresh(bool *need_bootstrap);
void post_refresh();
  /**
   * @defgroup PaxosService_h_override_funcs Functions that should be
   *            overridden.
   *
   * These functions should be overridden at will by the class implementing
   * this service.
   * @{
   */
  /**
   * Create the initial state for your system.
   *
   * In some of ours the state is actually set up elsewhere so this does
   * nothing.
   */
  virtual void create_initial() = 0;

  /**
   * Query the Paxos system for the latest state and apply it if it's newer
   * than the current Monitor state.
   */
  virtual void update_from_paxos(bool *need_bootstrap) = 0;

  /**
   * Hook called after all services have refreshed their state from paxos
   *
   * This is useful for doing any update work that depends on other
   * service's having up-to-date state.
   */
  virtual void post_paxos_update() {}

  /**
   * Init on startup
   *
   * This is called on mon startup, after all of the PaxosService instances'
   * update_from_paxos() methods have been called
   */
  virtual void init() {}

  /**
   * Create the pending state.
   *
   * @invariant This function is only called on a Leader.
   * @remarks This created state is then modified by incoming messages.
   * @remarks Called at startup and after every Paxos ratification round.
   */
  virtual void create_pending() = 0;

  /**
   * Encode the pending state into a ceph::buffer::list for ratification and
   * transmission as the next state.
   *
   * @invariant This function is only called on a Leader.
   *
   * @param t The transaction to hold all changes.
   */
  virtual void encode_pending(MonitorDBStore::TransactionRef t) = 0;

  /**
   * Discard the pending state
   *
   * @invariant This function is only called on a Leader.
   *
   * @remarks This function is NOT overridden in any of our code, but it is
   *           called in PaxosService::election_finished if have_pending is
   *           true.
   */
  virtual void discard_pending() { }

  /**
   * Look at the query; if the query can be handled without changing state,
   * do so.
   *
   * @param op A query message
   * @returns 'true' if the query was handled (e.g., was a read that got
   *           answered, was a state change that has no effect); 'false'
   *           otherwise.
   */
  virtual bool preprocess_query(MonOpRequestRef op) = 0;

  /**
   * Apply the message to the pending state.
   *
   * @invariant This function is only called on a Leader.
   *
   * @param op An update message
   * @returns 'true' if the pending state should be proposed; 'false' otherwise.
   */
  virtual bool prepare_update(MonOpRequestRef op) = 0;
  /**
   * @}
   */

  /**
   * Determine if the Paxos system should vote on pending, and if so how long
   * it should wait to vote.
   *
   * @param[out] delay The wait time, used so we can limit the update traffic
   *                    spamming.
   * @returns 'true' if the Paxos system should propose; 'false' otherwise.
   */
  virtual bool should_propose(double &delay);

  /**
   * force an immediate propose.
   *
   * This is meant to be called from prepare_update(op).
   */
  void force_immediate_propose() {
    need_immediate_propose = true;
  }
  /**
   * @defgroup PaxosService_h_courtesy Courtesy functions
   *
   * Courtesy functions, in case the class implementing this service has
   * anything it wants/needs to do at these times.
   * @{
   */
  /**
   * This is called when the Paxos state goes to active.
   *
   * On the peon, this is after each election.
   * On the leader, this is after each election, *and* after each completed
   * proposal.
   *
   * @note This function may get called twice in certain recovery cases.
   */
  virtual void on_active() { }

  /**
   * This is called when we are shutting down
   */
  virtual void on_shutdown() {}

  /**
   * this is called when activating on the leader
   *
   * it should conditionally upgrade the on-disk format by proposing a transaction
   */
  virtual void upgrade_format() { }

  /**
   * this is called when we detect the store has just upgraded underneath us
   */
  virtual void on_upgrade() {}

  /**
   * Called when the Paxos system enters a Leader election.
   *
   * @remarks It's a courtesy method, in case the class implementing this
   *           service has anything it wants/needs to do at that time.
   */
  virtual void on_restart() { }
  /**
   * @}
   */

  /**
   * Tick.
   */
  virtual void tick() {}

  /**
   * Persist the health checks in @p next for this service and hand the
   * old/new sets to the monitor so changes get logged.
   *
   * @param next The new set of health checks for this service.
   * @param t The transaction the encoded checks are added to.
   */
  void encode_health(const health_check_map_t& next,
                     MonitorDBStore::TransactionRef t) {
    using ceph::encode;
    ceph::buffer::list bl;
    encode(next, bl);
    t->put("health", service_name, bl);
    mon.log_health(next, health_checks, t);
  }
  /// Load this service's health checks back from the store.
  void load_health();
  /**
   * @defgroup PaxosService_h_store_keys Set of keys that are usually used on
   *                                      all the services implementing this
   *                                      class, and, being almost the only keys
   *                                      used, should be standardized to avoid
   *                                      mistakes.
   * @{
   */
  const std::string last_committed_name;
  const std::string first_committed_name;
  const std::string full_prefix_name;
  const std::string full_latest_name;
  /**
   * @}
   */

private:
  /**
   * @defgroup PaxosService_h_version_cache Variables holding cached values
   *                                         for the most used versions (first
   *                                         and last committed); we only have
   *                                         to read them when the store is
   *                                         updated, so in-between updates we
   *                                         may very well use cached versions
   *                                         and avoid the overhead.
   * @{
   */
  version_t cached_first_committed;
  version_t cached_last_committed;
  /**
   * @}
   */

  /**
   * Callback list to be used whenever we are running a proposal through
   * Paxos. These callbacks will be awaken whenever the said proposal
   * finishes.
   */
  std::list<Context*> waiting_for_finished_proposal;
public:
  /**
   * Report whether a proposal of ours is currently in flight.
   *
   * @returns true while we are proposing; false otherwise.
   */
  bool is_proposing() const {
    return proposing;
  }
  /**
   * Check whether this service can be considered active.
   *
   * We are active when no proposal of ours is in flight and the underlying
   * Paxos machine is in its ACTIVE, UPDATING or WRITING state (wraps the
   * corresponding Paxos state queries).
   *
   * @returns true if active; false otherwise.
   */
  bool is_active() const {
    if (is_proposing()) {
      return false;
    }
    return paxos.is_active() || paxos.is_updating() || paxos.is_writing();
  }
/**
* Check if we are readable.
*
* This mirrors on the paxos check, except that we also verify that
*
* - the client hasn't seen the future relative to this PaxosService
* - this service isn't proposing.
* - we have committed our initial state (last_committed > 0)
*
* @param ver The version we want to check if is readable
* @returns true if it is readable; false otherwise
*/
bool is_readable(version_t ver = 0) const {
if (ver > get_last_committed() ||
!paxos.is_readable(0) ||
get_last_committed() == 0)
return false;
return true;
}
/**
* Check if we are writeable.
*
* We consider to be writeable iff:
*
* - we are not proposing a new version;
* - we are ready to be written to -- i.e., we have a pending value.
* - paxos is (active or updating or writing or refresh)
*
* @returns true if writeable; false otherwise
*/
bool is_writeable() const {
return is_active() && have_pending;
}
/**
* Wait for a proposal to finish.
*
* Add a callback to be awaken whenever our current proposal finishes being
* proposed through Paxos.
*
* @param c The callback to be awaken once the proposal is finished.
*/
void wait_for_finished_proposal(MonOpRequestRef op, Context *c) {
if (op)
op->mark_event(service_name + ":wait_for_finished_proposal");
waiting_for_finished_proposal.push_back(c);
}
void wait_for_finished_proposal_ctx(Context *c) {
MonOpRequestRef o;
wait_for_finished_proposal(o, c);
}
/**
* Wait for us to become active
*
* @param c The callback to be awaken once we become active.
*/
void wait_for_active(MonOpRequestRef op, Context *c) {
if (op)
op->mark_event(service_name + ":wait_for_active");
if (!is_proposing()) {
paxos.wait_for_active(op, c);
return;
}
wait_for_finished_proposal(op, c);
}
void wait_for_active_ctx(Context *c) {
MonOpRequestRef o;
wait_for_active(o, c);
}
/**
* Wait for us to become readable
*
* @param c The callback to be awaken once we become active.
* @param ver The version we want to wait on.
*/
void wait_for_readable(MonOpRequestRef op, Context *c, version_t ver = 0) {
/* This is somewhat of a hack. We only do check if a version is readable on
* PaxosService::dispatch(), but, nonetheless, we must make sure that if that
* is why we are not readable, then we must wait on PaxosService and not on
* Paxos; otherwise, we may assert on Paxos::wait_for_readable() if it
* happens to be readable at that specific point in time.
*/
if (op)
op->mark_event(service_name + ":wait_for_readable");
if (is_proposing() ||
ver > get_last_committed() ||
get_last_committed() == 0)
wait_for_finished_proposal(op, c);
else {
if (op)
op->mark_event(service_name + ":wait_for_readable/paxos");
paxos.wait_for_readable(op, c);
}
}
void wait_for_readable_ctx(Context *c, version_t ver = 0) {
MonOpRequestRef o; // will initialize the shared_ptr to NULL
wait_for_readable(o, c, ver);
}
/**
* Wait for us to become writeable
*
* @param c The callback to be awaken once we become writeable.
*/
void wait_for_writeable(MonOpRequestRef op, Context *c) {
if (op)
op->mark_event(service_name + ":wait_for_writeable");
if (is_proposing())
wait_for_finished_proposal(op, c);
else if (!is_writeable())
wait_for_active(op, c);
else
paxos.wait_for_writeable(op, c);
}
void wait_for_writeable_ctx(Context *c) {
MonOpRequestRef o;
wait_for_writeable(o, c);
}
  /**
   * @defgroup PaxosService_h_Trim Functions for trimming states
   * @{
   */
  /**
   * trim service states if appropriate
   *
   * Called at same interval as tick()
   */
  void maybe_trim();

  /**
   * Auxiliary function to trim our state from version @p from to version
   * @p to, not including; i.e., the interval [from, to[
   *
   * @param t The transaction to which we will add the trim operations.
   * @param from the lower limit of the interval to be trimmed
   * @param to the upper limit of the interval to be trimmed (not including)
   */
  void trim(MonitorDBStore::TransactionRef t, version_t from, version_t to);

  /**
   * encode service-specific extra bits into trim transaction
   *
   * @param tx transaction
   * @param first new first_committed value
   */
  virtual void encode_trim_extra(MonitorDBStore::TransactionRef tx,
                                 version_t first) {}

  /**
   * Get the version we should trim to.
   *
   * Should be overloaded by service if it wants to trim states.
   *
   * @returns the version we should trim to; if we return zero, it should be
   *           assumed that there's no version to trim to.
   */
  virtual version_t get_trim_to() const {
    return 0;
  }
  /**
   * @}
   */
  /**
   * @defgroup PaxosService_h_Stash_Full
   * @{
   */
  /// Decide whether a full copy of the state should be stashed this round
  /// (see encode_full()).
  virtual bool should_stash_full();
  /**
   * Encode a full version on @p t
   *
   * @note We force every service to implement this function, since we strongly
   *        desire the encoding of full versions.
   * @note Services that do not trim their state, will be bound to only create
   *        one full version. Full version stashing is determined/controlled by
   *        trimming: we stash a version each time a trim is bound to erase the
   *        latest full version.
   *
   * @param t Transaction on which the full version shall be encoded.
   */
  virtual void encode_full(MonitorDBStore::TransactionRef t) = 0;
  /**
   * @}
   */

  /**
   * Cancel events.
   *
   * @note This function is a wrapper for Paxos::cancel_events
   */
  void cancel_events() {
    paxos.cancel_events();
  }
  /**
   * @defgroup PaxosService_h_store_funcs Back storage interface functions
   * @{
   */
  /**
   * @defgroup PaxosService_h_store_modify Wrapper function interface to access
   *                                        the back store for modification
   *                                        purposes
   * @{
   */
  /// Set the first committed version to @p ver on transaction @p t.
  void put_first_committed(MonitorDBStore::TransactionRef t, version_t ver) {
    t->put(get_service_name(), first_committed_name, ver);
  }
  /**
   * Set the last committed version to @p ver
   *
   * @param t A transaction to which we add this put operation
   * @param ver The last committed version number being put
   */
  void put_last_committed(MonitorDBStore::TransactionRef t, version_t ver) {
    t->put(get_service_name(), last_committed_name, ver);

    /* We only need to do this once, and that is when we are about to make our
     * first proposal. There are some services that rely on first_committed
     * being set -- and it should! -- so we need to guarantee that it is,
     * specially because the services itself do not do it themselves. They do
     * rely on it, but they expect us to deal with it, and so we shall.
     */
    if (!get_first_committed())
      put_first_committed(t, ver);
  }
  /**
   * Put the contents of @p bl into version @p ver
   *
   * @param t A transaction to which we will add this put operation
   * @param ver The version to which we will add the value
   * @param bl A ceph::buffer::list containing the version's value
   */
  void put_version(MonitorDBStore::TransactionRef t, version_t ver,
                   ceph::buffer::list& bl) {
    t->put(get_service_name(), ver, bl);
  }
  /**
   * Put the contents of @p bl into a full version key for this service, that
   * will be created with @p ver in mind.
   *
   * @param t The transaction to which we will add this put operation
   * @param ver A version number
   * @param bl A ceph::buffer::list containing the version's value
   */
  void put_version_full(MonitorDBStore::TransactionRef t,
                        version_t ver, ceph::buffer::list& bl) {
    // full versions live under a combined "<full_prefix>_<ver>" key
    std::string key = mon.store->combine_strings(full_prefix_name, ver);
    t->put(get_service_name(), key, bl);
  }
  /**
   * Put the version number in @p ver into the key pointing to the latest full
   * version of this service.
   *
   * @param t The transaction to which we will add this put operation
   * @param ver A version number
   */
  void put_version_latest_full(MonitorDBStore::TransactionRef t, version_t ver) {
    std::string key = mon.store->combine_strings(full_prefix_name, full_latest_name);
    t->put(get_service_name(), key, ver);
  }
  /**
   * Put the contents of @p bl into the key @p key.
   *
   * @param t A transaction to which we will add this put operation
   * @param key The key to which we will add the value
   * @param bl A ceph::buffer::list containing the value
   */
  void put_value(MonitorDBStore::TransactionRef t,
                 const std::string& key, ceph::buffer::list& bl) {
    t->put(get_service_name(), key, bl);
  }

  /**
   * Put integer value @p v into the key @p key.
   *
   * @param t A transaction to which we will add this put operation
   * @param key The key to which we will add the value
   * @param v An integer
   */
  void put_value(MonitorDBStore::TransactionRef t,
                 const std::string& key, version_t v) {
    t->put(get_service_name(), key, v);
  }

  /**
   * @}
   */
  /**
   * @defgroup PaxosService_h_store_get Wrapper function interface to access
   *                                     the back store for reading purposes
   * @{
   */

  /**
   * @defgroup PaxosService_h_version_cache Obtain cached versions for this
   *                                         service.
   * @{
   */
  /**
   * Get the first committed version
   *
   * @returns Our first committed version (that is available)
   */
  version_t get_first_committed() const{
    return cached_first_committed;
  }
  /**
   * Get the last committed version
   *
   * @returns Our last committed version
   */
  version_t get_last_committed() const{
    return cached_last_committed;
  }
  /**
   * @}
   */

  /**
   * Get the contents of a given version @p ver
   *
   * @param ver The version being obtained
   * @param bl The ceph::buffer::list to be populated
   * @return 0 on success; <0 otherwise
   */
  virtual int get_version(version_t ver, ceph::buffer::list& bl) {
    return mon.store->get(get_service_name(), ver, bl);
  }
  /**
   * Get the contents of a given full version of this service.
   *
   * @param ver A version number
   * @param bl The ceph::buffer::list to be populated
   * @returns 0 on success; <0 otherwise
   */
  virtual int get_version_full(version_t ver, ceph::buffer::list& bl) {
    // full versions live under a combined "<full_prefix>_<ver>" key
    std::string key = mon.store->combine_strings(full_prefix_name, ver);
    return mon.store->get(get_service_name(), key, bl);
  }
  /**
   * Get the latest full version number
   *
   * @returns A version number
   */
  version_t get_version_latest_full() {
    std::string key = mon.store->combine_strings(full_prefix_name, full_latest_name);
    return mon.store->get(get_service_name(), key);
  }

  /**
   * Get a value from a given key.
   *
   * @param[in] key The key
   * @param[out] bl The ceph::buffer::list to be populated with the value
   */
  int get_value(const std::string& key, ceph::buffer::list& bl) {
    return mon.store->get(get_service_name(), key, bl);
  }
  /**
   * Get an integer value from a given key.
   *
   * @param[in] key The key
   */
  version_t get_value(const std::string& key) {
    return mon.store->get(get_service_name(), key);
  }

  /**
   * @}
   */
  /**
   * @}
   */
};
#endif
| 25,751 | 27.581576 | 85 |
h
|
null |
ceph-main/src/mon/Session.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MON_SESSION_H
#define CEPH_MON_SESSION_H
#include <string>
#include <string_view>
#include "include/utime.h"
#include "include/xlist.h"
#include "global/global_context.h"
#include "msg/msg_types.h"
#include "mon/mon_types.h"
#include "auth/AuthServiceHandler.h"
#include "osd/OSDMap.h"
#include "MonCap.h"
struct MonSession;
/**
 * A single subscription ("send me updates of this type") belonging to a
 * MonSession.  Owned by the session's sub_map and linked into the global
 * per-type list in MonSessionMap::subs.
 */
struct Subscription {
  MonSession *session;                   ///< back-pointer to the owning session
  std::string type;                      ///< subscription type (map/log name)
  xlist<Subscription*>::item type_item;  ///< link into MonSessionMap::subs[type]
  version_t next;                        ///< next version of interest; set from the
                                         ///< subscribe request's start version
  bool onetime;                          ///< one-shot subscription flag from the request
  bool incremental_onetime;              ///< has CEPH_FEATURE_INCSUBOSDMAP

  Subscription(MonSession *s, const std::string& t) : session(s), type(t), type_item(this),
                                                      next(0), onetime(false), incremental_onetime(false) {}
};
/**
 * State for one client/daemon connection to the monitor.
 *
 * Refcounted; MonSessionMap holds one reference for as long as the session
 * is registered (see MonSessionMap::add_session()/remove_session()).
 */
struct MonSession : public RefCountedObject {
  ConnectionRef con;
  int con_type = 0;            ///< peer entity type, filled in by _ident()
  uint64_t con_features = 0;   // zero if AnonConnection
  entity_name_t name;          ///< peer entity name (e.g. osd.N)
  entity_addrvec_t addrs;      ///< addresses the peer identified with
  entity_addr_t socket_addr;   ///< address the connection actually came from
  utime_t session_timeout;     ///< expiry deadline, set in MonSessionMap::add_session()
  bool closed = false;         ///< set once MonSessionMap::remove_session() ran
  xlist<MonSession*>::item item;  ///< link into MonSessionMap::sessions
  std::set<uint64_t> routed_request_tids;
  MonCap caps;                 ///< capabilities granted to this session
  bool validated_stretch_connection = false;
  bool authenticated = false;  ///< true if auth handshake is complete

  std::map<std::string, Subscription*> sub_map;  ///< subscriptions keyed by type
  epoch_t osd_epoch = 0;       ///< the osdmap epoch sent to the mon client

  AuthServiceHandler *auth_handler = nullptr;  ///< owned; deleted in the destructor
  EntityName entity_name;
  uint64_t global_id = 0;
  global_id_status_t global_id_status = global_id_status_t::NONE;

  // NOTE(review): presumably used when requests are proxied through another
  // monitor -- confirm against Monitor's forwarding code.
  ConnectionRef proxy_con;
  uint64_t proxy_tid = 0;

  std::string remote_host;                ///< remote host name
  std::map<std::string,std::string,std::less<>> last_config;  ///< most recently shared config
  bool any_config = false;

  MonSession(Connection *c)
    : RefCountedObject(g_ceph_context),
      con(c),
      item(this) { }

  /// Record the peer's identity once it is known.
  void _ident(const entity_name_t& n, const entity_addrvec_t& av) {
    con_type = con->get_peer_type();
    name = n;
    addrs = av;
    socket_addr = con->get_peer_socket_addr();
    if (con->get_messenger()) {
      // only fill in features if this is a non-anonymous connection
      con_features = con->get_features();
    }
  }

  ~MonSession() override {
    //generic_dout(0) << "~MonSession " << this << dendl;
    // we should have been removed before we get destructed; see MonSessionMap::remove_session()
    ceph_assert(!item.is_on_list());
    ceph_assert(sub_map.empty());
    delete auth_handler;
  }

  /// Check this session's caps for @p service against the rwx bits in @p mask.
  bool is_capable(std::string service, int mask) {
    std::map<std::string,std::string> args;
    return caps.is_capable(
      g_ceph_context,
      entity_name,
      service, "", args,
      mask & MON_CAP_R, mask & MON_CAP_W, mask & MON_CAP_X,
      get_peer_socket_addr());
  }

  std::vector<std::string> get_allowed_fs_names() const {
    return caps.allowed_fs_names();
  }

  bool fs_name_capable(std::string_view fsname, __u8 mask) {
    return caps.fs_name_capable(entity_name, fsname, mask);
  }

  const entity_addr_t& get_peer_socket_addr() {
    return socket_addr;
  }

  /// Dump this session's state (for "sessions" admin commands etc.).
  void dump(ceph::Formatter *f) const {
    f->dump_stream("name") << name;
    f->dump_stream("entity_name") << entity_name;
    f->dump_object("addrs", addrs);
    f->dump_object("socket_addr", socket_addr);
    f->dump_string("con_type", ceph_entity_type_name(con_type));
    f->dump_unsigned("con_features", con_features);
    f->dump_stream("con_features_hex") << std::hex << con_features << std::dec;
    f->dump_string("con_features_release",
                   ceph_release_name(ceph_release_from_features(con_features)));
    f->dump_bool("open", !closed);
    f->dump_object("caps", caps);
    f->dump_bool("authenticated", authenticated);
    f->dump_unsigned("global_id", global_id);
    f->dump_stream("global_id_status") << global_id_status;
    f->dump_unsigned("osd_epoch", osd_epoch);
    f->dump_string("remote_host", remote_host);
  }
};
struct MonSessionMap {
xlist<MonSession*> sessions;
std::map<std::string, xlist<Subscription*>* > subs;
std::multimap<int, MonSession*> by_osd;
FeatureMap feature_map; // type -> features -> count
MonSessionMap() {}
~MonSessionMap() {
while (!subs.empty()) {
ceph_assert(subs.begin()->second->empty());
delete subs.begin()->second;
subs.erase(subs.begin());
}
}
unsigned get_size() const {
return sessions.size();
}
void remove_session(MonSession *s) {
ceph_assert(!s->closed);
for (std::map<std::string,Subscription*>::iterator p = s->sub_map.begin(); p != s->sub_map.end(); ++p) {
p->second->type_item.remove_myself();
delete p->second;
}
s->sub_map.clear();
s->item.remove_myself();
if (s->name.is_osd() &&
s->name.num() >= 0) {
for (auto p = by_osd.find(s->name.num());
p->first == s->name.num();
++p)
if (p->second == s) {
by_osd.erase(p);
break;
}
}
if (s->con_features) {
feature_map.rm(s->con_type, s->con_features);
}
s->closed = true;
s->put();
}
MonSession *new_session(const entity_name_t& n,
const entity_addrvec_t& av,
Connection *c) {
MonSession *s = new MonSession(c);
ceph_assert(s);
s->_ident(n, av);
add_session(s);
return s;
}
void add_session(MonSession *s) {
s->session_timeout = ceph_clock_now();
s->session_timeout += g_conf()->mon_session_timeout;
sessions.push_back(&s->item);
s->get();
if (s->name.is_osd() &&
s->name.num() >= 0) {
by_osd.insert(std::pair<int,MonSession*>(s->name.num(), s));
}
if (s->con_features) {
feature_map.add(s->con_type, s->con_features);
}
}
MonSession *get_random_osd_session(OSDMap *osdmap) {
// ok, this isn't actually random, but close enough.
if (by_osd.empty())
return 0;
int n = by_osd.rbegin()->first + 1;
int r = rand() % n;
auto p = by_osd.lower_bound(r);
if (p == by_osd.end())
--p;
if (!osdmap) {
return p->second;
}
MonSession *s = NULL;
auto b = p;
auto f = p;
bool backward = true, forward = true;
while (backward || forward) {
if (backward) {
if (osdmap->is_up(b->first) &&
osdmap->get_addrs(b->first) == b->second->con->get_peer_addrs()) {
s = b->second;
break;
}
if (b != by_osd.begin())
--b;
else
backward = false;
}
forward = (f != by_osd.end());
if (forward) {
if (osdmap->is_up(f->first)) {
s = f->second;
break;
}
++f;
}
}
return s;
}
void add_update_sub(MonSession *s, const std::string& what, version_t start, bool onetime, bool incremental_onetime) {
Subscription *sub = 0;
if (s->sub_map.count(what)) {
sub = s->sub_map[what];
} else {
sub = new Subscription(s, what);
s->sub_map[what] = sub;
if (!subs.count(what))
subs[what] = new xlist<Subscription*>;
subs[what]->push_back(&sub->type_item);
}
sub->next = start;
sub->onetime = onetime;
sub->incremental_onetime = onetime && incremental_onetime;
}
void remove_sub(Subscription *sub) {
sub->session->sub_map.erase(sub->type);
sub->type_item.remove_myself();
delete sub;
}
};
/// Human-readable one-line summary of a session, for logging.
inline std::ostream& operator<<(std::ostream& out, const MonSession& s)
{
  const char* open_state = s.closed ? "closed" : "open";
  const char* release =
    ceph_release_name(ceph_release_from_features(s.con_features));
  out << "MonSession(" << s.name << " " << s.addrs
      << " is " << open_state
      << " " << s.caps
      << ", features 0x" << std::hex << s.con_features << std::dec
      << " (" << release
      << "))";
  return out;
}
#endif
| 8,043 | 26.175676 | 120 |
h
|
null |
ceph-main/src/mon/error_code.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat <[email protected]>
* Author: Adam C. Emerson <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <string>
#include "common/error_code.h"
#include "common/errno.h"
#include "error_code.h"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wnon-virtual-dtor"
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
namespace bs = boost::system;
/**
 * boost::system error category for monitor errors.
 *
 * Monitor error values are POSIX errno codes (see from_code(), which
 * negates them for Ceph's -errno convention), so messages come from
 * strerror-style text and comparisons map onto the generic category.
 */
class mon_error_category : public ceph::converting_category {
public:
  mon_error_category(){}
  const char* name() const noexcept override;
  const char* message(int ev, char*, std::size_t) const noexcept override;
  std::string message(int ev) const override;
  bs::error_condition default_error_condition(int ev) const noexcept
    override;
  bool equivalent(int ev, const bs::error_condition& c) const
    noexcept override;
  using ceph::converting_category::equivalent;
  int from_code(int ev) const noexcept override;
};
const char* mon_error_category::name() const noexcept {
return "mon";
}
const char* mon_error_category::message(int ev, char* buf,
std::size_t len) const noexcept {
if (ev == 0)
return "No error";
if (len) {
auto s = cpp_strerror(ev);
auto n = s.copy(buf, len - 1);
*(buf + n) = '\0';
}
return buf;
}
std::string mon_error_category::message(int ev) const {
if (ev == 0)
return "No error";
return cpp_strerror(ev);
}
/// Monitor error values are errnos, so they compare against the generic
/// (POSIX) category.
bs::error_condition
mon_error_category::default_error_condition(int ev) const noexcept {
  return bs::error_condition(ev, bs::generic_category());
}

bool mon_error_category::equivalent(int ev, const bs::error_condition& c) const noexcept {
  return c == default_error_condition(ev);
}

/// Convert to Ceph's classic negative-errno convention.
int mon_error_category::from_code(int ev) const noexcept {
  return -ev;
}

/// Singleton accessor for the monitor error category.
const bs::error_category& mon_category() noexcept {
  static const mon_error_category c;
  return c;
}
#pragma GCC diagnostic pop
#pragma clang diagnostic pop
| 2,260 | 25.290698 | 89 |
cc
|
null |
ceph-main/src/mon/error_code.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat <[email protected]>
* Author: Adam C. Emerson <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <boost/system/error_code.hpp>
#include "include/rados.h"
const boost::system::error_category& mon_category() noexcept;

// The Monitor, like the OSD, mostly replies with POSIX error codes.
enum class mon_errc {
};

namespace boost::system {
template<>
struct is_error_code_enum<::mon_errc> {
  static const bool value = true;
};
template<>
struct is_error_condition_enum<::mon_errc> {
  static const bool value = false;
};
}

// implicit conversion: is_error_code_enum is true above, so a mon_errc
// converts to error_code through this ADL hook.
inline boost::system::error_code make_error_code(mon_errc e) noexcept {
  return { static_cast<int>(e), mon_category() };
}

// explicit conversion only: is_error_condition_enum is false above, so
// this must be called by hand.
inline boost::system::error_condition make_error_condition(mon_errc e) noexcept {
  return { static_cast<int>(e), mon_category() };
}
| 1,230 | 23.62 | 81 |
h
|
null |
ceph-main/src/mon/health_check.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <string>
#include <map>
#include "include/health.h"
#include "include/utime.h"
#include "common/Formatter.h"
/**
 * One health check: a severity, a human-readable summary, per-instance
 * detail lines, and a count of affected entities.
 */
struct health_check_t {
  health_status_t severity;
  std::string summary;
  std::list<std::string> detail;
  int64_t count = 0;  ///< number of affected items; encoded since v2

  DENC(health_check_t, v, p) {
    DENC_START(2, 1, p);
    denc(v.severity, p);
    denc(v.summary, p);
    denc(v.detail, p);
    if (struct_v >= 2) {
      // count was added in encoding version 2
      denc(v.count, p);
    }
    DENC_FINISH(p);
  }

  friend bool operator==(const health_check_t& l,
                         const health_check_t& r) {
    return l.severity == r.severity &&
      l.summary == r.summary &&
      l.detail == r.detail &&
      l.count == r.count;
  }
  friend bool operator!=(const health_check_t& l,
                         const health_check_t& r) {
    return !(l == r);
  }

  /// Dump to @p f; detail lines are included only if @p want_detail.
  void dump(ceph::Formatter *f, bool want_detail=true) const {
    f->dump_stream("severity") << severity;

    f->open_object_section("summary");
    f->dump_string("message", summary);
    f->dump_int("count", count);
    f->close_section();

    if (want_detail) {
      f->open_array_section("detail");
      for (auto& p : detail) {
        f->open_object_section("detail_item");
        f->dump_string("message", p);
        f->close_section();
      }
      f->close_section();
    }
  }

  static void generate_test_instances(std::list<health_check_t*>& ls) {
    ls.push_back(new health_check_t);
    ls.push_back(new health_check_t);
    ls.back()->severity = HEALTH_ERR;
    ls.back()->summary = "summarization";
    ls.back()->detail = {"one", "two", "three"};
    ls.back()->count = 42;
  }
};
WRITE_CLASS_DENC(health_check_t)
/**
 * A mute entry suppressing the reporting of one health check.
 */
struct health_mute_t {
  std::string code;     ///< health check code being muted (e.g. "OSD_DOWN")
  utime_t ttl;          ///< expiry time; a zero utime_t means no expiry (see dump())
  bool sticky = false;  ///< sticky-mute flag; semantics enforced by the caller
  std::string summary;  ///< summary of the check at mute time
  // initialize to 0: previously left indeterminate, so a default-constructed
  // instance (as in generate_test_instances) encoded/dumped garbage; this
  // also matches health_check_t::count
  int64_t count = 0;    ///< check count captured at mute time

  DENC(health_mute_t, v, p) {
    DENC_START(1, 1, p);
    denc(v.code, p);
    denc(v.ttl, p);
    denc(v.sticky, p);
    denc(v.summary, p);
    denc(v.count, p);
    DENC_FINISH(p);
  }

  void dump(ceph::Formatter *f) const {
    f->dump_string("code", code);
    if (ttl != utime_t()) {
      f->dump_stream("ttl") << ttl;
    }
    f->dump_bool("sticky", sticky);
    f->dump_string("summary", summary);
    f->dump_int("count", count);
  }

  static void generate_test_instances(std::list<health_mute_t*>& ls) {
    ls.push_back(new health_mute_t);
    ls.push_back(new health_mute_t);
    ls.back()->code = "OSD_DOWN";
    ls.back()->ttl = utime_t(1, 2);
    ls.back()->sticky = true;
    ls.back()->summary = "foo bar";
    ls.back()->count = 2;
  }
};
WRITE_CLASS_DENC(health_mute_t)
/**
 * The full set of health checks, keyed by check code (e.g. "OSD_DOWN").
 */
struct health_check_map_t {
  std::map<std::string,health_check_t> checks;

  DENC(health_check_map_t, v, p) {
    DENC_START(1, 1, p);
    denc(v.checks, p);
    DENC_FINISH(p);
  }

  void dump(ceph::Formatter *f) const {
    for (auto& [code, check] : checks) {
      f->dump_object(code, check);
    }
  }

  static void generate_test_instances(std::list<health_check_map_t*>& ls) {
    ls.push_back(new health_check_map_t);
    ls.push_back(new health_check_map_t);
    {
      auto& d = ls.back()->add("FOO", HEALTH_WARN, "foo", 2);
      d.detail.push_back("a");
      d.detail.push_back("b");
    }
    {
      auto& d = ls.back()->add("BAR", HEALTH_ERR, "bar!", 3);
      d.detail.push_back("c");
      d.detail.push_back("d");
      d.detail.push_back("e");
    }
  }

  void clear() {
    checks.clear();
  }
  bool empty() const {
    return checks.empty();
  }
  void swap(health_check_map_t& other) {
    checks.swap(other.checks);
  }

  /// Add a new check; asserts @p code is not already present.
  health_check_t& add(const std::string& code,
                      health_status_t severity,
                      const std::string& summary,
                      int64_t count) {
    ceph_assert(checks.count(code) == 0);
    health_check_t& r = checks[code];
    r.severity = severity;
    r.summary = summary;
    r.count = count;
    return r;
  }
  /// Add or update a check; overwrites severity/summary, accumulates count.
  health_check_t& get_or_add(const std::string& code,
                             health_status_t severity,
                             const std::string& summary,
                             int64_t count) {
    health_check_t& r = checks[code];
    r.severity = severity;
    r.summary = summary;
    r.count += count;
    return r;
  }

  /// Fold @p o into this map, concatenating details and summing counts
  /// for codes present in both.
  void merge(const health_check_map_t& o) {
    for (auto& [code, check] : o.checks) {
      auto [it, new_check] = checks.try_emplace(code, check);
      if (!new_check) {
        // merge details, and hope the summary matches!
        it->second.detail.insert(
          it->second.detail.end(),
          check.detail.begin(),
          check.detail.end());
        it->second.count += check.count;
      }
    }
  }

  friend bool operator==(const health_check_map_t& l,
                         const health_check_map_t& r) {
    return l.checks == r.checks;
  }
  friend bool operator!=(const health_check_map_t& l,
                         const health_check_map_t& r) {
    return !(l == r);
  }
};
WRITE_CLASS_DENC(health_check_map_t)
h
|
null |
ceph-main/src/mon/mon_types.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MON_TYPES_H
#define CEPH_MON_TYPES_H
#include <map>
#include "include/Context.h"
#include "include/util.h"
#include "include/utime.h"
#include "common/Formatter.h"
#include "common/bit_str.h"
#include "common/ceph_releases.h"
// use as paxos_service index
enum {
PAXOS_MDSMAP,
PAXOS_OSDMAP,
PAXOS_LOG,
PAXOS_MONMAP,
PAXOS_AUTH,
PAXOS_MGR,
PAXOS_MGRSTAT,
PAXOS_HEALTH,
PAXOS_CONFIG,
PAXOS_KV,
PAXOS_NUM
};
#define CEPH_MON_ONDISK_MAGIC "ceph mon volume v012"
// map of entity_type -> features -> count
struct FeatureMap {
  // entity type -> feature bitset -> number of counted instances
  std::map<uint32_t,std::map<uint64_t,uint64_t>> m;

  // Count one non-mon entity; monitor entries are only ever added via
  // add_mon(), so a mon type here is a deliberate no-op.
  void add(uint32_t type, uint64_t features) {
    if (type == CEPH_ENTITY_TYPE_MON) {
      return;
    }
    m[type][features]++;
  }

  // Count one monitor.
  void add_mon(uint64_t features) {
    m[CEPH_ENTITY_TYPE_MON][features]++;
  }

  // Drop one instance; asserts the (type, features) bucket exists and
  // erases buckets that reach zero.
  // NOTE(review): mons are skipped here even though add_mon() counts
  // them -- confirm mon entries are rebuilt/removed elsewhere.
  void rm(uint32_t type, uint64_t features) {
    if (type == CEPH_ENTITY_TYPE_MON) {
      return;
    }
    auto p = m.find(type);
    ceph_assert(p != m.end());
    auto q = p->second.find(features);
    ceph_assert(q != p->second.end());
    if (--q->second == 0) {
      p->second.erase(q);
      if (p->second.empty()) {
	m.erase(p);
      }
    }
  }

  // Merge counts from another map into this one.
  FeatureMap& operator+=(const FeatureMap& o) {
    for (auto& p : o.m) {
      auto &v = m[p.first];
      for (auto& q : p.second) {
	v[q.first] += q.second;
      }
    }
    return *this;
  }

  // Single-version wire encoding of the nested map.
  void encode(ceph::buffer::list& bl) const {
    ENCODE_START(1, 1, bl);
    encode(m, bl);
    ENCODE_FINISH(bl);
  }

  void decode(ceph::buffer::list::const_iterator& p) {
    DECODE_START(1, p);
    decode(m, p);
    DECODE_FINISH(p);
  }

  // One array section per entity type; features rendered as hex plus the
  // release name inferred from the feature bits.
  void dump(ceph::Formatter *f) const {
    for (auto& p : m) {
      f->open_array_section(ceph_entity_type_name(p.first));
      for (auto& q : p.second) {
	f->open_object_section("group");
	std::stringstream ss;
	ss << "0x" << std::hex << q.first << std::dec;
	f->dump_string("features", ss.str());
	f->dump_string("release", ceph_release_name(
			 ceph_release_from_features(q.first)));
	f->dump_unsigned("num", q.second);
	f->close_section();
      }
      f->close_section();
    }
  }
};
WRITE_CLASS_ENCODER(FeatureMap)
/**
* monitor db store stats
*/
struct MonitorDBStoreStats {
  uint64_t bytes_total;  // total store footprint
  uint64_t bytes_sst;    // portion attributed to sst files (per name)
  uint64_t bytes_log;    // portion attributed to the log (per name)
  uint64_t bytes_misc;   // remainder
  utime_t last_update;   // when these figures were last refreshed

  MonitorDBStoreStats() :
    bytes_total(0),
    bytes_sst(0),
    bytes_log(0),
    bytes_misc(0)
  {}

  void dump(ceph::Formatter *f) const {
    ceph_assert(f != NULL);
    f->dump_int("bytes_total", bytes_total);
    f->dump_int("bytes_sst", bytes_sst);
    f->dump_int("bytes_log", bytes_log);
    f->dump_int("bytes_misc", bytes_misc);
    f->dump_stream("last_updated") << last_update;
  }

  // v1 wire format: the four byte counters followed by the timestamp.
  void encode(ceph::buffer::list &bl) const {
    ENCODE_START(1, 1, bl);
    encode(bytes_total, bl);
    encode(bytes_sst, bl);
    encode(bytes_log, bl);
    encode(bytes_misc, bl);
    encode(last_update, bl);
    ENCODE_FINISH(bl);
  }

  void decode(ceph::buffer::list::const_iterator &p) {
    DECODE_START(1, p);
    decode(bytes_total, p);
    decode(bytes_sst, p);
    decode(bytes_log, p);
    decode(bytes_misc, p);
    decode(last_update, p);
    DECODE_FINISH(p);
  }

  // Sample instances for encode/decode round-trip tests.
  static void generate_test_instances(std::list<MonitorDBStoreStats*>& ls) {
    ls.push_back(new MonitorDBStoreStats);
    ls.push_back(new MonitorDBStoreStats);
    ls.back()->bytes_total = 1024*1024;
    ls.back()->bytes_sst = 512*1024;
    ls.back()->bytes_log = 256*1024;
    ls.back()->bytes_misc = 256*1024;
    ls.back()->last_update = utime_t();
  }
};
WRITE_CLASS_ENCODER(MonitorDBStoreStats)
// data stats
struct DataStats {
  ceph_data_stats_t fs_stats;  // filesystem usage figures (bytes)
  // data dir
  utime_t last_update;
  MonitorDBStoreStats store_stats;

  // Dumped in kB for backward-compatible output, although the fields are
  // stored in bytes.
  void dump(ceph::Formatter *f) const {
    ceph_assert(f != NULL);
    f->dump_int("kb_total", (fs_stats.byte_total/1024));
    f->dump_int("kb_used", (fs_stats.byte_used/1024));
    f->dump_int("kb_avail", (fs_stats.byte_avail/1024));
    f->dump_int("avail_percent", fs_stats.avail_percent);
    f->dump_stream("last_updated") << last_update;
    f->open_object_section("store_stats");
    store_stats.dump(f);
    f->close_section();
  }

  // v3: sizes in bytes; v2 added store_stats; v1/v2 stored sizes in kB.
  void encode(ceph::buffer::list &bl) const {
    ENCODE_START(3, 1, bl);
    encode(fs_stats.byte_total, bl);
    encode(fs_stats.byte_used, bl);
    encode(fs_stats.byte_avail, bl);
    encode(fs_stats.avail_percent, bl);
    encode(last_update, bl);
    encode(store_stats, bl);
    ENCODE_FINISH(bl);
  }

  void decode(ceph::buffer::list::const_iterator &p) {
    DECODE_START(1, p);
    // we moved from having fields in kb to fields in byte
    if (struct_v > 2) {
      decode(fs_stats.byte_total, p);
      decode(fs_stats.byte_used, p);
      decode(fs_stats.byte_avail, p);
    } else {
      // legacy kB fields: scale up to bytes on the way in
      uint64_t t;
      decode(t, p);
      fs_stats.byte_total = t*1024;
      decode(t, p);
      fs_stats.byte_used = t*1024;
      decode(t, p);
      fs_stats.byte_avail = t*1024;
    }
    decode(fs_stats.avail_percent, p);
    decode(last_update, p);
    if (struct_v > 1)
      decode(store_stats, p);
    DECODE_FINISH(p);
  }
};
WRITE_CLASS_ENCODER(DataStats)
struct ScrubResult {
  std::map<std::string,uint32_t> prefix_crc;  ///< prefix -> crc
  std::map<std::string,uint64_t> prefix_keys; ///< prefix -> key count

  /// Inequality over both maps.  Now const-qualified: the original
  /// overload was missing the qualifier, so it could not be invoked
  /// through const references.
  bool operator!=(const ScrubResult& other) const {
    return prefix_crc != other.prefix_crc || prefix_keys != other.prefix_keys;
  }

  // v1 wire format: the two maps back to back.
  void encode(ceph::buffer::list& bl) const {
    ENCODE_START(1, 1, bl);
    encode(prefix_crc, bl);
    encode(prefix_keys, bl);
    ENCODE_FINISH(bl);
  }
  void decode(ceph::buffer::list::const_iterator& p) {
    DECODE_START(1, p);
    decode(prefix_crc, p);
    decode(prefix_keys, p);
    DECODE_FINISH(p);
  }
  // Dump crcs and key counts under separate "crc"/"keys" sections.
  void dump(ceph::Formatter *f) const {
    f->open_object_section("crc");
    for (auto p = prefix_crc.begin(); p != prefix_crc.end(); ++p)
      f->dump_unsigned(p->first.c_str(), p->second);
    f->close_section();
    f->open_object_section("keys");
    for (auto p = prefix_keys.begin(); p != prefix_keys.end(); ++p)
      f->dump_unsigned(p->first.c_str(), p->second);
    f->close_section();
  }
  // Sample instances for encode/decode round-trip tests.
  static void generate_test_instances(std::list<ScrubResult*>& ls) {
    ls.push_back(new ScrubResult);
    ls.push_back(new ScrubResult);
    ls.back()->prefix_crc["foo"] = 123;
    ls.back()->prefix_keys["bar"] = 456;
  }
};
WRITE_CLASS_ENCODER(ScrubResult)
// Human-readable one-liner for logs/debugging.
inline std::ostream& operator<<(std::ostream& out, const ScrubResult& r) {
  out << "ScrubResult(keys " << r.prefix_keys << " crc " << r.prefix_crc << ")";
  return out;
}
/// for information like os, kernel, hostname, memory info, cpu model.
typedef std::map<std::string, std::string> Metadata;
namespace ceph {
namespace features {
namespace mon {
/**
* Get a feature's name based on its value.
*
* @param b raw feature value
*
* @remarks
* Consumers should not assume this interface will never change.
* @remarks
* As the number of features increase, so may the internal representation
* of the raw features. When this happens, this interface will change
* accordingly. So should consumers of this interface.
*/
static inline const char *get_feature_name(uint64_t b);
}
}
}
/**
 * C-style convenience wrapper around
 * ceph::features::mon::get_feature_name().
 *
 * @param b raw feature bit value
 * @return static name string for the feature ("unknown" if unrecognized)
 *
 * Fix: dropped the stray semicolon that followed the function body (it
 * formed a useless empty declaration).
 */
inline const char *ceph_mon_feature_name(uint64_t b)
{
  return ceph::features::mon::get_feature_name(b);
}
/**
 * Set of monitor-specific feature bits, with set-algebra helpers and a
 * versioned wire encoding (HEAD_VERSION/COMPAT_VERSION below).
 */
class mon_feature_t {
  static constexpr int HEAD_VERSION = 1;
  static constexpr int COMPAT_VERSION = 1;

  // mon-specific features
  uint64_t features;

public:

  explicit constexpr
  mon_feature_t(const uint64_t f) : features(f) { }

  mon_feature_t() :
    features(0) { }

  constexpr
  mon_feature_t(const mon_feature_t &o) :
    features(o.features) { }

  // in-place intersection
  mon_feature_t& operator&=(const mon_feature_t other) {
    features &= other.features;
    return (*this);
  }

  /**
   * Obtain raw features
   *
   * @remarks
   *    Consumers should not assume this interface will never change.
   * @remarks
   *    As the number of features increase, so may the internal representation
   *    of the raw features. When this happens, this interface will change
   *    accordingly. So should consumers of this interface.
   */
  uint64_t get_raw() const {
    return features;
  }

  constexpr
  friend mon_feature_t operator&(const mon_feature_t a,
                                 const mon_feature_t b) {
    return mon_feature_t(a.features & b.features);
  }

  // in-place union
  mon_feature_t& operator|=(const mon_feature_t other) {
    features |= other.features;
    return (*this);
  }

  constexpr
  friend mon_feature_t operator|(const mon_feature_t a,
                                 const mon_feature_t b) {
    return mon_feature_t(a.features | b.features);
  }

  // symmetric difference
  constexpr
  friend mon_feature_t operator^(const mon_feature_t a,
                                 const mon_feature_t b) {
    return mon_feature_t(a.features ^ b.features);
  }

  mon_feature_t& operator^=(const mon_feature_t other) {
    features ^= other.features;
    return (*this);
  }

  bool operator==(const mon_feature_t other) const {
    return (features == other.features);
  }

  bool operator!=(const mon_feature_t other) const {
    return (features != other.features);
  }

  // true when no feature bit is set
  bool empty() const {
    return features == 0;
  }

  /**
   * Set difference of our features in respect to @p other
   *
   * Returns all the elements in our features that are not in @p other
   *
   * @returns all the features not in @p other
   */
  mon_feature_t diff(const mon_feature_t other) const {
    return mon_feature_t((features ^ other.features) & features);
  }

  /**
   * Set intersection of our features and @p other
   *
   * Returns all the elements common to both our features and the
   * features of @p other
   *
   * @returns the features common to @p other and us
   */
  mon_feature_t intersection(const mon_feature_t other) const {
    return mon_feature_t((features & other.features));
  }

  /**
   * Checks whether we have all the features in @p other
   *
   * Returns true if we have all the features in @p other
   *
   * @returns true if we contain all the features in @p other
   * @returns false if we do not contain some of the features in @p other
   */
  bool contains_all(const mon_feature_t other) const {
    mon_feature_t d = intersection(other);
    return d == other;
  }

  /**
   * Checks whether we contain any of the features in @p other.
   *
   * @returns true if we contain any of the features in @p other
   * @returns false if we don't contain any of the features in @p other
   */
  bool contains_any(const mon_feature_t other) const {
    mon_feature_t d = intersection(other);
    return !d.empty();
  }

  void set_feature(const mon_feature_t f) {
    features |= f.features;
  }

  void unset_feature(const mon_feature_t f) {
    features &= ~(f.features);
  }

  // render as "[name1,name2,...]"
  void print(std::ostream& out) const {
    out << "[";
    print_bit_str(features, out, ceph::features::mon::get_feature_name);
    out << "]";
  }

  // like print(), but includes the raw bit values
  void print_with_value(std::ostream& out) const {
    out << "[";
    print_bit_str(features, out, ceph::features::mon::get_feature_name, true);
    out << "]";
  }

  void dump(ceph::Formatter *f, const char *sec_name = NULL) const {
    f->open_array_section((sec_name ? sec_name : "features"));
    dump_bit_str(features, f, ceph::features::mon::get_feature_name);
    f->close_section();
  }

  void dump_with_value(ceph::Formatter *f, const char *sec_name = NULL) const {
    f->open_array_section((sec_name ? sec_name : "features"));
    dump_bit_str(features, f, ceph::features::mon::get_feature_name, true);
    f->close_section();
  }

  // versioned encoding of the raw 64-bit feature word
  void encode(ceph::buffer::list& bl) const {
    ENCODE_START(HEAD_VERSION, COMPAT_VERSION, bl);
    encode(features, bl);
    ENCODE_FINISH(bl);
  }

  void decode(ceph::buffer::list::const_iterator& p) {
    DECODE_START(COMPAT_VERSION, p);
    decode(features, p);
    DECODE_FINISH(p);
  }
};
WRITE_CLASS_ENCODER(mon_feature_t)
namespace ceph {
namespace features {
namespace mon {
// Feature bit assignments.  These values are persisted (see
// get_persistent() below), so existing bits must never be renumbered
// or reused.
constexpr mon_feature_t FEATURE_KRAKEN( (1ULL << 0));
constexpr mon_feature_t FEATURE_LUMINOUS( (1ULL << 1));
constexpr mon_feature_t FEATURE_MIMIC( (1ULL << 2));
constexpr mon_feature_t FEATURE_OSDMAP_PRUNE (1ULL << 3);
constexpr mon_feature_t FEATURE_NAUTILUS( (1ULL << 4));
constexpr mon_feature_t FEATURE_OCTOPUS( (1ULL << 5));
constexpr mon_feature_t FEATURE_PACIFIC( (1ULL << 6));
// elector pinging and CONNECTIVITY mode:
constexpr mon_feature_t FEATURE_PINGING( (1ULL << 7));
constexpr mon_feature_t FEATURE_QUINCY( (1ULL << 8));
constexpr mon_feature_t FEATURE_REEF( (1ULL << 9));

// top bit kept aside as a sentinel value
constexpr mon_feature_t FEATURE_RESERVED( (1ULL << 63));
constexpr mon_feature_t FEATURE_NONE( (0ULL));

/**
 * All the features this monitor supports
 *
 * If there's a feature above, it should be OR'ed to this list.
 */
constexpr mon_feature_t get_supported() {
  return (
    FEATURE_KRAKEN |
    FEATURE_LUMINOUS |
    FEATURE_MIMIC |
    FEATURE_OSDMAP_PRUNE |
    FEATURE_NAUTILUS |
    FEATURE_OCTOPUS |
    FEATURE_PACIFIC |
    FEATURE_PINGING |
    FEATURE_QUINCY |
    FEATURE_REEF |
    FEATURE_NONE
    );
}

/**
 * All the features that, once set, cannot be removed.
 *
 * Features should only be added to this list if you want to make
 * sure downgrades are not possible after a quorum supporting all
 * these features has been formed.
 *
 * Any feature in this list will be automatically set on the monmap's
 * features once all the monitors in the quorum support it.
 */
constexpr mon_feature_t get_persistent() {
  return (
    FEATURE_KRAKEN |
    FEATURE_LUMINOUS |
    FEATURE_MIMIC |
    FEATURE_NAUTILUS |
    FEATURE_OSDMAP_PRUNE |
    FEATURE_OCTOPUS |
    FEATURE_PACIFIC |
    FEATURE_PINGING |
    FEATURE_QUINCY |
    FEATURE_REEF |
    FEATURE_NONE
    );
}

// Features that are supported but not required for quorum.
constexpr mon_feature_t get_optional() {
  return (
    FEATURE_OSDMAP_PRUNE |
    FEATURE_NONE
    );
}

static inline mon_feature_t get_feature_by_name(const std::string &n);
}
}
}
// Infer the newest release implied by a set of mon features: walk the
// release/feature pairs newest-first and return the first release whose
// feature bit is present; "unknown" when none match.
static inline ceph_release_t infer_ceph_release_from_mon_features(mon_feature_t f)
{
  static constexpr struct {
    mon_feature_t feature;
    ceph_release_t release;
  } release_map[] = {
    {ceph::features::mon::FEATURE_REEF, ceph_release_t::reef},
    {ceph::features::mon::FEATURE_QUINCY, ceph_release_t::quincy},
    {ceph::features::mon::FEATURE_PACIFIC, ceph_release_t::pacific},
    {ceph::features::mon::FEATURE_OCTOPUS, ceph_release_t::octopus},
    {ceph::features::mon::FEATURE_NAUTILUS, ceph_release_t::nautilus},
    {ceph::features::mon::FEATURE_MIMIC, ceph_release_t::mimic},
    {ceph::features::mon::FEATURE_LUMINOUS, ceph_release_t::luminous},
    {ceph::features::mon::FEATURE_KRAKEN, ceph_release_t::kraken},
  };
  for (const auto& [feature, release] : release_map) {
    if (f.contains_all(feature))
      return release;
  }
  return ceph_release_t::unknown;
}
static inline const char *ceph::features::mon::get_feature_name(uint64_t b) {
mon_feature_t f(b);
if (f == FEATURE_KRAKEN) {
return "kraken";
} else if (f == FEATURE_LUMINOUS) {
return "luminous";
} else if (f == FEATURE_MIMIC) {
return "mimic";
} else if (f == FEATURE_OSDMAP_PRUNE) {
return "osdmap-prune";
} else if (f == FEATURE_NAUTILUS) {
return "nautilus";
} else if (f == FEATURE_PINGING) {
return "elector-pinging";
} else if (f == FEATURE_OCTOPUS) {
return "octopus";
} else if (f == FEATURE_PACIFIC) {
return "pacific";
} else if (f == FEATURE_QUINCY) {
return "quincy";
} else if (f == FEATURE_REEF) {
return "reef";
} else if (f == FEATURE_RESERVED) {
return "reserved";
}
return "unknown";
}
// Inverse of get_feature_name(): map a user-facing name back to its
// feature bit; FEATURE_NONE for unrecognized names.
// NOTE(review): FEATURE_PINGING is matched here as "feature-pinging",
// but get_feature_name() prints it as "elector-pinging", so the two
// functions do not round-trip for that bit -- confirm which spelling
// is intended before changing either side.
inline mon_feature_t ceph::features::mon::get_feature_by_name(const std::string &n) {

  if (n == "kraken") {
    return FEATURE_KRAKEN;
  } else if (n == "luminous") {
    return FEATURE_LUMINOUS;
  } else if (n == "mimic") {
    return FEATURE_MIMIC;
  } else if (n == "osdmap-prune") {
    return FEATURE_OSDMAP_PRUNE;
  } else if (n == "nautilus") {
    return FEATURE_NAUTILUS;
  } else if (n == "feature-pinging") {
    return FEATURE_PINGING;
  } else if (n == "octopus") {
    return FEATURE_OCTOPUS;
  } else if (n == "pacific") {
    return FEATURE_PACIFIC;
  } else if (n == "quincy") {
    return FEATURE_QUINCY;
  } else if (n == "reef") {
    return FEATURE_REEF;
  } else if (n == "reserved") {
    return FEATURE_RESERVED;
  }
  return FEATURE_NONE;
}
// Stream as "mon_feature_t([name1,name2,...])".
inline std::ostream& operator<<(std::ostream& out, const mon_feature_t& f) {
  out << "mon_feature_t(";
  f.print(out);
  return out << ")";
}
struct ProgressEvent {
  std::string message;        ///< event description
  float progress = 0.0f;      ///< completion, [0..1]
  bool add_to_ceph_s = false; ///< surface this event in status output (per name)
  // Fix: progress and add_to_ceph_s now carry in-class initializers.  A
  // default-constructed event -- or a pre-v2 decode of an event with an
  // empty message, which leaves the flag untouched below -- previously
  // exposed indeterminate values.

  // v2 adds the add_to_ceph_s flag; v1 carried only message + progress.
  void encode(ceph::buffer::list& bl) const {
    ENCODE_START(2, 1, bl);
    encode(message, bl);
    encode(progress, bl);
    encode(add_to_ceph_s, bl);
    ENCODE_FINISH(bl);
  }
  void decode(ceph::buffer::list::const_iterator& p) {
    DECODE_START(2, p);
    decode(message, p);
    decode(progress, p);
    if (struct_v >= 2){
      decode(add_to_ceph_s, p);
    } else {
      // pre-v2 streams had no flag: show the event iff it has a message
      if (!message.empty()) {
	add_to_ceph_s = true;
      }
    }
    DECODE_FINISH(p);
  }
  void dump(ceph::Formatter *f) const {
    f->dump_string("message", message);
    f->dump_float("progress", progress);
    f->dump_bool("add_to_ceph_s", add_to_ceph_s);
  }
};
WRITE_CLASS_ENCODER(ProgressEvent)
#endif
| 18,274 | 26.034024 | 87 |
h
|
null |
ceph-main/src/mount/canonicalize.c
|
/*
* canonicalize.c -- canonicalize pathname by removing symlinks
* Copyright (C) 1993 Rick Sladkey <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Library Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Library Public License for more details.
*
*/
/*
* This routine is part of libc. We include it nevertheless,
* since the libc version has some security flaws.
*
* TODO: use canonicalize_file_name() when exist in glibc
*/
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <unistd.h>
#include <errno.h>
#include <stdlib.h>
#include <limits.h>
#ifndef MAXSYMLINKS
# define MAXSYMLINKS 256
#endif
/*
 * Resolve @path into @resolved_path (caller-provided buffer of at least
 * @maxreslth bytes), expanding "." and "..", collapsing duplicate
 * slashes, and following symlinks component by component.  Returns
 * @resolved_path on success, NULL on failure with errno set
 * (ENAMETOOLONG, ELOOP, or whatever getcwd/readlink/malloc reported).
 */
static char *
myrealpath(const char *path, char *resolved_path, int maxreslth) {
	int readlinks = 0;
	char *npath;
	char *link_path;
	int n;
	char *buf = NULL;	/* holds the rewritten remainder after a
				 * symlink is spliced into the input */

	npath = resolved_path;

	/* If it's a relative pathname use getcwd for starters. */
	if (*path != '/') {
		if (!getcwd(npath, maxreslth-2))
			return NULL;
		npath += strlen(npath);
		if (npath[-1] != '/')
			*npath++ = '/';
	} else {
		*npath++ = '/';
		path++;
	}

	/* Expand each slash-separated pathname component. */
	link_path = malloc(PATH_MAX+1);
	if (!link_path)
		return NULL;
	while (*path != '\0') {
		/* Ignore stray "/" */
		if (*path == '/') {
			path++;
			continue;
		}
		if (*path == '.' && (path[1] == '\0' || path[1] == '/')) {
			/* Ignore "." */
			path++;
			continue;
		}
		if (*path == '.' && path[1] == '.' &&
		    (path[2] == '\0' || path[2] == '/')) {
			/* Backup for ".." -- but never above the root "/" */
			path += 2;
			while (npath > resolved_path+1 &&
			       (--npath)[-1] != '/')
				;
			continue;
		}
		/* Safely copy the next pathname component. */
		while (*path != '\0' && *path != '/') {
			if (npath-resolved_path > maxreslth-2) {
				errno = ENAMETOOLONG;
				goto err;
			}
			*npath++ = *path++;
		}

		/* Protect against infinite loops. */
		if (readlinks++ > MAXSYMLINKS) {
			errno = ELOOP;
			goto err;
		}

		/* See if last pathname component is a symlink. */
		*npath = '\0';
		n = readlink(resolved_path, link_path, PATH_MAX);
		if (n < 0) {
			/* EINVAL means the file exists but isn't a symlink. */
			if (errno != EINVAL)
				goto err;
		} else {
			int m;
			char *newbuf;

			/* Note: readlink doesn't add the null byte. */
			link_path[n] = '\0';
			if (*link_path == '/')
				/* Start over for an absolute symlink. */
				npath = resolved_path;
			else
				/* Otherwise back up over this component. */
				while (*(--npath) != '/')
					;

			/* Insert symlink contents into path: the link
			 * target plus the not-yet-consumed remainder
			 * become the new input to walk. */
			m = strlen(path);
			newbuf = malloc(m + n + 1);
			if (!newbuf)
				goto err;
			memcpy(newbuf, link_path, n);
			memcpy(newbuf + n, path, m + 1);
			free(buf);
			path = buf = newbuf;
		}
		*npath++ = '/';
	}
	/* Delete trailing slash but don't whomp a lone slash. */
	if (npath != resolved_path+1 && npath[-1] == '/')
		npath--;

	/* Make sure it's null terminated. */
	*npath = '\0';

	free(link_path);
	free(buf);
	return resolved_path;

 err:
	free(link_path);
	free(buf);
	return NULL;
}
/*
* Converts private "dm-N" names to "/dev/mapper/<name>"
*
* Since 2.6.29 (patch 784aae735d9b0bba3f8b9faef4c8b30df3bf0128) kernel sysfs
* provides the real DM device names in /sys/block/<ptname>/dm/name
*/
/*
 * Resolve a kernel-private "dm-N" name to its "/dev/mapper/<name>"
 * alias via sysfs.  Returns a malloc'd string, or NULL when the sysfs
 * entry is missing or unreadable.
 */
char *
canonicalize_dm_name(const char *ptname)
{
	char path[268], name[256];
	char *result = NULL;
	FILE *f;
	size_t sz;

	snprintf(path, sizeof(path), "/sys/block/%s/dm/name", ptname);
	f = fopen(path, "r");
	if (!f)
		return NULL;

	/* sysfs yields "<name>\n"; strip the trailing newline */
	if (fgets(name, sizeof(name), f)) {
		sz = strlen(name);
		if (sz > 1) {
			name[sz - 1] = '\0';
			snprintf(path, sizeof(path), "/dev/mapper/%s", name);
			result = strdup(path);
		}
	}
	fclose(f);
	return result;
}
/*
 * Return a malloc'd canonical form of @path: symlinks resolved via
 * myrealpath(), and private "dm-N" device names mapped to their
 * /dev/mapper aliases.  Falls back to a plain copy of the input when
 * resolution fails; NULL only for NULL input or allocation failure.
 */
char *
canonicalize_path(const char *path)
{
	char *resolved;
	char *base;

	if (!path)
		return NULL;

	resolved = malloc(PATH_MAX+2);
	if (!resolved)
		return NULL;

	if (!myrealpath(path, resolved, PATH_MAX+1)) {
		free(resolved);
		return strdup(path);
	}

	/* swap a trailing /dm-<digit> component for its mapper name */
	base = strrchr(resolved, '/');
	if (base && strncmp(base, "/dm-", 4) == 0 && isdigit(*(base + 4))) {
		char *mapped = canonicalize_dm_name(base + 1);
		if (mapped) {
			free(resolved);
			return mapped;
		}
	}
	return resolved;
}
| 4,505 | 21.088235 | 77 |
c
|
null |
ceph-main/src/mount/conf.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <string>
#include <vector>
#include <cstring>
#include <map>
#include "common/async/context_pool.h"
#include "common/ceph_context.h"
#include "common/ceph_argparse.h"
#include "common/config.h"
#include "global/global_init.h"
#include "auth/KeyRing.h"
#include "mon/MonClient.h"
#include "mount.ceph.h"
using namespace std;
/*
 * C entry point used by the mount.ceph helper: best-effort discovery of
 * cluster info from the local ceph configuration.  Fills @cci with the
 * cluster fsid, a comma-separated monitor address list (v2 or v1
 * addresses per @v2_addrs), and the base64 auth secret for entity
 * @name.  Fields that cannot be discovered are left untouched; failures
 * are only reported via mount_ceph_debug().
 */
extern "C" void mount_ceph_get_config_info(const char *config_file,
					   const char *name,
					   bool v2_addrs,
					   struct ceph_config_info *cci)
{
  int err;
  KeyRing keyring;
  CryptoKey secret;
  std::string secret_str;
  std::string monaddrs;
  vector<const char *> args = { "--name", name };
  bool first = true;

  if (config_file) {
    args.push_back("--conf");
    args.push_back(config_file);
  }

  /* Create CephContext */
  auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
			 CODE_ENVIRONMENT_UTILITY,
			 CINIT_FLAG_NO_DAEMON_ACTIONS|CINIT_FLAG_NO_MON_CONFIG);
  auto& conf = cct->_conf;

  conf.parse_env(cct->get_module_type()); // environment variables override
  conf.apply_changes(nullptr);

  auto fsid = conf.get_val<uuid_d>("fsid");
  fsid.print(cci->cci_fsid);

  ceph::async::io_context_pool ioc(1);
  MonClient monc = MonClient(cct.get(), ioc);
  err = monc.build_initial_monmap();
  if (err)
    /* no monmap -- still try to scrape the secret below */
    goto scrape_keyring;

  for (const auto& mon : monc.monmap.addr_mons) {
    auto& eaddr = mon.first;

    /*
     * Filter v1 addrs if we're running in ms_mode=legacy. Filter
     * v2 addrs for any other ms_mode.
     */
    if (v2_addrs) {
      if (!eaddr.is_msgr2())
	continue;
    } else {
      if (!eaddr.is_legacy())
	continue;
    }

    std::string addr = eaddr.ip_n_port_to_str();

    /* If this will overrun cci_mons, stop here */
    if (monaddrs.length() + 1 + addr.length() + 1 > sizeof(cci->cci_mons))
      break;

    if (first)
      first = false;
    else
      monaddrs += ",";

    monaddrs += addr;
  }

  if (monaddrs.length())
    strcpy(cci->cci_mons, monaddrs.c_str());
  else
    mount_ceph_debug("Could not discover monitor addresses\n");

scrape_keyring:
  err = keyring.from_ceph_context(cct.get());
  if (err) {
    mount_ceph_debug("keyring.from_ceph_context failed: %d\n", err);
    return;
  }

  if (!keyring.get_secret(conf->name, secret)) {
    mount_ceph_debug("keyring.get_secret failed\n");
    return;
  }

  /* secret is handed to the kernel in base64 form */
  secret.encode_base64(secret_str);
  if (secret_str.length() + 1 > sizeof(cci->cci_secret)) {
    mount_ceph_debug("secret is too long\n");
    return;
  }
  strcpy(cci->cci_secret, secret_str.c_str());
}
| 2,638 | 22.774775 | 75 |
cc
|
null |
ceph-main/src/mount/mount.ceph.c
|
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/mount.h>
#include <stdbool.h>
#include <sys/mman.h>
#include <wait.h>
#include <cap-ng.h>
#include <getopt.h>
#include "common/module.h"
#include "common/secret.h"
#include "include/addr_parsing.h"
#include "mount.ceph.h"
#ifndef MS_RELATIME
# define MS_RELATIME (1<<21)
#endif
bool verboseflag = false;
bool fakeflag = false;
bool skip_mtab_flag = false;
bool v2_addrs = true;
bool no_fallback = false;
bool ms_mode_specified = false;
bool mon_addr_specified = false;
static const char * const EMPTY_STRING = "";
/* TODO duplicates logic from kernel */
#define CEPH_AUTH_NAME_DEFAULT "guest"
/* path to sysfs for ceph */
#define CEPH_SYS_FS_PATH "/sys/module/ceph/"
#define CEPH_SYS_FS_PARAM_PATH CEPH_SYS_FS_PATH"/parameters"
/*
* mount support hint from kernel -- we only need to check
* v2 support for catching bugs.
*/
#define CEPH_V2_MOUNT_SUPPORT_PATH CEPH_SYS_FS_PARAM_PATH"/mount_syntax_v2"
#define CEPH_DEFAULT_V2_MS_MODE "prefer-crc"
#include "mtab.c"
/* which device-string syntax the user supplied */
enum mount_dev_format {
	MOUNT_DEV_FORMAT_OLD = 0,	/* <mon_addr>:/<path> */
	MOUNT_DEV_FORMAT_NEW = 1,	/* <name>@<fsid>.<fs_name>=/<path> */
};
/*
 * Everything gathered from the command line, device string, and local
 * config that is needed to issue the final mount(2) call.
 */
struct ceph_mount_info {
	unsigned long	cmi_flags;	/* mount(2) flags */
	char		*cmi_name;	/* ceph user name (without "client.") */
	char		*cmi_fsname;	/* ceph file system name */
	char		*cmi_fsid;	/* cluster fsid, if known */
	char		*cmi_path;	/* path within the fs to mount */
	char		*cmi_mons;	/* monitor address list */
	char		*cmi_conf;	/* path to ceph.conf, if given */
	char		*cmi_opts;	/* accumulated option string */
	int		cmi_opts_len;	/* allocated length of cmi_opts */
	char 		cmi_secret[SECRET_BUFSIZE];	/* auth secret */

	/* mount dev syntax format */
	enum mount_dev_format format;
};
/* In place: rewrite '/'-separated monitor addresses into the
 * ','-separated form that address resolution expects. */
static void mon_addr_as_resolve_param(char *mon_addr)
{
	while (*mon_addr) {
		if (*mon_addr == '/')
			*mon_addr = ',';
		++mon_addr;
	}
}
/* In place: ',' separators become '/' so the resolved address list can
 * be embedded in a mon_addr=<...> mount option. */
static void resolved_mon_addr_as_mount_opt(char *mon_addr)
{
	char *sep;

	for (sep = strchr(mon_addr, ','); sep; sep = strchr(sep, ','))
		*sep = '/';
}
/* In place: '/' separators become ',' for use in the mount device
 * string.  (Same transform as mon_addr_as_resolve_param; kept separate
 * for the distinct call sites.) */
static void resolved_mon_addr_as_mount_dev(char *mon_addr)
{
	char *p = mon_addr;

	while ((p = strchr(p, '/')) != NULL)
		*p = ',';
}
/*
 * Block or unblock (@how: SIG_BLOCK/SIG_UNBLOCK) every signal except
 * SIGTRAP and SIGSEGV, which are left deliverable so crashes and
 * debugging still work.
 */
static void block_signals (int how)
{
	sigset_t sigs;

	sigfillset (&sigs);
	sigdelset(&sigs, SIGTRAP);
	sigdelset(&sigs, SIGSEGV);
	sigprocmask (how, &sigs, (sigset_t *) 0);
}
/* printf-style debug output, emitted only when -v/verboseflag is set. */
void mount_ceph_debug(const char *fmt, ...)
{
	va_list args;

	if (!verboseflag)
		return;

	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}
/*
* append a key value pair option to option string.
*/
/*
 * Append "key=value" (or just "key" when @value is NULL) to the option
 * string in @cmi, preceded by a ',' for every option after the first.
 * *pos tracks the running write offset across calls.
 */
static void append_opt(const char *key, const char *value,
		       struct ceph_mount_info *cmi, int *pos)
{
	if (*pos)
		*pos = safe_cat(&cmi->cmi_opts, &cmi->cmi_opts_len, *pos, ",");

	*pos = safe_cat(&cmi->cmi_opts, &cmi->cmi_opts_len, *pos, key);
	if (value) {
		*pos = safe_cat(&cmi->cmi_opts, &cmi->cmi_opts_len, *pos, "=");
		*pos = safe_cat(&cmi->cmi_opts, &cmi->cmi_opts_len, *pos, value);
	}
}
/*
* remove a key value pair from option string. caller should ensure that the
* key value pair is separated by "=".
*/
/*
 * Remove a "key=value" pair from cmi->cmi_opts, optionally returning a
 * malloc'd copy of the value via @value.  Returns 0 on success, -ENOENT
 * when no "key=..." pair is found, -ENOMEM on allocation failure.
 *
 * NOTE(review): the initial strstr() may anchor on a substring of a
 * longer option name (e.g. "addr" inside "mon_addr="); the strncmp()
 * below only verifies the text from that anchor up to '=' -- confirm
 * callers never pass keys that can collide this way.
 */
static int remove_opt(struct ceph_mount_info *cmi, const char *key, char **value)
{
	char *key_start = strstr(cmi->cmi_opts, key);
	if (!key_start) {
		return -ENOENT;
	}

	/* key present -- try to split */
	char *key_sep = strstr(key_start, "=");
	if (!key_sep) {
		return -ENOENT;
	}

	if (strncmp(key, key_start, key_sep - key_start) != 0) {
		return -ENOENT;
	}

	++key_sep;
	char *value_end = strstr(key_sep, ",");
	if (!value_end)
		value_end = key_sep + strlen(key_sep);

	if (value_end != key_sep && value) {
		size_t len1 = value_end - key_sep;
		*value = strndup(key_sep, len1+1);
		if (!*value)
			return -ENOMEM;
		(*value)[len1] = '\0';
	}

	/* purge it */
	/* len2 is taken while value_end still points at the ',' (or at the
	 * terminating NUL), so after the ++ below the memmove also copies
	 * the string terminator. */
	size_t len2 = strlen(value_end);
	if (len2) {
		++value_end;
		memmove(key_start, value_end, len2);
	} else {
		/* last kv pair - swallow the comma */
		if (*(key_start - 1) == ',') {
			--key_start;
		}
		*key_start = '\0';
	}

	return 0;
}
/* Store a copy of the ceph user name into cmi->cmi_name. */
static void record_name(const char *name, struct ceph_mount_info *cmi)
{
	int pos = 0, len = 0;

	pos = safe_cat(&cmi->cmi_name, &len, pos, name);
}
/*
* parse old device string of format: <mon_addr>:/<path>
*/
/*
 * Parse the legacy "<mon_addr>:/<path>" device syntax into @cmi.
 * Returns 0 on success, -EINVAL for a malformed string, -ENOMEM on
 * allocation failure.  Also updates the mon_addr_specified global so a
 * device-supplied monitor list wins over the mount option.
 */
static int parse_old_dev(const char *dev_str, struct ceph_mount_info *cmi,
			 int *opt_pos)
{
	size_t len;
	char *mount_path;

	mount_path = strstr(dev_str, ":/");
	if (!mount_path) {
		fprintf(stderr, "source mount path was not specified\n");
		return -EINVAL;
	}

	len = mount_path - dev_str;
	if (len != 0) {
		free(cmi->cmi_mons);
		/* overrides mon_addr passed via mount option (if any) */
		cmi->cmi_mons = strndup(dev_str, len);
		if (!cmi->cmi_mons)
			return -ENOMEM;
		mon_addr_specified = true;
	} else {
		/* reset mon_addr=<> mount option */
		mon_addr_specified = false;
	}

	/* skip the ':' so cmi_path keeps its leading '/' */
	mount_path++;
	cmi->cmi_path = strdup(mount_path);
	if (!cmi->cmi_path)
		return -ENOMEM;
	if (!cmi->cmi_name)
		record_name(CEPH_AUTH_NAME_DEFAULT, cmi);

	cmi->format = MOUNT_DEV_FORMAT_OLD;
	return 0;
}
/*
* parse new device string of format: name@<fsid>.fs_name=/path
*/
/*
 * Parse the new "name@<fsid>.<fs_name>=/<path>" device syntax into
 * @cmi.  Returns 0 on success; -ENODEV means "not new-style syntax"
 * so the caller can fall back to parse_old_dev(); -EINVAL/-ENOMEM are
 * hard failures.
 */
static int parse_new_dev(const char *dev_str, struct ceph_mount_info *cmi,
			 int *opt_pos)
{
	size_t len;
	char *name;
	char *name_end;
	char *dot;
	char *fs_name;

	name_end = strstr(dev_str, "@");
	if (!name_end) {
		/* not new-style: signal the caller to try the old format */
		mount_ceph_debug("invalid new device string format\n");
		return -ENODEV;
	}

	len = name_end - dev_str;
	if (!len) {
		fprintf(stderr, "missing <name> in device\n");
		return -EINVAL;
	}

	name = (char *)alloca(len+1);
	memcpy(name, dev_str, len);
	name[len] = '\0';

	if (cmi->cmi_name && strcmp(cmi->cmi_name, name)) {
		fprintf(stderr, "mismatching ceph user in mount option and device string\n");
		return -EINVAL;
	}

	/* record name and store in option string */
	if (!cmi->cmi_name) {
		record_name(name, cmi);
		append_opt("name", name, cmi, opt_pos);
	}

	++name_end;
	/* check if an fsid is included in the device string */
	dot = strstr(name_end, ".");
	if (!dot) {
		fprintf(stderr, "invalid device string format\n");
		return -EINVAL;
	}
	len = dot - name_end;
	if (len) {
		/* check if this _looks_ like a UUID (length check only) */
		if (len != CLUSTER_FSID_LEN - 1) {
			fprintf(stderr, "invalid device string format\n");
			return -EINVAL;
		}
		cmi->cmi_fsid = strndup(name_end, len);
		if (!cmi->cmi_fsid)
			return -ENOMEM;
	}

	++dot;
	fs_name = strstr(dot, "=");
	if (!fs_name) {
		fprintf(stderr, "invalid device string format\n");
		return -EINVAL;
	}
	len = fs_name - dot;
	if (!len) {
		fprintf(stderr, "missing <fs_name> in device\n");
		return -EINVAL;
	}
	cmi->cmi_fsname = strndup(dot, len);
	if (!cmi->cmi_fsname)
		return -ENOMEM;

	++fs_name;
	if (strlen(fs_name)) {
		cmi->cmi_path = strdup(fs_name);
		if (!cmi->cmi_path)
			return -ENOMEM;
	}

	/* new-style dev - force using v2 addrs first */
	if (!ms_mode_specified && !mon_addr_specified)
		append_opt("ms_mode", CEPH_DEFAULT_V2_MS_MODE, cmi,
			   opt_pos);

	cmi->format = MOUNT_DEV_FORMAT_NEW;
	return 0;
}
/* Try the new name@fsid.fs=/path syntax first; only a clean "does not
 * look new-style" result (-ENODEV) falls back to the old
 * mon_addr:/path parser.  Other parse errors are normalized to
 * -EINVAL. */
static int parse_dev(const char *dev_str, struct ceph_mount_info *cmi,
		     int *opt_pos)
{
	int ret = parse_new_dev(dev_str, cmi, opt_pos);

	if (ret == -ENODEV)
		ret = parse_old_dev(dev_str, cmi, opt_pos);
	else if (ret < 0)
		return -EINVAL;

	if (ret < 0)
		fprintf(stderr, "error parsing device string\n");
	return ret;
}
/* resolve monitor host and optionally record in option string.
* use opt_pos to determine if the caller wants to record the
* resolved address in mount option (c.f., mount_old_device_format).
*/
static int finalize_src(struct ceph_mount_info *cmi, int *opt_pos,
			char **resolved_addr)
{
	char *src;
	size_t len = strlen(cmi->cmi_mons);
	/* work on a stack copy: mon_addr_as_resolve_param mutates it */
	char *addr = alloca(len+1);

	memcpy(addr, cmi->cmi_mons, len+1);
	mon_addr_as_resolve_param(addr);

	src = resolve_addrs(addr);
	if (!src)
		return -1;

	mount_ceph_debug("mount.ceph: resolved to: \"%s\"\n", src);
	if (opt_pos) {
		/* record in the option string (mon_addr=a/b/c form) */
		resolved_mon_addr_as_mount_opt(src);
		append_opt("mon_addr", src, cmi, opt_pos);
	} else if (resolved_addr) {
		/* NOTE(review): strdup result is not checked for NULL --
		 * confirm callers tolerate a NULL *resolved_addr */
		*resolved_addr = strdup(src);
	}

	free(src);
	return 0;
}
/*
 * Reduce this process to just CAP_DAC_READ_SEARCH (needed to read the
 * keyring/config as an unprivileged child).  Returns 0 on success,
 * EX_SYSERR if any libcap-ng step fails.
 */
static int
drop_capabilities()
{
	const char *errmsg = NULL;

	capng_setpid(getpid());
	capng_clear(CAPNG_SELECT_BOTH);
	if (capng_update(CAPNG_ADD, CAPNG_PERMITTED, CAP_DAC_READ_SEARCH))
		errmsg = "Unable to update permitted capability set.\n";
	else if (capng_update(CAPNG_ADD, CAPNG_EFFECTIVE, CAP_DAC_READ_SEARCH))
		errmsg = "Unable to update effective capability set.\n";
	else if (capng_apply(CAPNG_SELECT_BOTH))
		errmsg = "Unable to apply new capability set.\n";

	if (errmsg) {
		fprintf(stderr, "%s", errmsg);
		return EX_SYSERR;
	}
	return 0;
}
/*
* Attempt to fetch info from the local config file, if one is present. Since
* this involves activity that may be dangerous for a privileged task, we
* fork(), have the child drop privileges and do the processing and then hand
* back the results via memory shared with the parent.
*/
static int fetch_config_info(struct ceph_mount_info *cmi)
{
	int ret = 0;
	pid_t pid;
	struct ceph_config_info *cci;

	/* Don't do anything if we already have requisite info */
	if (cmi->cmi_secret[0] && cmi->cmi_mons && cmi->cmi_fsid)
		return 0;

	/* MAP_SHARED so the deprivileged child can hand results back */
	cci = mmap((void *)0, sizeof(*cci), PROT_READ | PROT_WRITE,
		   MAP_ANONYMOUS | MAP_SHARED, -1, 0);
	if (cci == MAP_FAILED) {
		mount_ceph_debug("Unable to allocate memory: %s\n",
				 strerror(errno));
		return EX_SYSERR;
	}

	pid = fork();
	if (pid < 0) {
		mount_ceph_debug("fork() failure: %s\n", strerror(errno));
		ret = EX_SYSERR;
		goto out;
	}

	if (pid == 0) {
		char *entity_name = NULL;
		int name_pos = 0;
		int name_len = 0;

		/* child: drop privileges, then read config as "client.<name>" */
		ret = drop_capabilities();
		if (ret)
			exit(1);

		name_pos = safe_cat(&entity_name, &name_len, name_pos, "client.");
		name_pos = safe_cat(&entity_name, &name_len, name_pos, cmi->cmi_name);
		mount_ceph_get_config_info(cmi->cmi_conf, entity_name, v2_addrs, cci);
		free(entity_name);
		exit(0);
	} else {
		/* parent */
		pid = wait(&ret);
		if (!WIFEXITED(ret)) {
			mount_ceph_debug("Child process terminated abnormally.\n");
			ret = EX_SYSERR;
			goto out;
		}
		ret = WEXITSTATUS(ret);
		if (ret) {
			mount_ceph_debug("Child exited with status %d\n", ret);
			ret = EX_SYSERR;
			goto out;
		}

		/*
		 * Copy values from MAP_SHARED buffer to cmi if we didn't
		 * already find anything and we got something from the child.
		 */
		size_t len;

		if (!cmi->cmi_secret[0] && cci->cci_secret[0]) {
			len = strnlen(cci->cci_secret, SECRET_BUFSIZE);
			if (len < SECRET_BUFSIZE) {
				memcpy(cmi->cmi_secret, cci->cci_secret, len + 1);
			} else {
				mount_ceph_debug("secret is too long (len=%zu max=%zu)!\n", len, SECRET_BUFSIZE);
			}
		}
		if (!cmi->cmi_mons && cci->cci_mons[0]) {
			len = strnlen(cci->cci_mons, MON_LIST_BUFSIZE);
			if (len < MON_LIST_BUFSIZE)
				cmi->cmi_mons = strndup(cci->cci_mons, len + 1);
		}
		if (!cmi->cmi_fsid) {
			len = strnlen(cci->cci_fsid, CLUSTER_FSID_LEN);
			if (len < CLUSTER_FSID_LEN)
				cmi->cmi_fsid = strndup(cci->cci_fsid, len + 1);
		}
	}
out:
	munmap(cci, sizeof(*cci));
	return ret;
}
/*
* this one is partially based on parse_options() from cifs.mount.c
*/
/*
 * Parse the comma-separated -o option string into @cmi.
 *
 * The string is tokenized destructively in place (',' and '=' separators
 * are overwritten with NULs), so the buffer behind @data must be writable
 * despite the const qualifier.  Generic mount flags update cmi->cmi_flags;
 * ceph-specific options are recorded in @cmi, and anything with skip ==
 * false is appended to cmi->cmi_opts (via append_opt, tracked by @opt_pos)
 * for the kernel.  Returns 0 on success or a negative errno.
 */
static int parse_options(const char *data, struct ceph_mount_info *cmi,
int *opt_pos)
{
char * next_keyword = NULL;
char *name = NULL;
/* pointer comparison on purpose: parse_arguments() passes the
 * EMPTY_STRING sentinel when no -o was supplied at all */
if (data == EMPTY_STRING)
goto out;
mount_ceph_debug("parsing options: %s\n", data);
do {
char * value = NULL;
/* skip == true: option is consumed here, not forwarded to the kernel */
bool skip = true;
/* check if ends with trailing comma */
if(*data == 0)
break;
next_keyword = strchr(data,',');
/* temporarily null terminate end of keyword=value pair */
if(next_keyword)
*next_keyword++ = 0;
/* temporarily null terminate keyword to make keyword and value distinct */
if ((value = strchr(data, '=')) != NULL) {
*value = '\0';
value++;
}
if (strcmp(data, "ro") == 0) {
cmi->cmi_flags |= MS_RDONLY;
} else if (strcmp(data, "rw") == 0) {
cmi->cmi_flags &= ~MS_RDONLY;
} else if (strcmp(data, "nosuid") == 0) {
cmi->cmi_flags |= MS_NOSUID;
} else if (strcmp(data, "suid") == 0) {
cmi->cmi_flags &= ~MS_NOSUID;
} else if (strcmp(data, "dev") == 0) {
cmi->cmi_flags &= ~MS_NODEV;
} else if (strcmp(data, "nodev") == 0) {
cmi->cmi_flags |= MS_NODEV;
} else if (strcmp(data, "noexec") == 0) {
cmi->cmi_flags |= MS_NOEXEC;
} else if (strcmp(data, "exec") == 0) {
cmi->cmi_flags &= ~MS_NOEXEC;
} else if (strcmp(data, "sync") == 0) {
cmi->cmi_flags |= MS_SYNCHRONOUS;
} else if (strcmp(data, "remount") == 0) {
cmi->cmi_flags |= MS_REMOUNT;
} else if (strcmp(data, "mandlock") == 0) {
cmi->cmi_flags |= MS_MANDLOCK;
} else if ((strcmp(data, "nobrl") == 0) ||
(strcmp(data, "nolock") == 0)) {
cmi->cmi_flags &= ~MS_MANDLOCK;
} else if (strcmp(data, "noatime") == 0) {
cmi->cmi_flags |= MS_NOATIME;
} else if (strcmp(data, "nodiratime") == 0) {
cmi->cmi_flags |= MS_NODIRATIME;
} else if (strcmp(data, "relatime") == 0) {
cmi->cmi_flags |= MS_RELATIME;
} else if (strcmp(data, "strictatime") == 0) {
cmi->cmi_flags |= MS_STRICTATIME;
} else if (strcmp(data, "noauto") == 0) {
/* ignore */
} else if (strcmp(data, "_netdev") == 0) {
/* ignore */
} else if (strcmp(data, "nofail") == 0) {
/* ignore */
} else if (strcmp(data, "fs") == 0) {
if (!value || !*value) {
fprintf(stderr, "mount option fs requires a value.\n");
return -EINVAL;
}
/* "fs" is an alias: forward it to the kernel as mds_namespace */
data = "mds_namespace";
skip = false;
} else if (strcmp(data, "nofallback") == 0) {
no_fallback = true;
} else if (strcmp(data, "secretfile") == 0) {
int ret;
if (!value || !*value) {
fprintf(stderr, "keyword secretfile found, but no secret file specified\n");
return -EINVAL;
}
ret = read_secret_from_file(value, cmi->cmi_secret, sizeof(cmi->cmi_secret));
if (ret < 0) {
fprintf(stderr, "error reading secret file: %d\n", ret);
return ret;
}
} else if (strcmp(data, "secret") == 0) {
size_t len;
if (!value || !*value) {
fprintf(stderr, "mount option secret requires a value.\n");
return -EINVAL;
}
/* NOTE(review): an overlong secret is silently ignored here */
len = strnlen(value, sizeof(cmi->cmi_secret)) + 1;
if (len <= sizeof(cmi->cmi_secret))
memcpy(cmi->cmi_secret, value, len);
} else if (strcmp(data, "conf") == 0) {
if (!value || !*value) {
fprintf(stderr, "mount option conf requires a value.\n");
return -EINVAL;
}
/* keep pointer to value */
cmi->cmi_conf = strdup(value);
if (!cmi->cmi_conf)
return -ENOMEM;
} else if (strcmp(data, "name") == 0) {
if (!value || !*value) {
fprintf(stderr, "mount option name requires a value.\n");
return -EINVAL;
}
/* keep pointer to value */
name = value;
skip = false;
} else if (strcmp(data, "ms_mode") == 0) {
if (!value || !*value) {
fprintf(stderr, "mount option ms_mode requires a value.\n");
return -EINVAL;
}
/* Only legacy ms_mode needs v1 addrs */
/* nonzero (i.e. true) for every mode other than "legacy" */
v2_addrs = strcmp(value, "legacy");
skip = false;
ms_mode_specified = true;
} else if (strcmp(data, "mon_addr") == 0) {
/* monitor address to use for mounting */
if (!value || !*value) {
fprintf(stderr, "mount option mon_addr requires a value.\n");
return -EINVAL;
}
cmi->cmi_mons = strdup(value);
if (!cmi->cmi_mons)
return -ENOMEM;
mon_addr_specified = true;
} else {
/* unrecognized mount options, passing to kernel */
skip = false;
}
/* Copy (possibly modified) option to out */
if (!skip)
append_opt(data, value, cmi, opt_pos);
data = next_keyword;
} while (data);
out:
/*
 * set ->cmi_name conditionally -- this gets checked when parsing new
 * device format. for old device format, ->cmi_name is set to default
 * user name when name option is not passed in.
 */
if (name)
record_name(name, cmi);
if (cmi->cmi_opts)
mount_ceph_debug("mount.ceph: options \"%s\".\n", cmi->cmi_opts);
if (!cmi->cmi_opts) {
cmi->cmi_opts = strdup(EMPTY_STRING);
if (!cmi->cmi_opts)
return -ENOMEM;
}
return 0;
}
/*
 * Parse the command line: the first two arguments are the positional
 * device string and mount point; the remaining flags are handled with
 * getopt_long().  Returns 0 on success, 1 when usage/help should be
 * printed, or a negative errno for an invalid invocation.
 */
static int parse_arguments(int argc, char *const *const argv,
			   const char **src, const char **node, const char **opts)
{
	static const struct option longopts[] = {
		{ "help", no_argument, 0, 'h' },
		{ "no-mtab", no_argument, 0, 'n' },
		{ "verbose", no_argument, 0, 'v' },
		{ "fake", no_argument, 0, 'f' },
		{ "options", required_argument, 0, 'o' },
		{ 0, 0, 0, 0 }
	};
	int c;

	/* no arguments at all: just show the usage */
	if (argc < 2)
		return 1;
	/* an explicit request for help */
	if (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help"))
		return 1;
	/* both positional arguments are mandatory */
	if (argc < 3)
		return -EINVAL;
	*src = argv[1];
	*node = argv[2];

	/* parse the remaining options */
	*opts = EMPTY_STRING;
	while ((c = getopt_long(argc, argv, "hnvfo:",
				longopts, NULL)) != -1) {
		switch (c) {
		case 'h':		/* -h or --help */
			return 1;
		case 'n':		/* -n or --no-mtab */
			skip_mtab_flag = true;
			break;
		case 'v':		/* -v or --verbose */
			verboseflag = true;
			break;
		case 'f':		/* -f or --fake */
			fakeflag = true;
			break;
		case 'o':		/* -o or --options */
			*opts = optarg;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
/* modprobe failing doesn't necessarily prevent from working, so this
   returns void -- the mount may still succeed (e.g. if the module is
   already loaded or built in; presumably why failure only warns) */
static void modprobe(void)
{
int r;
r = module_load("ceph", NULL);
if (r)
printf("failed to load ceph kernel module (%d)\n", r);
}
/*
 * Print command-line usage to stdout.  The first line interpolates the
 * program name; the fixed help text is emitted line by line.
 */
static void usage(const char *prog_name)
{
	static const char *const help[] = {
		"options:",
		"\t-h, --help\tPrint this help",
		"\t-n, --no-mtab\tDo not update /etc/mtab",
		"\t-v, --verbose\tVerbose",
		"\t-f, --fake\tFake mount, do not actually mount",
		"ceph-options: refer to mount.ceph(8)",
		"",
	};
	size_t i;

	printf("usage: %s [src] [mount-point] [-n] [-v] [-o ceph-options]\n",
	       prog_name);
	/* puts() appends the newline each original printf carried */
	for (i = 0; i < sizeof(help) / sizeof(help[0]); i++)
		puts(help[i]);
}
/*
 * Release the heap-allocated members of *cmi.  The structure itself lives
 * on the caller's stack, so only the pointers inside are freed.
 */
static void ceph_mount_info_free(struct ceph_mount_info *cmi)
{
	/* free(NULL) is a no-op, so members that were never set are safe */
	free(cmi->cmi_conf);
	free(cmi->cmi_mons);
	free(cmi->cmi_path);
	free(cmi->cmi_fsid);
	free(cmi->cmi_fsname);
	free(cmi->cmi_name);
	free(cmi->cmi_opts);
}
/*
 * Issue the actual mount(2) call with the assembled device string and
 * option string -- unless -f/--fake was given, in which case report
 * success without doing anything.
 */
static int call_mount_system_call(const char *rsrc, const char *node, struct ceph_mount_info *cmi)
{
	if (fakeflag)
		return 0;
	return mount(rsrc, node, "ceph", cmi->cmi_flags, cmi->cmi_opts);
}
/*
 * Mount using the new-style device syntax:
 *     <name>@<fsid>.<fsname>=<path>
 * Requires the cluster fsid.  Returns 0 on success or a negative errno
 * (mount failures are converted from errno).
 */
static int mount_new_device_format(const char *node, struct ceph_mount_info *cmi)
{
	int r;
	char *rsrc = NULL;
	int pos = 0;
	int len = 0;

	if (!cmi->cmi_fsid) {
		/* every other error message ends with a newline; this one
		 * was missing it */
		fprintf(stderr, "missing ceph cluster-id\n");
		return -EINVAL;
	}

	pos = safe_cat(&rsrc, &len, pos, cmi->cmi_name);
	pos = safe_cat(&rsrc, &len, pos, "@");
	pos = safe_cat(&rsrc, &len, pos, cmi->cmi_fsid);
	pos = safe_cat(&rsrc, &len, pos, ".");
	pos = safe_cat(&rsrc, &len, pos, cmi->cmi_fsname);
	pos = safe_cat(&rsrc, &len, pos, "=");
	if (cmi->cmi_path)
		safe_cat(&rsrc, &len, pos, cmi->cmi_path);

	mount_ceph_debug("mount.ceph: trying mount with new device syntax: %s\n",
			 rsrc);
	if (cmi->cmi_opts)
		mount_ceph_debug("mount.ceph: options \"%s\" will pass to kernel\n",
				 cmi->cmi_opts);

	r = call_mount_system_call(rsrc, node, cmi);
	if (r)
		r = -errno;	/* mount(2) failed: report why */
	free(rsrc);
	return r;
}
/*
 * Mount using the old-style device syntax: "<mon_addr_list>:<path>".
 * The mon_addr mount option (added by finalize_src() for the new syntax)
 * is pulled back out of the option string and becomes the device prefix;
 * fs name and fsid travel as mount options instead.  Returns the result
 * of the mount attempt (0 on success) or a negative errno on setup error.
 */
static int mount_old_device_format(const char *node, struct ceph_mount_info *cmi)
{
int r;
int len = 0;
int pos = 0;
char *mon_addr = NULL;
char *rsrc = NULL;
r = remove_opt(cmi, "mon_addr", &mon_addr);
if (r) {
fprintf(stderr, "failed to switch using old device format\n");
return -EINVAL;
}
/* if we reach here and still have a v2 addr, we'd need to
 * refresh with v1 addrs, since we'll be not passing ms_mode
 * with the old syntax.
 */
if (v2_addrs && !ms_mode_specified && !mon_addr_specified) {
mount_ceph_debug("mount.ceph: switching to using v1 address with old syntax\n");
v2_addrs = false;
/* discard the v2 list and re-fetch the v1 addresses */
free(mon_addr);
free(cmi->cmi_mons);
mon_addr = NULL;
cmi->cmi_mons = NULL;
fetch_config_info(cmi);
if (!cmi->cmi_mons) {
fprintf(stderr, "unable to determine (v1) mon addresses\n");
return -EINVAL;
}
r = finalize_src(cmi, NULL, &mon_addr);
if (r) {
fprintf(stderr, "failed to resolve (v1) mon addresses\n");
return -EINVAL;
}
remove_opt(cmi, "ms_mode", NULL);
}
/* old syntax passes fs name and fsid as mount options */
pos = strlen(cmi->cmi_opts);
if (cmi->cmi_fsname)
append_opt("mds_namespace", cmi->cmi_fsname, cmi, &pos);
if (cmi->cmi_fsid)
append_opt("fsid", cmi->cmi_fsid, cmi, &pos);
/* build the "<mons>:<path>" device string */
pos = 0;
resolved_mon_addr_as_mount_dev(mon_addr);
pos = safe_cat(&rsrc, &len, pos, mon_addr);
pos = safe_cat(&rsrc, &len, pos, ":");
if (cmi->cmi_path)
safe_cat(&rsrc, &len, pos, cmi->cmi_path);
mount_ceph_debug("mount.ceph: trying mount with old device syntax: %s\n",
rsrc);
if (cmi->cmi_opts)
mount_ceph_debug("mount.ceph: options \"%s\" will pass to kernel\n",
cmi->cmi_opts);
r = call_mount_system_call(rsrc, node, cmi);
free(mon_addr);
free(rsrc);
return r;
}
/*
 * Decide whether a failed new-style mount may be retried with the
 * old-style syntax.  This mostly exists to catch new-style (v2)
 * implementation bugs in the kernel and is primarily exercised by
 * teuthology tests.
 */
static bool should_fallback()
{
	struct stat stbuf;

	/* unless "nofallback" was requested, always allow the retry */
	if (!no_fallback)
		return true;

	if (stat(CEPH_V2_MOUNT_SUPPORT_PATH, &stbuf) != 0) {
		mount_ceph_debug("mount.ceph: v2 mount support check returned %d\n",
				 errno);
		if (errno == ENOENT)
			mount_ceph_debug("mount.ceph: kernel does not support v2"
					 " syntax\n");
		/* fallback on *all* errors */
		return true;
	}

	/* nofallback was given and the kernel advertises v2 support, yet
	 * the v2 mount failed anyway */
	fprintf(stderr, "mount.ceph: kernel BUG!\n");
	return false;
}
/*
 * Mount @dev on @node.  Resolves monitor addresses into the option
 * string, tries the new-style device syntax when selected, optionally
 * falls back to the old-style syntax, and updates /etc/mtab on success.
 * Returns 0 on success, -EINVAL on setup failure, or EX_FAIL when the
 * mount itself failed.
 */
static int do_mount(const char *dev, const char *node,
struct ceph_mount_info *cmi) {
int pos = 0;
int retval= -EINVAL;
bool fallback = true;
/* no v2 addresses available via config - try v1 addresses */
if (v2_addrs &&
!cmi->cmi_mons &&
!ms_mode_specified &&
!mon_addr_specified) {
mount_ceph_debug("mount.ceph: switching to using v1 address\n");
v2_addrs = false;
fetch_config_info(cmi);
remove_opt(cmi, "ms_mode", NULL);
}
if (!cmi->cmi_mons) {
fprintf(stderr, "unable to determine mon addresses\n");
return -EINVAL;
}
/* append the resolved mon list as a mon_addr option */
pos = strlen(cmi->cmi_opts);
retval = finalize_src(cmi, &pos, NULL);
if (retval) {
fprintf(stderr, "failed to resolve source\n");
return -EINVAL;
}
retval = -1;
if (cmi->format == MOUNT_DEV_FORMAT_NEW) {
retval = mount_new_device_format(node, cmi);
if (retval)
/* only retry with old syntax when permitted, the kernel
 * rejected the new syntax (-EINVAL), and a fsid is known */
fallback = (should_fallback() && retval == -EINVAL && cmi->cmi_fsid);
}
/* pass-through or fallback to old-style mount device */
if (retval && fallback)
retval = mount_old_device_format(node, cmi);
if (retval) {
retval = EX_FAIL;
/* errno still reflects the last mount(2) failure */
switch (errno) {
case ENODEV:
fprintf(stderr, "mount error: ceph filesystem not supported by the system\n");
break;
case EHOSTUNREACH:
fprintf(stderr, "mount error: no mds server is up or the cluster is laggy\n");
break;
default:
fprintf(stderr, "mount error %d = %s\n", errno, strerror(errno));
}
}
if (!retval && !skip_mtab_flag) {
update_mtab_entry(dev, node, "ceph", cmi->cmi_opts, cmi->cmi_flags, 0, 0);
}
return retval;
}
/*
 * Make the auth credential visible to the kernel: preferentially add the
 * secret to the kernel keyring and append "key=<entity>", falling back to
 * a raw "secret=" mount option on kernels without keyring support.  Does
 * nothing when there is neither a secret nor an existing keyring entry.
 * Returns 0 on success or the negative errno from set_kernel_secret().
 */
static int append_key_or_secret_option(struct ceph_mount_info *cmi)
{
int pos = strlen(cmi->cmi_opts);
if (!cmi->cmi_secret[0] && !is_kernel_secret(cmi->cmi_name))
return 0;
/* separate from any options already present */
if (pos)
pos = safe_cat(&cmi->cmi_opts, &cmi->cmi_opts_len, pos, ",");
/* when parsing kernel options (-o remount) we get '<hidden>' as the secret */
if (cmi->cmi_secret[0] && (strcmp(cmi->cmi_secret, "<hidden>") != 0)) {
int ret = set_kernel_secret(cmi->cmi_secret, cmi->cmi_name);
if (ret < 0) {
if (ret == -ENODEV || ret == -ENOSYS) {
/* old kernel; fall back to secret= in options */
pos = safe_cat(&cmi->cmi_opts,
&cmi->cmi_opts_len, pos,
"secret=");
pos = safe_cat(&cmi->cmi_opts,
&cmi->cmi_opts_len, pos,
cmi->cmi_secret);
return 0;
}
fprintf(stderr, "adding ceph secret key to kernel failed: %s\n",
strerror(-ret));
return ret;
}
}
/* keyring path (or pre-existing entry): reference it by entity name */
pos = safe_cat(&cmi->cmi_opts, &cmi->cmi_opts_len, pos, "key=");
pos = safe_cat(&cmi->cmi_opts, &cmi->cmi_opts_len, pos, cmi->cmi_name);
return 0;
}
/*
 * mount.ceph entry point: parse arguments and options, gather any missing
 * config/auth information, then perform the mount.  Returns 0 on success
 * or one of the EX_* exit codes.
 */
int main(int argc, char *argv[])
{
int opt_pos = 0;
const char *dev, *node, *opts;
int retval;
struct ceph_mount_info cmi = { 0 };
retval = parse_arguments(argc, argv, &dev, &node, &opts);
if (retval) {
usage(argv[0]);
/* parse_arguments() returns 1 for an explicit help request,
 * which is not an error */
retval = (retval > 0) ? 0 : EX_USAGE;
goto out;
}
retval = parse_options(opts, &cmi, &opt_pos);
if (retval) {
fprintf(stderr, "failed to parse ceph_options: %d\n", retval);
retval = EX_USAGE;
goto out;
}
retval = parse_dev(dev, &cmi, &opt_pos);
if (retval) {
fprintf(stderr, "unable to parse mount device string: %d\n", retval);
retval = EX_USAGE;
goto out;
}
/*
 * We don't care if this errors out, since this is best-effort.
 * note that this fetches v1 or v2 addr depending on @v2_addr
 * flag.
 */
fetch_config_info(&cmi);
/* Ensure the ceph key_type is available */
modprobe();
retval = append_key_or_secret_option(&cmi);
if (retval) {
fprintf(stderr, "couldn't append secret option: %d\n", retval);
retval = EX_USAGE;
goto out;
}
/* don't let signals interrupt the mount attempt itself */
block_signals(SIG_BLOCK);
retval = do_mount(dev, node, &cmi);
block_signals(SIG_UNBLOCK);
out:
ceph_mount_info_free(&cmi);
return retval;
}
| 25,524 | 24.073674 | 98 |
c
|
null |
ceph-main/src/mount/mount.ceph.h
|
#ifndef _SRC_MOUNT_MOUNT_CEPH_H
#define _SRC_MOUNT_MOUNT_CEPH_H
#ifdef __cplusplus
extern "C" {
#endif
/*
 * See class CryptoKey
 *
 * 2 (for the type of secret) +
 * 8 (for the timestamp) +
 * 2 (for the length of secret) +
 * 16 (for an AES-128 key)
 */
#define MAX_RAW_SECRET_LEN (2 + 8 + 2 + 16)
/* Max length of base64 encoded secret. 4/3 original size (rounded up) */
#define MAX_SECRET_LEN ((MAX_RAW_SECRET_LEN + (3 - 1)) * 4 / 3)
/* Max Including null terminator */
#define SECRET_BUFSIZE (MAX_SECRET_LEN + 1)
/* 2k should be enough for anyone? */
#define MON_LIST_BUFSIZE 2048
/* 37 = textual UUID (36 chars) plus NUL terminator */
#define CLUSTER_FSID_LEN 37
/* printf-style debug logging used throughout mount.ceph */
void mount_ceph_debug(const char *fmt, ...);
/*
 * Results handed from the unprivileged config-reading child back to the
 * privileged parent (see fetch_config_info() in mount.ceph.c).  All fields
 * are fixed-size so the struct can live in a shared anonymous mapping.
 */
struct ceph_config_info {
char cci_secret[SECRET_BUFSIZE]; // auth secret
char cci_mons[MON_LIST_BUFSIZE]; // monitor addrs
char cci_fsid[CLUSTER_FSID_LEN]; // cluster fsid
};
/*
 * Fill *cci with the secret, monitor list and fsid for entity @name from
 * @config_file; @v2_addrs selects msgr2 (v2) monitor addresses.
 */
void mount_ceph_get_config_info(const char *config_file, const char *name,
bool v2_addrs, struct ceph_config_info *cci);
#ifdef __cplusplus
}
#endif
#endif /* _SRC_MOUNT_MOUNT_CEPH_H */
| 1,049 | 22.333333 | 74 |
h
|
null |
ceph-main/src/mount/mtab.c
|
/*
* this code lifted from util-linux-ng, licensed GPLv2+,
*
* git://git.kernel.org/pub/scm/utils/util-linux-ng/util-linux-ng.git
*
* whoever decided that each special mount program is responsible
* for updating /etc/mtab should be spanked.
*
* <[email protected]>
*/
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <time.h>
#include <mntent.h>
#include <stdarg.h>
#include "mount/canonicalize.c"
/* Updating mtab ----------------------------------------------*/
/* Flag for already existing lock file. */
static int we_created_lockfile = 0;
static int lockfile_fd = -1;
/* Flag to indicate that signals have been set up. */
static int signals_have_been_setup = 0;
/* Ensure that the lock is released if we are interrupted. */
extern char *strsignal(int sig); /* not always in <string.h> */
/*
 * SIGALRM handler used while blocked in fcntl(F_SETLKW): it intentionally
 * does nothing, so the interrupted fcntl() fails with EINTR when the lock
 * wait times out (see lock_mtab()).
 */
static void
setlkw_timeout (int sig) {
/* nothing, fcntl will fail anyway */
}
#define _PATH_MOUNTED "/etc/mtab"
#define _PATH_MOUNTED_LOCK "/etc/mtab~"
#define PROC_SUPER_MAGIC 0x9fa0
/* exit status - bits below are ORed */
#define EX_USAGE 1 /* incorrect invocation or permission */
#define EX_SYSERR 2 /* out of memory, cannot fork, ... */
#define EX_SOFTWARE 4 /* internal mount bug or wrong version */
#define EX_USER 8 /* user interrupt */
#define EX_FILEIO 16 /* problems writing, locking, ... mtab/fstab */
#define EX_FAIL 32 /* mount failure */
#define EX_SOMEOK 64 /* some mount succeeded */
/*
 * Print a printf-style message plus a trailing newline to stderr and
 * terminate the process with exit status @err.  Never returns; the int
 * return type is kept for callers that use it in expressions.
 */
int die(int err, const char *fmt, ...) {
	va_list ap;

	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
	fputc('\n', stderr);
	exit(err);
}
/*
 * Fatal-signal handler installed by lock_mtab(): report the signal name
 * and exit with EX_USER via die().
 */
static void
handler (int sig) {
die(EX_USER, "%s", strsignal(sig));
}
/*
 * Drop the /etc/mtab lock: close and remove the lock file, but only if
 * this process was the one that created it.  No-op otherwise.
 */
void
unlock_mtab (void) {
	if (!we_created_lockfile)
		return;
	close(lockfile_fd);
	lockfile_fd = -1;
	unlink(_PATH_MOUNTED_LOCK);
	we_created_lockfile = 0;
}
/* Create the lock file.
The lock file will be removed if we catch a signal or when we exit. */
/* The old code here used flock on a lock file /etc/mtab~ and deleted
this lock file afterwards. However, as rgooch remarks, that has a
race: a second mount may be waiting on the lock and proceed as
soon as the lock file is deleted by the first mount, and immediately
afterwards a third mount comes, creates a new /etc/mtab~, applies
flock to that, and also proceeds, so that the second and third mount
now both are scribbling in /etc/mtab.
The new code uses a link() instead of a creat(), where we proceed
only if it was us that created the lock, and hence we always have
to delete the lock afterwards. Now the use of flock() is in principle
superfluous, but avoids an arbitrary sleep(). */
/* Where does the link point to? Obvious choices are mtab and mtab~~.
HJLu points out that the latter leads to races. Right now we use
mtab~.<pid> instead. Use 20 as upper bound for the length of %d. */
#define MOUNTLOCK_LINKTARGET _PATH_MOUNTED_LOCK "%d"
#define MOUNTLOCK_LINKTARGET_LTH (sizeof(_PATH_MOUNTED_LOCK)+20)
/*
* The original mount locking code has used sleep(1) between attempts and
* maximal number of attempts has been 5.
*
* There was very small number of attempts and extremely long waiting (1s)
* that is useless on machines with large number of concurret mount processes.
*
* Now we wait few thousand microseconds between attempts and we have global
* time limit (30s) rather than limit for number of attempts. The advantage
* is that this method also counts time which we spend in fcntl(F_SETLKW) and
* number of attempts is not so much restricted.
*
* -- [email protected] [2007-Mar-2007]
*/
/* maximum seconds between first and last attempt */
#define MOUNTLOCK_MAXTIME 30
/* sleep time (in microseconds, max=999999) between attempts */
#define MOUNTLOCK_WAITTIME 5000
/*
 * Acquire the /etc/mtab lock.  A uniquely named per-pid temp file is
 * created and then link(2)ed to _PATH_MOUNTED_LOCK; since link() is
 * atomic, whoever creates the link owns the lock.  Losers wait on an
 * fcntl() write lock (bounded by a SIGALRM timeout) and retry until
 * MOUNTLOCK_MAXTIME elapses.  Any failure exits the process via die().
 */
void
lock_mtab (void) {
int i;
struct timespec waittime;
struct timeval maxtime;
char linktargetfile[MOUNTLOCK_LINKTARGET_LTH];
/* install fatal-signal handlers once so a signal cannot leave a stale
 * lock file behind; SIGALRM gets a no-op handler to time out F_SETLKW */
if (!signals_have_been_setup) {
int sig = 0;
struct sigaction sa;
sa.sa_handler = handler;
sa.sa_flags = 0;
sigfillset (&sa.sa_mask);
while (sigismember (&sa.sa_mask, ++sig) != -1
&& sig != SIGCHLD) {
if (sig == SIGALRM)
sa.sa_handler = setlkw_timeout;
else
sa.sa_handler = handler;
sigaction (sig, &sa, (struct sigaction *) 0);
}
signals_have_been_setup = 1;
}
/* create the per-pid link source, e.g. /etc/mtab~<pid> */
snprintf(linktargetfile, sizeof(linktargetfile), MOUNTLOCK_LINKTARGET,
getpid ());
i = open (linktargetfile, O_WRONLY|O_CREAT, S_IRUSR|S_IWUSR);
if (i < 0) {
int errsv = errno;
/* linktargetfile does not exist (as a file)
and we cannot create it. Read-only filesystem?
Too many files open in the system?
Filesystem full? */
die (EX_FILEIO, "can't create lock file %s: %s "
"(use -n flag to override)",
linktargetfile, strerror (errsv));
}
close(i);
gettimeofday(&maxtime, NULL);
maxtime.tv_sec += MOUNTLOCK_MAXTIME;
waittime.tv_sec = 0;
waittime.tv_nsec = (1000 * MOUNTLOCK_WAITTIME);
/* Repeat until it was us who made the link */
while (!we_created_lockfile) {
struct timeval now;
struct flock flock;
int errsv, j;
/* atomic claim: only one process can create the link */
j = link(linktargetfile, _PATH_MOUNTED_LOCK);
errsv = errno;
if (j == 0)
we_created_lockfile = 1;
if (j < 0 && errsv != EEXIST) {
(void) unlink(linktargetfile);
die (EX_FILEIO, "can't link lock file %s: %s "
"(use -n flag to override)",
_PATH_MOUNTED_LOCK, strerror (errsv));
}
lockfile_fd = open (_PATH_MOUNTED_LOCK, O_WRONLY);
if (lockfile_fd < 0) {
/* Strange... Maybe the file was just deleted? */
int errsv = errno;
gettimeofday(&now, NULL);
if (errno == ENOENT && now.tv_sec < maxtime.tv_sec) {
we_created_lockfile = 0;
continue;
}
(void) unlink(linktargetfile);
die (EX_FILEIO, "can't open lock file %s: %s "
"(use -n flag to override)",
_PATH_MOUNTED_LOCK, strerror (errsv));
}
flock.l_type = F_WRLCK;
flock.l_whence = SEEK_SET;
flock.l_start = 0;
flock.l_len = 0;
if (j == 0) {
/* We made the link. Now claim the lock. */
if (fcntl (lockfile_fd, F_SETLK, &flock) == -1) {
/* proceed, since it was us who created the lockfile anyway */
}
(void) unlink(linktargetfile);
} else {
/* Someone else made the link. Wait. */
gettimeofday(&now, NULL);
if (now.tv_sec < maxtime.tv_sec) {
/* bound the F_SETLKW wait with SIGALRM (no-op handler) */
alarm(maxtime.tv_sec - now.tv_sec);
if (fcntl (lockfile_fd, F_SETLKW, &flock) == -1) {
int errsv = errno;
(void) unlink(linktargetfile);
die (EX_FILEIO, "can't lock lock file %s: %s",
_PATH_MOUNTED_LOCK, (errno == EINTR) ?
"timed out" : strerror (errsv));
}
alarm(0);
nanosleep(&waittime, NULL);
} else {
(void) unlink(linktargetfile);
die (EX_FILEIO, "Cannot create link %s\n"
"Perhaps there is a stale lock file?\n",
_PATH_MOUNTED_LOCK);
}
close(lockfile_fd);
}
}
}
/*
 * Append an mtab entry for the new mount.  Best-effort: failures are
 * reported (on stdout, as in the rest of this file) but never abort the
 * mount itself.  Does nothing when /etc/mtab is a symlink into procfs.
 *
 * @spec: mount device string   @node: mount point   @type: fs type
 * @opts: option string (NULL means "rw")
 * @flags: mount flags (unused here)  @freq/@pass: mtab dump/fsck fields
 */
static void
update_mtab_entry(const char *spec, const char *node, const char *type,
		  const char *opts, int flags, int freq, int pass) {
	struct statfs buf;
	int err = statfs(_PATH_MOUNTED, &buf);
	if (err) {
		/* statfs() returns -1 and sets errno; the old code passed
		 * the -1 return value to strerror(), yielding garbage */
		printf("mount: can't statfs %s: %s", _PATH_MOUNTED,
		       strerror (errno));
		return;
	}
	/* /etc/mtab is symbol link to /proc/self/mounts? */
	if (buf.f_type == PROC_SUPER_MAGIC)
		return;
	if (!opts)
		opts = "rw";
	struct mntent mnt;
	mnt.mnt_fsname = strdup(spec);
	mnt.mnt_dir = canonicalize_path(node);
	mnt.mnt_type = strdup(type);
	mnt.mnt_opts = strdup(opts);
	mnt.mnt_freq = freq;
	mnt.mnt_passno = pass;
	FILE *fp;
	lock_mtab();
	fp = setmntent(_PATH_MOUNTED, "a+");
	if (fp == NULL) {
		int errsv = errno;
		printf("mount: can't open %s: %s", _PATH_MOUNTED,
		       strerror (errsv));
	} else {
		if ((addmntent (fp, &mnt)) == 1) {
			int errsv = errno;
			printf("mount: error writing %s: %s",
			       _PATH_MOUNTED, strerror (errsv));
		}
		/* only close the stream if it was opened: the old code
		 * called endmntent(NULL) after a setmntent() failure */
		endmntent(fp);
	}
	unlock_mtab();
	free(mnt.mnt_fsname);
	free(mnt.mnt_dir);
	free(mnt.mnt_type);
	free(mnt.mnt_opts);
}
| 8,269 | 27.033898 | 79 |
c
|
null |
ceph-main/src/msg/Connection.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "msg/Connection.h"
#include "msg/Messenger.h"
// True if messages for this peer type should be silently dropped, per the
// messenger's ms_blackhole_* configuration; unknown peer types are never
// blackholed.
bool Connection::is_blackhole() const {
  auto& conf = msgr->cct->_conf;
  if (peer_type == CEPH_ENTITY_TYPE_MON)
    return conf->ms_blackhole_mon;
  if (peer_type == CEPH_ENTITY_TYPE_OSD)
    return conf->ms_blackhole_osd;
  if (peer_type == CEPH_ENTITY_TYPE_MDS)
    return conf->ms_blackhole_mds;
  if (peer_type == CEPH_ENTITY_TYPE_CLIENT)
    return conf->ms_blackhole_client;
  return false;
}
| 550 | 21.958333 | 70 |
cc
|
null |
ceph-main/src/msg/Connection.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_CONNECTION_H
#define CEPH_CONNECTION_H
#include <stdlib.h>
#include <ostream>
#include "auth/Auth.h"
#include "common/RefCountedObj.h"
#include "common/config.h"
#include "common/debug.h"
#include "common/ref.h"
#include "common/ceph_mutex.h"
#include "include/ceph_assert.h" // Because intusive_ptr clobbers our assert...
#include "include/buffer.h"
#include "include/types.h"
#include "common/item_history.h"
#include "msg/MessageRef.h"
// ======================================================
// abstract Connection, for keeping per-connection state
class Messenger;
#ifdef UNIT_TESTS_BUILT
class Interceptor;
#endif
struct Connection : public RefCountedObjectSafe {
// guards priv and the keepalive timestamps below
mutable ceph::mutex lock = ceph::make_mutex("Connection::lock");
Messenger *msgr; // parent messenger (set in the constructor)
RefCountedPtr priv; // opaque user data attached via set_priv()
int peer_type = -1; // CEPH_ENTITY_TYPE_* of the remote end
int64_t peer_id = -1; // [msgr2 only] the 0 of osd.0, 4567 or client.4567
safe_item_history<entity_addrvec_t> peer_addrs;
utime_t last_keepalive, last_keepalive_ack;
bool anon = false; ///< anonymous outgoing connection
private:
uint64_t features = 0; // feature bits; see get/set_features() below
public:
bool is_loopback = false;
bool failed = false; // true if we are a lossy connection that has failed.
int rx_buffers_version = 0;
std::map<ceph_tid_t,std::pair<ceph::buffer::list, int>> rx_buffers;
// authentication state
// FIXME make these private after ms_handle_authorizer is removed
public:
AuthCapsInfo peer_caps_info;
EntityName peer_name;
uint64_t peer_global_id = 0;
#ifdef UNIT_TESTS_BUILT
Interceptor *interceptor;
#endif
public:
// attach / fetch / drop the opaque per-connection user state (all
// take the connection lock)
void set_priv(const RefCountedPtr& o) {
std::lock_guard l{lock};
priv = o;
}
RefCountedPtr get_priv() {
std::lock_guard l{lock};
return priv;
}
void clear_priv() {
std::lock_guard l{lock};
priv.reset(nullptr);
}
/**
 * Used to judge whether this connection is ready to send. Usually, the
 * implementation need to build a own shakehand or sesson then it can be
 * ready to send.
 *
 * @return true if ready to send, or false otherwise
 */
virtual bool is_connected() = 0;
// true for msgr2 implementations; the base class default is msgr1
virtual bool is_msgr2() const {
return false;
}
bool is_anon() const {
return anon;
}
Messenger *get_messenger() {
return msgr;
}
/**
 * Queue the given Message to send out on the given Connection.
 * Success in this function does not guarantee Message delivery, only
 * success in queueing the Message. Other guarantees may be provided based
 * on the Connection policy.
 *
 * @param m The Message to send. The Messenger consumes a single reference
 * when you pass it in.
 *
 * @return 0 on success, or -errno on failure.
 */
virtual int send_message(Message *m) = 0;
virtual int send_message2(MessageRef m)
{
return send_message(m.detach()); /* send_message(Message *m) consumes a reference */
}
/**
 * Send a "keepalive" ping along the given Connection, if it's working.
 * If the underlying connection has broken, this function does nothing.
 *
 * @return 0, or implementation-defined error numbers.
 */
virtual void send_keepalive() = 0;
/**
 * Mark down the given Connection.
 *
 * This will cause us to discard its outgoing queue, and if reset
 * detection is enabled in the policy and the endpoint tries to
 * reconnect they will discard their queue when we inform them of
 * the session reset.
 *
 * It does not generate any notifications to the Dispatcher.
 */
virtual void mark_down() = 0;
/**
 * Mark a Connection as "disposable", setting it to lossy
 * (regardless of initial Policy). This does not immediately close
 * the Connection once Messages have been delivered, so as long as
 * there are no errors you can continue to receive responses; but it
 * will not attempt to reconnect for message delivery or preserve
 * your old delivery semantics, either.
 *
 * TODO: There's some odd stuff going on in our SimpleMessenger
 * implementation during connect that looks unused; is there
 * more of a contract that that's enforcing?
 */
virtual void mark_disposable() = 0;
// WARNING / FIXME: this is not populated for loopback connections
AuthCapsInfo& get_peer_caps_info() {
return peer_caps_info;
}
const EntityName& get_peer_entity_name() {
return peer_name;
}
uint64_t get_peer_global_id() {
return peer_global_id;
}
int get_peer_type() const { return peer_type; }
void set_peer_type(int t) { peer_type = t; }
// peer_id is only defined for msgr2
int64_t get_peer_id() const { return peer_id; }
void set_peer_id(int64_t t) { peer_id = t; }
// convenience predicates on the peer's entity type
bool peer_is_mon() const { return peer_type == CEPH_ENTITY_TYPE_MON; }
bool peer_is_mgr() const { return peer_type == CEPH_ENTITY_TYPE_MGR; }
bool peer_is_mds() const { return peer_type == CEPH_ENTITY_TYPE_MDS; }
bool peer_is_osd() const { return peer_type == CEPH_ENTITY_TYPE_OSD; }
bool peer_is_client() const { return peer_type == CEPH_ENTITY_TYPE_CLIENT; }
/// which of the peer's addrs is actually in use for this connection
virtual entity_addr_t get_peer_socket_addr() const = 0;
entity_addr_t get_peer_addr() const {
return peer_addrs->front();
}
const entity_addrvec_t& get_peer_addrs() const {
return *peer_addrs;
}
void set_peer_addr(const entity_addr_t& a) {
peer_addrs = entity_addrvec_t(a);
}
void set_peer_addrs(const entity_addrvec_t& av) { peer_addrs = av; }
// feature-bit accessors (has_features checks that ALL bits of f are set)
uint64_t get_features() const { return features; }
bool has_feature(uint64_t f) const { return features & f; }
bool has_features(uint64_t f) const {
return (features & f) == f;
}
void set_features(uint64_t f) { features = f; }
void set_feature(uint64_t f) { features |= f; }
// connection security mode; base class defaults to CRC
virtual int get_con_mode() const {
return CEPH_CON_MODE_CRC;
}
// rx buffer hooks are currently compiled out (#if 0): both are no-ops
void post_rx_buffer(ceph_tid_t tid, ceph::buffer::list& bl) {
#if 0
std::lock_guard l{lock};
++rx_buffers_version;
rx_buffers[tid] = pair<bufferlist,int>(bl, rx_buffers_version);
#endif
}
void revoke_rx_buffer(ceph_tid_t tid) {
#if 0
std::lock_guard l{lock};
rx_buffers.erase(tid);
#endif
}
// lock-protected keepalive timestamp accessors
utime_t get_last_keepalive() const {
std::lock_guard l{lock};
return last_keepalive;
}
void set_last_keepalive(utime_t t) {
std::lock_guard l{lock};
last_keepalive = t;
}
utime_t get_last_keepalive_ack() const {
std::lock_guard l{lock};
return last_keepalive_ack;
}
void set_last_keepalive_ack(utime_t t) {
std::lock_guard l{lock};
last_keepalive_ack = t;
}
// defined in Connection.cc: checks ms_blackhole_* config by peer type
bool is_blackhole() const;
protected:
Connection(CephContext *cct, Messenger *m)
: RefCountedObjectSafe(cct),
msgr(m)
{}
~Connection() override {
//generic_dout(0) << "~Connection " << this << dendl;
}
};
using ConnectionRef = ceph::ref_t<Connection>;
#endif /* CEPH_CONNECTION_H */
| 7,256 | 27.237354 | 88 |
h
|
null |
ceph-main/src/msg/DispatchQueue.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "msg/Message.h"
#include "DispatchQueue.h"
#include "Messenger.h"
#include "common/ceph_context.h"
#define dout_subsys ceph_subsys_ms
#include "common/debug.h"
using ceph::cref_t;
using ceph::ref_t;
/*******************
* DispatchQueue
*/
#undef dout_prefix
#define dout_prefix *_dout << "-- " << msgr->get_myaddrs() << " "
// Age (in seconds) of the oldest queued message relative to @now, or 0
// when nothing is waiting.
double DispatchQueue::get_max_age(utime_t now) const {
  std::lock_guard l{lock};
  if (!marrival.empty())
    return now - marrival.begin()->first;
  return 0;
}
/*
 * Log receipt of @m and return the number of dispatch-throttle bytes
 * that must be released once dispatch finishes (see post_dispatch()).
 */
uint64_t DispatchQueue::pre_dispatch(const ref_t<Message>& m)
{
ldout(cct,1) << "<== " << m->get_source_inst()
<< " " << m->get_seq()
<< " ==== " << *m
<< " ==== " << m->get_payload().length()
<< "+" << m->get_middle().length()
<< "+" << m->get_data().length()
<< " (" << ceph_con_mode_name(m->get_connection()->get_con_mode())
<< " " << m->get_footer().front_crc << " "
<< m->get_footer().middle_crc
<< " " << m->get_footer().data_crc << ")"
<< " " << m << " con " << m->get_connection()
<< dendl;
uint64_t msize = m->get_dispatch_throttle_size();
m->set_dispatch_throttle_size(0); // clear it out, in case we requeue this message.
return msize;
}
void DispatchQueue::post_dispatch(const ref_t<Message>& m, uint64_t msize)
{
  // Return the bytes reserved for this message (see pre_dispatch) to the
  // dispatch throttler now that delivery has completed.
  dispatch_throttle_release(msize);
  ldout(cct,20) << "done calling dispatch on " << m << dendl;
}
bool DispatchQueue::can_fast_dispatch(const cref_t<Message> &m) const
{
  // The Messenger decides whether any dispatcher can fast-dispatch this
  // message; the queue itself has no say.
  return msgr->ms_can_fast_dispatch(m);
}
void DispatchQueue::fast_dispatch(const ref_t<Message>& m)
{
  // Bypass the queue entirely: account, deliver inline, release accounting.
  const uint64_t throttle_size = pre_dispatch(m);
  msgr->ms_fast_dispatch(m);
  post_dispatch(m, throttle_size);
}
void DispatchQueue::fast_preprocess(const ref_t<Message>& m)
{
  // Give fast dispatchers a peek at the message; called before the
  // fast/regular dispatch decision (see run_local_delivery()).
  msgr->ms_fast_preprocess(m);
}
void DispatchQueue::enqueue(const ref_t<Message>& m, int priority, uint64_t id)
{
  // Queue a message for the regular dispatch thread. `id` selects the
  // fair-queueing class; high-priority messages bypass fair queueing.
  std::lock_guard guard{lock};
  if (stop) {
    return;
  }
  ldout(cct,20) << "queue " << m << " prio " << priority << dendl;
  add_arrival(m);
  if (priority < CEPH_MSG_PRIO_LOW) {
    mqueue.enqueue(id, priority, m->get_cost(), QueueItem(m));
  } else {
    mqueue.enqueue_strict(id, priority, QueueItem(m));
  }
  cond.notify_all();
}
void DispatchQueue::local_delivery(const ref_t<Message>& m, int priority)
{
  // Loopback delivery: stamp the message as if it had just been received,
  // then hand it to the local delivery thread.
  const auto now = ceph_clock_now();
  m->set_recv_stamp(now);
  m->set_throttle_stamp(now);
  m->set_recv_complete_stamp(now);
  std::lock_guard guard{local_delivery_lock};
  if (local_messages.empty()) {
    // Delivery thread may be asleep; wake it now that work is arriving.
    local_delivery_cond.notify_all();
  }
  local_messages.emplace(m, priority);
}
void DispatchQueue::run_local_delivery()
{
  // Body of the "ms_local" thread: drain locally-submitted messages and
  // route each through fast dispatch or the regular dispatch queue.
  std::unique_lock l{local_delivery_lock};
  while (true) {
    if (stop_local_delivery)
      break;
    if (local_messages.empty()) {
      // Sleep until local_delivery() queues work or shutdown() fires.
      local_delivery_cond.wait(l);
      continue;
    }
    auto p = std::move(local_messages.front());
    local_messages.pop();
    // Drop the lock while dispatching so producers are not blocked.
    l.unlock();
    const ref_t<Message>& m = p.first;
    int priority = p.second;
    fast_preprocess(m);
    if (can_fast_dispatch(m)) {
      fast_dispatch(m);
    } else {
      enqueue(m, priority, 0);
    }
    l.lock();
  }
}
void DispatchQueue::dispatch_throttle_release(uint64_t msize)
{
  // A zero size means nothing was taken from the throttler for this message.
  if (!msize) {
    return;
  }
  ldout(cct,10) << __func__ << " " << msize << " to dispatch throttler "
		<< dispatch_throttler.get_current() << "/"
		<< dispatch_throttler.get_max() << dendl;
  dispatch_throttler.put(msize);
}
/*
* This function delivers incoming messages to the Messenger.
* Connections with messages are kept in queues; when beginning a message
* delivery the highest-priority queue is selected, the connection from the
* front of the queue is removed, and its message read. If the connection
* has remaining messages at that priority level, it is re-placed on to the
* end of the queue. If the queue is empty; it's removed.
* The message is then delivered and the process starts again.
*/
void DispatchQueue::entry()
{
  // Body of the "ms_dispatch" thread; see the comment above for the
  // round-robin scheduling policy.  Runs until shutdown() sets `stop`.
  std::unique_lock l{lock};
  while (true) {
    while (!mqueue.empty()) {
      QueueItem qitem = mqueue.dequeue();
      if (!qitem.is_code())
	remove_arrival(qitem.get_message());
      // Deliver without holding the lock so enqueue() can make progress.
      l.unlock();
      if (qitem.is_code()) {
	// This item is a connection life-cycle event, not a message.
	if (cct->_conf->ms_inject_internal_delays &&
	    cct->_conf->ms_inject_delay_probability &&
	    (rand() % 10000)/10000.0 < cct->_conf->ms_inject_delay_probability) {
	  // Fault-injection hook: sleep to shake out dispatch-ordering races.
	  utime_t t;
	  t.set_from_double(cct->_conf->ms_inject_internal_delays);
	  ldout(cct, 1) << "DispatchQueue::entry inject delay of " << t
			<< dendl;
	  t.sleep();
	}
	switch (qitem.get_code()) {
	case D_BAD_REMOTE_RESET:
	  msgr->ms_deliver_handle_remote_reset(qitem.get_connection());
	  break;
	case D_CONNECT:
	  msgr->ms_deliver_handle_connect(qitem.get_connection());
	  break;
	case D_ACCEPT:
	  msgr->ms_deliver_handle_accept(qitem.get_connection());
	  break;
	case D_BAD_RESET:
	  msgr->ms_deliver_handle_reset(qitem.get_connection());
	  break;
	case D_CONN_REFUSED:
	  msgr->ms_deliver_handle_refused(qitem.get_connection());
	  break;
	default:
	  ceph_abort();
	}
      } else {
	const ref_t<Message>& m = qitem.get_message();
	if (stop) {
	  // Shutting down: drop the message instead of dispatching it.
	  ldout(cct,10) << " stop flag set, discarding " << m << " " << *m << dendl;
	} else {
	  uint64_t msize = pre_dispatch(m);
	  msgr->ms_deliver_dispatch(m);
	  post_dispatch(m, msize);
	}
      }
      l.lock();
    }
    if (stop)
      break;
    // wait for something to be put on queue
    cond.wait(l);
  }
}
void DispatchQueue::discard_queue(uint64_t id) {
std::lock_guard l{lock};
std::list<QueueItem> removed;
mqueue.remove_by_class(id, &removed);
for (auto i = removed.begin(); i != removed.end(); ++i) {
ceph_assert(!(i->is_code())); // We don't discard id 0, ever!
const ref_t<Message>& m = i->get_message();
remove_arrival(m);
dispatch_throttle_release(m->get_dispatch_throttle_size());
}
}
void DispatchQueue::start()
{
  // Spawn the two worker threads.  Must only be called once, and never
  // after shutdown() has set the stop flag.
  ceph_assert(!stop);
  ceph_assert(!dispatch_thread.is_started());
  dispatch_thread.create("ms_dispatch");
  local_delivery_thread.create("ms_local");
}
void DispatchQueue::wait()
{
  // Join both worker threads; call after shutdown() has signalled them.
  local_delivery_thread.join();
  dispatch_thread.join();
}
void DispatchQueue::discard_local()
{
  // Drop all pending loopback messages (and the message refs they hold).
  local_messages = decltype(local_messages){};
}
void DispatchQueue::shutdown()
{
  // Signal both worker threads to exit; wait() joins them afterwards.
  // stop my local delivery thread
  {
    std::scoped_lock l{local_delivery_lock};
    stop_local_delivery = true;
    local_delivery_cond.notify_all();
  }
  // stop my dispatch thread
  {
    std::scoped_lock l{lock};
    stop = true;
    cond.notify_all();
  }
}
| 6,999 | 25.717557 | 85 |
cc
|
null |
ceph-main/src/msg/DispatchQueue.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_DISPATCHQUEUE_H
#define CEPH_DISPATCHQUEUE_H
#include <atomic>
#include <map>
#include <queue>
#include <boost/intrusive_ptr.hpp>
#include "include/ceph_assert.h"
#include "include/common_fwd.h"
#include "common/Throttle.h"
#include "common/ceph_mutex.h"
#include "common/Thread.h"
#include "common/PrioritizedQueue.h"
#include "Message.h"
class Messenger;
struct Connection;
/**
* The DispatchQueue contains all the connections which have Messages
* they want to be dispatched, carefully organized by Message priority
* and permitted to deliver in a round-robin fashion.
* See Messenger::dispatch_entry for details.
*/
class DispatchQueue {
  /**
   * A single queue entry: either a Message awaiting delivery or a
   * connection life-cycle event (one of the D_* codes below).
   */
  class QueueItem {
    int type;   // a D_* code, or -1 when this item carries a message
    ConnectionRef con;
    ceph::ref_t<Message> m;
  public:
    explicit QueueItem(const ceph::ref_t<Message>& m) : type(-1), con(0), m(m) {}
    QueueItem(int type, Connection *con) : type(type), con(con), m(0) {}
    /// True when this entry carries a D_* event code rather than a message.
    bool is_code() const {
      return type != -1;
    }
    int get_code () const {
      ceph_assert(is_code());
      return type;
    }
    const ceph::ref_t<Message>& get_message() {
      ceph_assert(!is_code());
      return m;
    }
    Connection *get_connection() {
      ceph_assert(is_code());
      return con.get();
    }
  };

  CephContext *cct;
  Messenger *msgr;
  mutable ceph::mutex lock;
  ceph::condition_variable cond;

  PrioritizedQueue<QueueItem, uint64_t> mqueue;

  // Queued messages indexed by receive stamp so get_max_age() can report
  // the age of the oldest undelivered message; marrival_map allows O(log n)
  // removal by message.
  std::set<std::pair<double, ceph::ref_t<Message>>> marrival;
  std::map<ceph::ref_t<Message>, decltype(marrival)::iterator> marrival_map;
  void add_arrival(const ceph::ref_t<Message>& m) {
    marrival_map.insert(
      make_pair(
	m,
	marrival.insert(std::make_pair(m->get_recv_stamp(), m)).first
	)
      );
  }
  void remove_arrival(const ceph::ref_t<Message>& m) {
    auto it = marrival_map.find(m);
    ceph_assert(it != marrival_map.end());
    marrival.erase(it->second);
    marrival_map.erase(it);
  }

  std::atomic<uint64_t> next_id;

  // Connection life-cycle events delivered through the queue.
  enum { D_CONNECT = 1, D_ACCEPT, D_BAD_REMOTE_RESET, D_BAD_RESET, D_CONN_REFUSED, D_NUM_CODES };

  /**
   * Enqueue a connection life-cycle event at the highest priority.
   * Shared implementation behind the public queue_*() helpers; does
   * nothing once the queue has been stopped.
   */
  void queue_event(int code, Connection *con) {
    std::lock_guard l{lock};
    if (stop)
      return;
    mqueue.enqueue_strict(
      0,
      CEPH_MSG_PRIO_HIGHEST,
      QueueItem(code, con));
    cond.notify_all();
  }

  /**
   * The DispatchThread runs dispatch_entry to empty out the dispatch_queue.
   */
  class DispatchThread : public Thread {
    DispatchQueue *dq;
  public:
    explicit DispatchThread(DispatchQueue *dq) : dq(dq) {}
    void *entry() override {
      dq->entry();
      return 0;
    }
  } dispatch_thread;

  // State for the loopback ("ms_local") delivery thread.
  ceph::mutex local_delivery_lock;
  ceph::condition_variable local_delivery_cond;
  bool stop_local_delivery;
  std::queue<std::pair<ceph::ref_t<Message>, int>> local_messages;
  class LocalDeliveryThread : public Thread {
    DispatchQueue *dq;
  public:
    explicit LocalDeliveryThread(DispatchQueue *dq) : dq(dq) {}
    void *entry() override {
      dq->run_local_delivery();
      return 0;
    }
  } local_delivery_thread;

  uint64_t pre_dispatch(const ceph::ref_t<Message>& m);
  void post_dispatch(const ceph::ref_t<Message>& m, uint64_t msize);

 public:
  /// Throttle preventing us from building up a big backlog waiting for dispatch
  Throttle dispatch_throttler;

  bool stop;
  void local_delivery(const ceph::ref_t<Message>& m, int priority);
  void local_delivery(Message* m, int priority) {
    return local_delivery(ceph::ref_t<Message>(m, false), priority); /* consume ref */
  }
  void run_local_delivery();

  double get_max_age(utime_t now) const;
  int get_queue_len() const {
    std::lock_guard l{lock};
    return mqueue.length();
  }

  /**
   * Release memory accounting back to the dispatch throttler.
   *
   * @param msize The amount of memory to release.
   */
  void dispatch_throttle_release(uint64_t msize);

  // The five event entry points below all share queue_event(); they exist
  // so call sites read naturally and the D_* codes stay private.
  void queue_connect(Connection *con) {
    queue_event(D_CONNECT, con);
  }
  void queue_accept(Connection *con) {
    queue_event(D_ACCEPT, con);
  }
  void queue_remote_reset(Connection *con) {
    queue_event(D_BAD_REMOTE_RESET, con);
  }
  void queue_reset(Connection *con) {
    queue_event(D_BAD_RESET, con);
  }
  void queue_refused(Connection *con) {
    queue_event(D_CONN_REFUSED, con);
  }

  bool can_fast_dispatch(const ceph::cref_t<Message> &m) const;
  void fast_dispatch(const ceph::ref_t<Message>& m);
  void fast_dispatch(Message* m) {
    return fast_dispatch(ceph::ref_t<Message>(m, false)); /* consume ref */
  }
  void fast_preprocess(const ceph::ref_t<Message>& m);
  void enqueue(const ceph::ref_t<Message>& m, int priority, uint64_t id);
  void enqueue(Message* m, int priority, uint64_t id) {
    return enqueue(ceph::ref_t<Message>(m, false), priority, id); /* consume ref */
  }
  void discard_queue(uint64_t id);
  void discard_local();
  /// Allocate a fresh fair-queueing class id (never reuses 0, the event class).
  uint64_t get_id() {
    return next_id++;
  }
  void start();
  void entry();
  void wait();
  void shutdown();
  bool is_started() const {return dispatch_thread.is_started();}

  DispatchQueue(CephContext *cct, Messenger *msgr, std::string &name)
    : cct(cct), msgr(msgr),
      lock(ceph::make_mutex("Messenger::DispatchQueue::lock" + name)),
      mqueue(cct->_conf->ms_pq_max_tokens_per_priority,
	     cct->_conf->ms_pq_min_cost),
      next_id(1),
      dispatch_thread(this),
      local_delivery_lock(ceph::make_mutex("Messenger::DispatchQueue::local_delivery_lock" + name)),
      stop_local_delivery(false),
      local_delivery_thread(this),
      dispatch_throttler(cct, std::string("msgr_dispatch_throttler-") + name,
			 cct->_conf->ms_dispatch_throttle_bytes),
      stop(false)
    {}
  ~DispatchQueue() {
    ceph_assert(mqueue.empty());
    ceph_assert(marrival.empty());
    ceph_assert(local_messages.empty());
  }
};
#endif
| 6,715 | 26.63786 | 100 |
h
|
null |
ceph-main/src/msg/Dispatcher.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_DISPATCHER_H
#define CEPH_DISPATCHER_H
#include <memory>
#include "include/buffer_fwd.h"
#include "include/ceph_assert.h"
#include "include/common_fwd.h"
#include "msg/MessageRef.h"
class Messenger;
class Connection;
class CryptoKey;
class KeyStore;
class Dispatcher {
public:
  explicit Dispatcher(CephContext *cct_)
    : cct(cct_)
  {
  }
  virtual ~Dispatcher() { }

  /**
   * The Messenger calls this function to query if you are capable
   * of "fast dispatch"ing a message. Indicating that you can fast
   * dispatch it requires that you:
   * 1) Handle the Message quickly and without taking long-term contended
   * locks. (This function is likely to be called in-line with message
   * receipt.)
   * 2) Be able to accept the Message even if you have not yet received
   * an ms_handle_accept() notification for the Connection it is associated
   * with, and even if you *have* called mark_down() or received an
   * ms_handle_reset() (or similar) call on the Connection. You will
   * not receive more than one dead "message" (and should generally be
   * prepared for that circumstance anyway, since the normal dispatch can begin,
   * then trigger Connection failure before it's percolated through your system).
   * We provide ms_handle_fast_[connect|accept] calls if you need them, under
   * similar speed and state constraints as fast_dispatch itself.
   * 3) Be able to make a determination on fast_dispatch without relying
   * on particular system state -- the ms_can_fast_dispatch() call might
   * be called multiple times on a single message; the state might change between
   * calling ms_can_fast_dispatch and ms_fast_dispatch; etc.
   *
   * @param m The message we want to fast dispatch.
   * @returns True if the message can be fast dispatched; false otherwise.
   */
  virtual bool ms_can_fast_dispatch(const Message *m) const { return false; }
  virtual bool ms_can_fast_dispatch2(const MessageConstRef& m) const {
    return ms_can_fast_dispatch(m.get());
  }
  /**
   * This function determines if a dispatcher is included in the
   * list of fast-dispatch capable Dispatchers.
   * @returns True if the Dispatcher can handle any messages via
   * fast dispatch; false otherwise.
   */
  virtual bool ms_can_fast_dispatch_any() const { return false; }
  /**
   * Perform a "fast dispatch" on a given message. See
   * ms_can_fast_dispatch() for the requirements.
   *
   * @param m The Message to fast dispatch.
   */
  virtual void ms_fast_dispatch(Message *m) { ceph_abort(); }

  /* ms_fast_dispatch2 because otherwise the child must define both */
  virtual void ms_fast_dispatch2(const MessageRef &m) {
    /* allow old style dispatch handling that expects a Message * with a floating ref */
    return ms_fast_dispatch(MessageRef(m).detach()); /* XXX N.B. always consumes ref */
  }

  /**
   * Let the Dispatcher preview a Message before it is dispatched. This
   * function is called on *every* Message, prior to the fast/regular dispatch
   * decision point, but it is only used on fast-dispatch capable systems. An
   * implementation of ms_fast_preprocess must be essentially lock-free in the
   * same way as the ms_fast_dispatch function is (in particular, ms_fast_preprocess
   * may be called while the Messenger holds internal locks that prevent progress from
   * other threads, so any locks it takes must be at the very bottom of the hierarchy).
   * Messages are delivered in receipt order within a single Connection, but there are
   * no guarantees across Connections. This makes it useful for some limited
   * coordination between Messages which can be fast_dispatch'ed and those which must
   * go through normal dispatch.
   *
   * @param m A message which has been received
   */
  virtual void ms_fast_preprocess(Message *m) {}

  /* ms_fast_preprocess2 because otherwise the child must define both */
  virtual void ms_fast_preprocess2(const MessageRef &m) {
    /* allow old style dispatch handling that expects a Message* */
    return ms_fast_preprocess(m.get());
  }

  /**
   * The Messenger calls this function to deliver a single message.
   *
   * @param m The message being delivered. You (the Dispatcher)
   * are given a single reference count on it.
   */
  virtual bool ms_dispatch(Message *m) {
    ceph_abort();
  }

  /* ms_dispatch2 because otherwise the child must define both */
  virtual bool ms_dispatch2(const MessageRef &m) {
    /* allow old style dispatch handling that expects a Message * with a floating ref */
    MessageRef mr(m);
    if (ms_dispatch(mr.get())) {
      mr.detach(); /* dispatcher consumed ref */
      return true;
    }
    return false;
  }

  /**
   * This function will be called whenever a Connection is newly-created
   * or reconnects in the Messenger.
   *
   * @param con The new Connection which has been established. You are not
   * granted a reference to it -- take one if you need one!
   */
  virtual void ms_handle_connect(Connection *con) {}

  /**
   * This function will be called synchronously whenever a Connection is
   * newly-created or reconnects in the Messenger, if you support fast
   * dispatch. It is guaranteed to be called before any messages are
   * dispatched.
   *
   * @param con The new Connection which has been established. You are not
   * granted a reference to it -- take one if you need one!
   */
  virtual void ms_handle_fast_connect(Connection *con) {}

  /**
   * Callback indicating we have accepted an incoming connection.
   *
   * @param con The (new or existing) Connection associated with the session
   */
  virtual void ms_handle_accept(Connection *con) {}

  /**
   * Callback indicating we have accepted an incoming connection, if you
   * support fast dispatch. It is guaranteed to be called before any messages
   * are dispatched.
   *
   * @param con The (new or existing) Connection associated with the session
   */
  virtual void ms_handle_fast_accept(Connection *con) {}

  /*
   * this indicates that the ordered+reliable delivery semantics have
   * been violated. Messages may have been lost due to a fault
   * in the network connection.
   * Only called on lossy Connections.
   *
   * @param con The Connection which broke. You are not granted
   * a reference to it.
   */
  virtual bool ms_handle_reset(Connection *con) = 0;

  /**
   * This indicates that the ordered+reliable delivery semantics
   * have been violated because the remote somehow reset.
   * It implies that incoming messages were dropped, and
   * probably some of our previous outgoing messages were too.
   *
   * @param con The Connection which broke. You are not granted
   * a reference to it.
   */
  virtual void ms_handle_remote_reset(Connection *con) = 0;

  /**
   * This indicates that the connection is both broken and further
   * connection attempts are failing because other side refuses
   * it.
   *
   * @param con The Connection which broke. You are not granted
   * a reference to it.
   */
  virtual bool ms_handle_refused(Connection *con) = 0;

  /**
   * @defgroup Authentication
   * @{
   */
  /**
   * handle successful authentication (msgr2)
   *
   * Authenticated result/state will be attached to the Connection.
   *
   * return 1 for success
   * return 0 for no action (let another Dispatcher handle it)
   * return <0 for failure (failure to parse caps, for instance)
   */
  virtual int ms_handle_authentication(Connection *con) {
    return 0;
  }
  /**
   * @} //Authentication
   */

protected:
  CephContext *cct;
private:
  // Non-copyable: a Dispatcher is registered with a Messenger by pointer.
  // Deleted functions (C++11) give clearer diagnostics than the old
  // declared-but-undefined private idiom used previously.
  Dispatcher(const Dispatcher &rhs) = delete;
  Dispatcher& operator=(const Dispatcher &rhs) = delete;
};
#endif
| 8,062 | 34.209607 | 88 |
h
|
null |
ceph-main/src/msg/Message.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifdef ENCODE_DUMP
# include <typeinfo>
# include <cxxabi.h>
#endif
#include <iostream>
#include "include/types.h"
#include "global/global_context.h"
#include "Message.h"
#include "messages/MPGStats.h"
#include "messages/MGenericMessage.h"
#include "messages/MPGStatsAck.h"
#include "messages/MStatfs.h"
#include "messages/MStatfsReply.h"
#include "messages/MGetPoolStats.h"
#include "messages/MGetPoolStatsReply.h"
#include "messages/MPoolOp.h"
#include "messages/MPoolOpReply.h"
#include "messages/PaxosServiceMessage.h"
#include "messages/MMonCommand.h"
#include "messages/MMonCommandAck.h"
#include "messages/MMonPaxos.h"
#include "messages/MConfig.h"
#include "messages/MGetConfig.h"
#include "messages/MKVData.h"
#include "messages/MMonProbe.h"
#include "messages/MMonJoin.h"
#include "messages/MMonElection.h"
#include "messages/MMonSync.h"
#include "messages/MMonPing.h"
#include "messages/MMonScrub.h"
#include "messages/MLog.h"
#include "messages/MLogAck.h"
#include "messages/MPing.h"
#include "messages/MCommand.h"
#include "messages/MCommandReply.h"
#include "messages/MBackfillReserve.h"
#include "messages/MRecoveryReserve.h"
#include "messages/MRoute.h"
#include "messages/MForward.h"
#include "messages/MOSDBoot.h"
#include "messages/MOSDAlive.h"
#include "messages/MOSDBeacon.h"
#include "messages/MOSDPGTemp.h"
#include "messages/MOSDFailure.h"
#include "messages/MOSDMarkMeDown.h"
#include "messages/MOSDMarkMeDead.h"
#include "messages/MOSDFull.h"
#include "messages/MOSDPing.h"
#include "messages/MOSDOp.h"
#include "messages/MOSDOpReply.h"
#include "messages/MOSDRepOp.h"
#include "messages/MOSDRepOpReply.h"
#include "messages/MOSDMap.h"
#include "messages/MMonGetOSDMap.h"
#include "messages/MMonGetPurgedSnaps.h"
#include "messages/MMonGetPurgedSnapsReply.h"
#include "messages/MOSDPGCreated.h"
#include "messages/MOSDPGNotify.h"
#include "messages/MOSDPGNotify2.h"
#include "messages/MOSDPGQuery.h"
#include "messages/MOSDPGQuery2.h"
#include "messages/MOSDPGLog.h"
#include "messages/MOSDPGRemove.h"
#include "messages/MOSDPGInfo.h"
#include "messages/MOSDPGInfo2.h"
#include "messages/MOSDPGCreate2.h"
#include "messages/MOSDPGTrim.h"
#include "messages/MOSDPGLease.h"
#include "messages/MOSDPGLeaseAck.h"
#include "messages/MOSDScrub2.h"
#include "messages/MOSDScrubReserve.h"
#include "messages/MOSDRepScrub.h"
#include "messages/MOSDRepScrubMap.h"
#include "messages/MOSDForceRecovery.h"
#include "messages/MOSDPGScan.h"
#include "messages/MOSDPGBackfill.h"
#include "messages/MOSDBackoff.h"
#include "messages/MOSDPGBackfillRemove.h"
#include "messages/MOSDPGRecoveryDelete.h"
#include "messages/MOSDPGRecoveryDeleteReply.h"
#include "messages/MOSDPGReadyToMerge.h"
#include "messages/MRemoveSnaps.h"
#include "messages/MMonMap.h"
#include "messages/MMonGetMap.h"
#include "messages/MMonGetVersion.h"
#include "messages/MMonGetVersionReply.h"
#include "messages/MMonHealth.h"
#include "messages/MMonHealthChecks.h"
#include "messages/MAuth.h"
#include "messages/MAuthReply.h"
#include "messages/MMonSubscribe.h"
#include "messages/MMonSubscribeAck.h"
#include "messages/MMonGlobalID.h"
#include "messages/MMonUsedPendingKeys.h"
#include "messages/MClientSession.h"
#include "messages/MClientReconnect.h"
#include "messages/MClientRequest.h"
#include "messages/MClientRequestForward.h"
#include "messages/MClientReply.h"
#include "messages/MClientReclaim.h"
#include "messages/MClientReclaimReply.h"
#include "messages/MClientCaps.h"
#include "messages/MClientCapRelease.h"
#include "messages/MClientLease.h"
#include "messages/MClientSnap.h"
#include "messages/MClientQuota.h"
#include "messages/MClientMetrics.h"
#include "messages/MMDSPeerRequest.h"
#include "messages/MMDSMap.h"
#include "messages/MFSMap.h"
#include "messages/MFSMapUser.h"
#include "messages/MMDSBeacon.h"
#include "messages/MMDSLoadTargets.h"
#include "messages/MMDSResolve.h"
#include "messages/MMDSResolveAck.h"
#include "messages/MMDSCacheRejoin.h"
#include "messages/MMDSFindIno.h"
#include "messages/MMDSFindInoReply.h"
#include "messages/MMDSOpenIno.h"
#include "messages/MMDSOpenInoReply.h"
#include "messages/MMDSSnapUpdate.h"
#include "messages/MMDSScrub.h"
#include "messages/MMDSScrubStats.h"
#include "messages/MDirUpdate.h"
#include "messages/MDiscover.h"
#include "messages/MDiscoverReply.h"
#include "messages/MMDSFragmentNotify.h"
#include "messages/MMDSFragmentNotifyAck.h"
#include "messages/MExportDirDiscover.h"
#include "messages/MExportDirDiscoverAck.h"
#include "messages/MExportDirCancel.h"
#include "messages/MExportDirPrep.h"
#include "messages/MExportDirPrepAck.h"
#include "messages/MExportDir.h"
#include "messages/MExportDirAck.h"
#include "messages/MExportDirNotify.h"
#include "messages/MExportDirNotifyAck.h"
#include "messages/MExportDirFinish.h"
#include "messages/MExportCaps.h"
#include "messages/MExportCapsAck.h"
#include "messages/MGatherCaps.h"
#include "messages/MDentryUnlink.h"
#include "messages/MDentryLink.h"
#include "messages/MHeartbeat.h"
#include "messages/MMDSTableRequest.h"
#include "messages/MMDSMetrics.h"
#include "messages/MMDSPing.h"
//#include "messages/MInodeUpdate.h"
#include "messages/MCacheExpire.h"
#include "messages/MInodeFileCaps.h"
#include "messages/MMgrBeacon.h"
#include "messages/MMgrMap.h"
#include "messages/MMgrDigest.h"
#include "messages/MMgrReport.h"
#include "messages/MMgrOpen.h"
#include "messages/MMgrUpdate.h"
#include "messages/MMgrClose.h"
#include "messages/MMgrConfigure.h"
#include "messages/MMonMgrReport.h"
#include "messages/MMgrCommand.h"
#include "messages/MMgrCommandReply.h"
#include "messages/MServiceMap.h"
#include "messages/MLock.h"
#include "messages/MWatchNotify.h"
#include "messages/MTimeCheck.h"
#include "messages/MTimeCheck2.h"
#include "common/config.h"
#include "messages/MOSDPGPush.h"
#include "messages/MOSDPGPushReply.h"
#include "messages/MOSDPGPull.h"
#include "messages/MOSDECSubOpWrite.h"
#include "messages/MOSDECSubOpWriteReply.h"
#include "messages/MOSDECSubOpRead.h"
#include "messages/MOSDECSubOpReadReply.h"
#include "messages/MOSDPGUpdateLogMissing.h"
#include "messages/MOSDPGUpdateLogMissingReply.h"
#ifdef WITH_BLKIN
#include "Messenger.h"
#endif
#define DEBUGLVL 10 // debug level of output
#define dout_subsys ceph_subsys_ms
void Message::encode(uint64_t features, int crcflags, bool skip_header_crc)
{
  // Encode the payload (only if not already encoded) and fill in the wire
  // envelope: section lengths, header/footer CRCs, and completion flags.
  // encode and copy out of *m
  if (empty_payload()) {
    ceph_assert(middle.length() == 0);
    encode_payload(features);
    // Account the freshly encoded bytes against the byte throttler, if any.
    if (byte_throttler) {
      byte_throttler->take(payload.length() + middle.length());
    }
    // if the encoder didn't specify past compatibility, we assume it
    // is incompatible.
    if (header.compat_version == 0)
      header.compat_version = header.version;
  }
  if (crcflags & MSG_CRC_HEADER)
    calc_front_crc();
  // update envelope
  header.front_len = get_payload().length();
  header.middle_len = get_middle().length();
  header.data_len = get_data().length();
  if (!skip_header_crc && (crcflags & MSG_CRC_HEADER))
    calc_header_crc();
  footer.flags = CEPH_MSG_FOOTER_COMPLETE;
  if (crcflags & MSG_CRC_DATA) {
    calc_data_crc();
#ifdef ENCODE_DUMP
    // Debug-build facility: dump a sampled subset of encoded messages to
    // files under the ENCODE_DUMP directory for offline inspection.
    bufferlist bl;
    encode(get_header(), bl);
    // dump the old footer format
    ceph_msg_footer_old old_footer;
    old_footer.front_crc = footer.front_crc;
    old_footer.middle_crc = footer.middle_crc;
    old_footer.data_crc = footer.data_crc;
    old_footer.flags = footer.flags;
    encode(old_footer, bl);
    encode(get_payload(), bl);
    encode(get_middle(), bl);
    encode(get_data(), bl);
    // this is almost an exponential backoff, except because we count
    // bits we tend to sample things we encode later, which should be
    // more representative.
    static int i = 0;
    i++;
    int bits = 0;
    for (unsigned t = i; t; bits++)
      t &= t - 1;
    if (bits <= 2) {
      char fn[200];
      int status;
      snprintf(fn, sizeof(fn), ENCODE_STRINGIFY(ENCODE_DUMP) "/%s__%d.%x",
	       abi::__cxa_demangle(typeid(*this).name(), 0, 0, &status),
	       getpid(), i++);
      int fd = ::open(fn, O_WRONLY|O_TRUNC|O_CREAT|O_CLOEXEC|O_BINARY, 0644);
      if (fd >= 0) {
	bl.write_fd(fd);
	::close(fd);
      }
    }
#endif
  } else {
    footer.flags = (unsigned)footer.flags | CEPH_MSG_FOOTER_NOCRC;
  }
}
void Message::dump(ceph::Formatter *f) const
{
  // The formatter output is just the one-line human-readable print() form.
  std::ostringstream oss;
  print(oss);
  f->dump_string("summary", oss.str());
}
Message *decode_message(CephContext *cct,
int crcflags,
ceph_msg_header& header,
ceph_msg_footer& footer,
ceph::bufferlist& front,
ceph::bufferlist& middle,
ceph::bufferlist& data,
Message::ConnectionRef conn)
{
#ifdef WITH_SEASTAR
// In crimson, conn is independently maintained outside Message.
ceph_assert(conn == nullptr);
#endif
// verify crc
if (crcflags & MSG_CRC_HEADER) {
__u32 front_crc = front.crc32c(0);
__u32 middle_crc = middle.crc32c(0);
if (front_crc != footer.front_crc) {
if (cct) {
ldout(cct, 0) << "bad crc in front " << front_crc << " != exp " << footer.front_crc
#ifndef WITH_SEASTAR
<< " from " << conn->get_peer_addr()
#endif
<< dendl;
ldout(cct, 20) << " ";
front.hexdump(*_dout);
*_dout << dendl;
}
return 0;
}
if (middle_crc != footer.middle_crc) {
if (cct) {
ldout(cct, 0) << "bad crc in middle " << middle_crc << " != exp " << footer.middle_crc
#ifndef WITH_SEASTAR
<< " from " << conn->get_peer_addr()
#endif
<< dendl;
ldout(cct, 20) << " ";
middle.hexdump(*_dout);
*_dout << dendl;
}
return 0;
}
}
if (crcflags & MSG_CRC_DATA) {
if ((footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0) {
__u32 data_crc = data.crc32c(0);
if (data_crc != footer.data_crc) {
if (cct) {
ldout(cct, 0) << "bad crc in data " << data_crc << " != exp " << footer.data_crc
#ifndef WITH_SEASTAR
<< " from " << conn->get_peer_addr()
#endif
<< dendl;
ldout(cct, 20) << " ";
data.hexdump(*_dout);
*_dout << dendl;
}
return 0;
}
}
}
// make message
ceph::ref_t<Message> m;
int type = header.type;
switch (type) {
// -- with payload --
using ceph::make_message;
case MSG_PGSTATS:
m = make_message<MPGStats>();
break;
case MSG_PGSTATSACK:
m = make_message<MPGStatsAck>();
break;
case CEPH_MSG_STATFS:
m = make_message<MStatfs>();
break;
case CEPH_MSG_STATFS_REPLY:
m = make_message<MStatfsReply>();
break;
case MSG_GETPOOLSTATS:
m = make_message<MGetPoolStats>();
break;
case MSG_GETPOOLSTATSREPLY:
m = make_message<MGetPoolStatsReply>();
break;
case CEPH_MSG_POOLOP:
m = make_message<MPoolOp>();
break;
case CEPH_MSG_POOLOP_REPLY:
m = make_message<MPoolOpReply>();
break;
case MSG_MON_COMMAND:
m = make_message<MMonCommand>();
break;
case MSG_MON_COMMAND_ACK:
m = make_message<MMonCommandAck>();
break;
case MSG_MON_PAXOS:
m = make_message<MMonPaxos>();
break;
case MSG_CONFIG:
m = make_message<MConfig>();
break;
case MSG_GET_CONFIG:
m = make_message<MGetConfig>();
break;
case MSG_KV_DATA:
m = make_message<MKVData>();
break;
case MSG_MON_PROBE:
m = make_message<MMonProbe>();
break;
case MSG_MON_JOIN:
m = make_message<MMonJoin>();
break;
case MSG_MON_ELECTION:
m = make_message<MMonElection>();
break;
case MSG_MON_SYNC:
m = make_message<MMonSync>();
break;
case MSG_MON_PING:
m = make_message<MMonPing>();
break;
case MSG_MON_SCRUB:
m = make_message<MMonScrub>();
break;
case MSG_LOG:
m = make_message<MLog>();
break;
case MSG_LOGACK:
m = make_message<MLogAck>();
break;
case CEPH_MSG_PING:
m = make_message<MPing>();
break;
case MSG_COMMAND:
m = make_message<MCommand>();
break;
case MSG_COMMAND_REPLY:
m = make_message<MCommandReply>();
break;
case MSG_OSD_BACKFILL_RESERVE:
m = make_message<MBackfillReserve>();
break;
case MSG_OSD_RECOVERY_RESERVE:
m = make_message<MRecoveryReserve>();
break;
case MSG_OSD_FORCE_RECOVERY:
m = make_message<MOSDForceRecovery>();
break;
case MSG_ROUTE:
m = make_message<MRoute>();
break;
case MSG_FORWARD:
m = make_message<MForward>();
break;
case CEPH_MSG_MON_MAP:
m = make_message<MMonMap>();
break;
case CEPH_MSG_MON_GET_MAP:
m = make_message<MMonGetMap>();
break;
case CEPH_MSG_MON_GET_OSDMAP:
m = make_message<MMonGetOSDMap>();
break;
case MSG_MON_GET_PURGED_SNAPS:
m = make_message<MMonGetPurgedSnaps>();
break;
case MSG_MON_GET_PURGED_SNAPS_REPLY:
m = make_message<MMonGetPurgedSnapsReply>();
break;
case CEPH_MSG_MON_GET_VERSION:
m = make_message<MMonGetVersion>();
break;
case CEPH_MSG_MON_GET_VERSION_REPLY:
m = make_message<MMonGetVersionReply>();
break;
case MSG_OSD_BOOT:
m = make_message<MOSDBoot>();
break;
case MSG_OSD_ALIVE:
m = make_message<MOSDAlive>();
break;
case MSG_OSD_BEACON:
m = make_message<MOSDBeacon>();
break;
case MSG_OSD_PGTEMP:
m = make_message<MOSDPGTemp>();
break;
case MSG_OSD_FAILURE:
m = make_message<MOSDFailure>();
break;
case MSG_OSD_MARK_ME_DOWN:
m = make_message<MOSDMarkMeDown>();
break;
case MSG_OSD_MARK_ME_DEAD:
m = make_message<MOSDMarkMeDead>();
break;
case MSG_OSD_FULL:
m = make_message<MOSDFull>();
break;
case MSG_OSD_PING:
m = make_message<MOSDPing>();
break;
case CEPH_MSG_OSD_OP:
m = make_message<MOSDOp>();
break;
case CEPH_MSG_OSD_OPREPLY:
m = make_message<MOSDOpReply>();
break;
case MSG_OSD_REPOP:
m = make_message<MOSDRepOp>();
break;
case MSG_OSD_REPOPREPLY:
m = make_message<MOSDRepOpReply>();
break;
case MSG_OSD_PG_CREATED:
m = make_message<MOSDPGCreated>();
break;
case MSG_OSD_PG_UPDATE_LOG_MISSING:
m = make_message<MOSDPGUpdateLogMissing>();
break;
case MSG_OSD_PG_UPDATE_LOG_MISSING_REPLY:
m = make_message<MOSDPGUpdateLogMissingReply>();
break;
case CEPH_MSG_OSD_BACKOFF:
m = make_message<MOSDBackoff>();
break;
case CEPH_MSG_OSD_MAP:
m = make_message<MOSDMap>();
break;
case CEPH_MSG_WATCH_NOTIFY:
m = make_message<MWatchNotify>();
break;
case MSG_OSD_PG_NOTIFY:
m = make_message<MOSDPGNotify>();
break;
case MSG_OSD_PG_NOTIFY2:
m = make_message<MOSDPGNotify2>();
break;
case MSG_OSD_PG_QUERY:
m = make_message<MOSDPGQuery>();
break;
case MSG_OSD_PG_QUERY2:
m = make_message<MOSDPGQuery2>();
break;
case MSG_OSD_PG_LOG:
m = make_message<MOSDPGLog>();
break;
case MSG_OSD_PG_REMOVE:
m = make_message<MOSDPGRemove>();
break;
case MSG_OSD_PG_INFO:
m = make_message<MOSDPGInfo>();
break;
case MSG_OSD_PG_INFO2:
m = make_message<MOSDPGInfo2>();
break;
case MSG_OSD_PG_CREATE2:
m = make_message<MOSDPGCreate2>();
break;
case MSG_OSD_PG_TRIM:
m = make_message<MOSDPGTrim>();
break;
case MSG_OSD_PG_LEASE:
m = make_message<MOSDPGLease>();
break;
case MSG_OSD_PG_LEASE_ACK:
m = make_message<MOSDPGLeaseAck>();
break;
case MSG_OSD_SCRUB2:
m = make_message<MOSDScrub2>();
break;
case MSG_OSD_SCRUB_RESERVE:
m = make_message<MOSDScrubReserve>();
break;
case MSG_REMOVE_SNAPS:
m = make_message<MRemoveSnaps>();
break;
case MSG_OSD_REP_SCRUB:
m = make_message<MOSDRepScrub>();
break;
case MSG_OSD_REP_SCRUBMAP:
m = make_message<MOSDRepScrubMap>();
break;
case MSG_OSD_PG_SCAN:
m = make_message<MOSDPGScan>();
break;
case MSG_OSD_PG_BACKFILL:
m = make_message<MOSDPGBackfill>();
break;
case MSG_OSD_PG_BACKFILL_REMOVE:
m = make_message<MOSDPGBackfillRemove>();
break;
case MSG_OSD_PG_PUSH:
m = make_message<MOSDPGPush>();
break;
case MSG_OSD_PG_PULL:
m = make_message<MOSDPGPull>();
break;
case MSG_OSD_PG_PUSH_REPLY:
m = make_message<MOSDPGPushReply>();
break;
case MSG_OSD_PG_RECOVERY_DELETE:
m = make_message<MOSDPGRecoveryDelete>();
break;
case MSG_OSD_PG_RECOVERY_DELETE_REPLY:
m = make_message<MOSDPGRecoveryDeleteReply>();
break;
case MSG_OSD_PG_READY_TO_MERGE:
m = make_message<MOSDPGReadyToMerge>();
break;
case MSG_OSD_EC_WRITE:
m = make_message<MOSDECSubOpWrite>();
break;
case MSG_OSD_EC_WRITE_REPLY:
m = make_message<MOSDECSubOpWriteReply>();
break;
case MSG_OSD_EC_READ:
m = make_message<MOSDECSubOpRead>();
break;
case MSG_OSD_EC_READ_REPLY:
m = make_message<MOSDECSubOpReadReply>();
break;
// auth
case CEPH_MSG_AUTH:
m = make_message<MAuth>();
break;
case CEPH_MSG_AUTH_REPLY:
m = make_message<MAuthReply>();
break;
case MSG_MON_GLOBAL_ID:
m = make_message<MMonGlobalID>();
break;
case MSG_MON_USED_PENDING_KEYS:
m = make_message<MMonUsedPendingKeys>();
break;
// clients
case CEPH_MSG_MON_SUBSCRIBE:
m = make_message<MMonSubscribe>();
break;
case CEPH_MSG_MON_SUBSCRIBE_ACK:
m = make_message<MMonSubscribeAck>();
break;
case CEPH_MSG_CLIENT_SESSION:
m = make_message<MClientSession>();
break;
case CEPH_MSG_CLIENT_RECONNECT:
m = make_message<MClientReconnect>();
break;
case CEPH_MSG_CLIENT_REQUEST:
m = make_message<MClientRequest>();
break;
case CEPH_MSG_CLIENT_REQUEST_FORWARD:
m = make_message<MClientRequestForward>();
break;
case CEPH_MSG_CLIENT_REPLY:
m = make_message<MClientReply>();
break;
case CEPH_MSG_CLIENT_RECLAIM:
m = make_message<MClientReclaim>();
break;
case CEPH_MSG_CLIENT_RECLAIM_REPLY:
m = make_message<MClientReclaimReply>();
break;
case CEPH_MSG_CLIENT_CAPS:
m = make_message<MClientCaps>();
break;
case CEPH_MSG_CLIENT_CAPRELEASE:
m = make_message<MClientCapRelease>();
break;
case CEPH_MSG_CLIENT_LEASE:
m = make_message<MClientLease>();
break;
case CEPH_MSG_CLIENT_SNAP:
m = make_message<MClientSnap>();
break;
case CEPH_MSG_CLIENT_QUOTA:
m = make_message<MClientQuota>();
break;
case CEPH_MSG_CLIENT_METRICS:
m = make_message<MClientMetrics>();
break;
// mds
case MSG_MDS_PEER_REQUEST:
m = make_message<MMDSPeerRequest>();
break;
case CEPH_MSG_MDS_MAP:
m = make_message<MMDSMap>();
break;
case CEPH_MSG_FS_MAP:
m = make_message<MFSMap>();
break;
case CEPH_MSG_FS_MAP_USER:
m = make_message<MFSMapUser>();
break;
case MSG_MDS_BEACON:
m = make_message<MMDSBeacon>();
break;
case MSG_MDS_OFFLOAD_TARGETS:
m = make_message<MMDSLoadTargets>();
break;
case MSG_MDS_RESOLVE:
m = make_message<MMDSResolve>();
break;
case MSG_MDS_RESOLVEACK:
m = make_message<MMDSResolveAck>();
break;
case MSG_MDS_CACHEREJOIN:
m = make_message<MMDSCacheRejoin>();
break;
case MSG_MDS_DIRUPDATE:
m = make_message<MDirUpdate>();
break;
case MSG_MDS_DISCOVER:
m = make_message<MDiscover>();
break;
case MSG_MDS_DISCOVERREPLY:
m = make_message<MDiscoverReply>();
break;
case MSG_MDS_FINDINO:
m = make_message<MMDSFindIno>();
break;
case MSG_MDS_FINDINOREPLY:
m = make_message<MMDSFindInoReply>();
break;
case MSG_MDS_OPENINO:
m = make_message<MMDSOpenIno>();
break;
case MSG_MDS_OPENINOREPLY:
m = make_message<MMDSOpenInoReply>();
break;
case MSG_MDS_SNAPUPDATE:
m = make_message<MMDSSnapUpdate>();
break;
case MSG_MDS_FRAGMENTNOTIFY:
m = make_message<MMDSFragmentNotify>();
break;
case MSG_MDS_FRAGMENTNOTIFYACK:
m = make_message<MMDSFragmentNotifyAck>();
break;
case MSG_MDS_SCRUB:
m = make_message<MMDSScrub>();
break;
case MSG_MDS_SCRUB_STATS:
m = make_message<MMDSScrubStats>();
break;
case MSG_MDS_EXPORTDIRDISCOVER:
m = make_message<MExportDirDiscover>();
break;
case MSG_MDS_EXPORTDIRDISCOVERACK:
m = make_message<MExportDirDiscoverAck>();
break;
case MSG_MDS_EXPORTDIRCANCEL:
m = make_message<MExportDirCancel>();
break;
case MSG_MDS_EXPORTDIR:
m = make_message<MExportDir>();
break;
case MSG_MDS_EXPORTDIRACK:
m = make_message<MExportDirAck>();
break;
case MSG_MDS_EXPORTDIRFINISH:
m = make_message<MExportDirFinish>();
break;
case MSG_MDS_EXPORTDIRNOTIFY:
m = make_message<MExportDirNotify>();
break;
case MSG_MDS_EXPORTDIRNOTIFYACK:
m = make_message<MExportDirNotifyAck>();
break;
case MSG_MDS_EXPORTDIRPREP:
m = make_message<MExportDirPrep>();
break;
case MSG_MDS_EXPORTDIRPREPACK:
m = make_message<MExportDirPrepAck>();
break;
case MSG_MDS_EXPORTCAPS:
m = make_message<MExportCaps>();
break;
case MSG_MDS_EXPORTCAPSACK:
m = make_message<MExportCapsAck>();
break;
case MSG_MDS_GATHERCAPS:
m = make_message<MGatherCaps>();
break;
case MSG_MDS_DENTRYUNLINK_ACK:
m = make_message<MDentryUnlinkAck>();
break;
case MSG_MDS_DENTRYUNLINK:
m = make_message<MDentryUnlink>();
break;
case MSG_MDS_DENTRYLINK:
m = make_message<MDentryLink>();
break;
case MSG_MDS_HEARTBEAT:
m = make_message<MHeartbeat>();
break;
case MSG_MDS_CACHEEXPIRE:
m = make_message<MCacheExpire>();
break;
case MSG_MDS_TABLE_REQUEST:
m = make_message<MMDSTableRequest>();
break;
/* case MSG_MDS_INODEUPDATE:
m = make_message<MInodeUpdate>();
break;
*/
case MSG_MDS_INODEFILECAPS:
m = make_message<MInodeFileCaps>();
break;
case MSG_MDS_LOCK:
m = make_message<MLock>();
break;
case MSG_MDS_METRICS:
m = make_message<MMDSMetrics>();
break;
case MSG_MDS_PING:
m = make_message<MMDSPing>();
break;
case MSG_MGR_BEACON:
m = make_message<MMgrBeacon>();
break;
case MSG_MON_MGR_REPORT:
m = make_message<MMonMgrReport>();
break;
case MSG_SERVICE_MAP:
m = make_message<MServiceMap>();
break;
case MSG_MGR_MAP:
m = make_message<MMgrMap>();
break;
case MSG_MGR_DIGEST:
m = make_message<MMgrDigest>();
break;
case MSG_MGR_COMMAND:
m = make_message<MMgrCommand>();
break;
case MSG_MGR_COMMAND_REPLY:
m = make_message<MMgrCommandReply>();
break;
case MSG_MGR_OPEN:
m = make_message<MMgrOpen>();
break;
case MSG_MGR_UPDATE:
m = make_message<MMgrUpdate>();
break;
case MSG_MGR_CLOSE:
m = make_message<MMgrClose>();
break;
case MSG_MGR_REPORT:
m = make_message<MMgrReport>();
break;
case MSG_MGR_CONFIGURE:
m = make_message<MMgrConfigure>();
break;
case MSG_TIMECHECK:
m = make_message<MTimeCheck>();
break;
case MSG_TIMECHECK2:
m = make_message<MTimeCheck2>();
break;
case MSG_MON_HEALTH:
m = make_message<MMonHealth>();
break;
case MSG_MON_HEALTH_CHECKS:
m = make_message<MMonHealthChecks>();
break;
// -- simple messages without payload --
case CEPH_MSG_SHUTDOWN:
m = make_message<MGenericMessage>(type);
break;
default:
if (cct) {
ldout(cct, 0) << "can't decode unknown message type " << type << " MSG_AUTH=" << CEPH_MSG_AUTH << dendl;
if (cct->_conf->ms_die_on_bad_msg)
ceph_abort();
}
return 0;
}
m->set_cct(cct);
// m->header.version, if non-zero, should be populated with the
// newest version of the encoding the code supports. If set, check
// it against compat_version.
if (m->get_header().version &&
m->get_header().version < header.compat_version) {
if (cct) {
ldout(cct, 0) << "will not decode message of type " << type
<< " version " << header.version
<< " because compat_version " << header.compat_version
<< " > supported version " << m->get_header().version << dendl;
if (cct->_conf->ms_die_on_bad_msg)
ceph_abort();
}
return 0;
}
m->set_connection(std::move(conn));
m->set_header(header);
m->set_footer(footer);
m->set_payload(front);
m->set_middle(middle);
m->set_data(data);
try {
m->decode_payload();
}
catch (const ceph::buffer::error &e) {
if (cct) {
lderr(cct) << "failed to decode message of type " << type
<< " v" << header.version
<< ": " << e.what() << dendl;
ldout(cct, ceph::dout::need_dynamic(
cct->_conf->ms_dump_corrupt_message_level)) << "dump: \n";
m->get_payload().hexdump(*_dout);
*_dout << dendl;
if (cct->_conf->ms_die_on_bad_msg)
ceph_abort();
}
return 0;
}
// done!
return m.detach();
}
// Serialize this message's blkin trace info onto the wire.  When no trace
// is attached, a zeroed record is emitted so the receiver always finds a
// fixed-size trace blob to decode.
void Message::encode_trace(ceph::bufferlist &bl, uint64_t features) const
{
  using ceph::encode;
  // Stand-in record for the "no trace attached" case.
  static const blkin_trace_info empty = { 0, 0, 0 };
  const auto info = trace.get_info();
  encode(info ? *info : empty, bl);
}
// Decode the trace info blob emitted by encode_trace() and, when blkin
// tracing is compiled in, attach/initialize this message's trace.
//
// @param p       iterator positioned at the encoded blkin_trace_info
// @param create  force-create a local trace even if the sender did not
//                provide one on the wire
void Message::decode_trace(ceph::bufferlist::const_iterator &p, bool create)
{
  // The trace blob is always present on the wire (encode_trace() writes a
  // zeroed record when there is no trace), so decode unconditionally.
  blkin_trace_info info = {};
  decode(info, p);

#ifdef WITH_BLKIN
  // Without a connection we cannot reach the messenger's trace endpoint.
  if (!connection)
    return;

  const auto msgr = connection->get_messenger();
  const auto endpoint = msgr->get_trace_endpoint();
  if (info.trace_id) {
    // Continue the trace the sender started (child span).
    trace.init(get_type_name().data(), endpoint, &info, true);
    trace.event("decoded trace");
  } else if (create || (msgr->get_myname().is_osd() &&
            msgr->cct->_conf->osd_blkin_trace_all)) {
    // create a trace even if we didn't get one on the wire
    trace.init(get_type_name().data(), endpoint);
    trace.event("created trace");
  }
  trace.keyval("tid", get_tid());
  trace.keyval("entity type", get_source().type_str());
  trace.keyval("entity num", get_source().num());
#endif
}
// This routine is not used for ordinary message traffic; it encapsulates a
// message for forwarding/routing, and is exercised by a backward
// compatibility test (which therefore only covers these functions).  To
// avoid compatibility breakage we deliberately emit the old footer format
// that has no room for a message signature.  Eventually we should fix
// that. PLR
void encode_message(Message *msg, uint64_t features, ceph::bufferlist& payload)
{
  msg->encode(features, MSG_CRC_ALL);
  encode(msg->get_header(), payload);

  // Downgrade the footer to the legacy (pre-signature) wire format. PLR
  const ceph_msg_footer &full = msg->get_footer();
  ceph_msg_footer_old legacy;
  legacy.front_crc = full.front_crc;
  legacy.middle_crc = full.middle_crc;
  legacy.data_crc = full.data_crc;
  legacy.flags = full.flags;
  encode(legacy, payload);

  using ceph::encode;
  encode(msg->get_payload(), payload);
  encode(msg->get_middle(), payload);
  encode(msg->get_data(), payload);
}
// Counterpart of the encapsulating encode_message() above: reads the old
// footer format off the wire, then upgrades it to the current footer so the
// other decode_message() overload needs no changes.  A zero signature is
// slipped in here, so any later signature check will fail. PLR
Message *decode_message(CephContext *cct, int crcflags, ceph::bufferlist::const_iterator& p)
{
  ceph_msg_header hdr;
  ceph_msg_footer_old legacy;
  ceph_msg_footer ftr;
  ceph::bufferlist front, middle, data;
  decode(hdr, p);
  decode(legacy, p);

  // Promote the legacy footer; there is no signature on the wire.
  ftr.front_crc = legacy.front_crc;
  ftr.middle_crc = legacy.middle_crc;
  ftr.data_crc = legacy.data_crc;
  ftr.flags = legacy.flags;
  ftr.sig = 0;

  using ceph::decode;
  decode(front, p);
  decode(middle, p);
  decode(data, p);
  return decode_message(cct, crcflags, hdr, ftr, front, middle, data, nullptr);
}
| 27,917 | 24.612844 | 110 |
cc
|
null |
ceph-main/src/msg/Message.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MESSAGE_H
#define CEPH_MESSAGE_H
#include <concepts>
#include <cstdlib>
#include <ostream>
#include <string_view>
#include <boost/intrusive/list.hpp>
#if FMT_VERSION >= 90000
#include <fmt/ostream.h>
#endif
#include "include/Context.h"
#include "common/RefCountedObj.h"
#include "common/ThrottleInterface.h"
#include "common/config.h"
#include "common/ref.h"
#include "common/debug.h"
#include "common/zipkin_trace.h"
#include "include/ceph_assert.h" // Because intrusive_ptr clobbers our assert...
#include "include/buffer.h"
#include "include/types.h"
#include "msg/Connection.h"
#include "msg/MessageRef.h"
#include "msg_types.h"
// monitor internal
#define MSG_MON_SCRUB 64
#define MSG_MON_ELECTION 65
#define MSG_MON_PAXOS 66
#define MSG_MON_PROBE 67
#define MSG_MON_JOIN 68
#define MSG_MON_SYNC 69
#define MSG_MON_PING 140
/* monitor <-> mon admin tool */
#define MSG_MON_COMMAND 50
#define MSG_MON_COMMAND_ACK 51
#define MSG_LOG 52
#define MSG_LOGACK 53
#define MSG_GETPOOLSTATS 58
#define MSG_GETPOOLSTATSREPLY 59
#define MSG_MON_GLOBAL_ID 60
#define MSG_MON_USED_PENDING_KEYS 141
#define MSG_ROUTE 47
#define MSG_FORWARD 46
#define MSG_PAXOS 40
#define MSG_CONFIG 62
#define MSG_GET_CONFIG 63
#define MSG_KV_DATA 54
#define MSG_MON_GET_PURGED_SNAPS 76
#define MSG_MON_GET_PURGED_SNAPS_REPLY 77
// osd internal
#define MSG_OSD_PING 70
#define MSG_OSD_BOOT 71
#define MSG_OSD_FAILURE 72
#define MSG_OSD_ALIVE 73
#define MSG_OSD_MARK_ME_DOWN 74
#define MSG_OSD_FULL 75
#define MSG_OSD_MARK_ME_DEAD 123
// removed right after luminous
//#define MSG_OSD_SUBOP 76
//#define MSG_OSD_SUBOPREPLY 77
#define MSG_OSD_PGTEMP 78
#define MSG_OSD_BEACON 79
#define MSG_OSD_PG_NOTIFY 80
#define MSG_OSD_PG_NOTIFY2 130
#define MSG_OSD_PG_QUERY 81
#define MSG_OSD_PG_QUERY2 131
#define MSG_OSD_PG_LOG 83
#define MSG_OSD_PG_REMOVE 84
#define MSG_OSD_PG_INFO 85
#define MSG_OSD_PG_INFO2 132
#define MSG_OSD_PG_TRIM 86
#define MSG_PGSTATS 87
#define MSG_PGSTATSACK 88
#define MSG_OSD_PG_CREATE 89
#define MSG_REMOVE_SNAPS 90
#define MSG_OSD_SCRUB 91
#define MSG_OSD_SCRUB_RESERVE 92 // previous PG_MISSING
#define MSG_OSD_REP_SCRUB 93
#define MSG_OSD_PG_SCAN 94
#define MSG_OSD_PG_BACKFILL 95
#define MSG_OSD_PG_BACKFILL_REMOVE 96
#define MSG_COMMAND 97
#define MSG_COMMAND_REPLY 98
#define MSG_OSD_BACKFILL_RESERVE 99
#define MSG_OSD_RECOVERY_RESERVE 150
#define MSG_OSD_FORCE_RECOVERY 151
#define MSG_OSD_PG_PUSH 105
#define MSG_OSD_PG_PULL 106
#define MSG_OSD_PG_PUSH_REPLY 107
#define MSG_OSD_EC_WRITE 108
#define MSG_OSD_EC_WRITE_REPLY 109
#define MSG_OSD_EC_READ 110
#define MSG_OSD_EC_READ_REPLY 111
#define MSG_OSD_REPOP 112
#define MSG_OSD_REPOPREPLY 113
#define MSG_OSD_PG_UPDATE_LOG_MISSING 114
#define MSG_OSD_PG_UPDATE_LOG_MISSING_REPLY 115
#define MSG_OSD_PG_CREATED 116
#define MSG_OSD_REP_SCRUBMAP 117
#define MSG_OSD_PG_RECOVERY_DELETE 118
#define MSG_OSD_PG_RECOVERY_DELETE_REPLY 119
#define MSG_OSD_PG_CREATE2 120
#define MSG_OSD_SCRUB2 121
#define MSG_OSD_PG_READY_TO_MERGE 122
#define MSG_OSD_PG_LEASE 133
#define MSG_OSD_PG_LEASE_ACK 134
// *** MDS ***
#define MSG_MDS_BEACON 100 // to monitor
#define MSG_MDS_PEER_REQUEST 101
#define MSG_MDS_TABLE_REQUEST 102
#define MSG_MDS_SCRUB 135
// 150 already in use (MSG_OSD_RECOVERY_RESERVE)
#define MSG_MDS_RESOLVE 0x200 // 0x2xx are for mdcache of mds
#define MSG_MDS_RESOLVEACK 0x201
#define MSG_MDS_CACHEREJOIN 0x202
#define MSG_MDS_DISCOVER 0x203
#define MSG_MDS_DISCOVERREPLY 0x204
#define MSG_MDS_INODEUPDATE 0x205
#define MSG_MDS_DIRUPDATE 0x206
#define MSG_MDS_CACHEEXPIRE 0x207
#define MSG_MDS_DENTRYUNLINK 0x208
#define MSG_MDS_FRAGMENTNOTIFY 0x209
#define MSG_MDS_OFFLOAD_TARGETS 0x20a
#define MSG_MDS_DENTRYLINK 0x20c
#define MSG_MDS_FINDINO 0x20d
#define MSG_MDS_FINDINOREPLY 0x20e
#define MSG_MDS_OPENINO 0x20f
#define MSG_MDS_OPENINOREPLY 0x210
#define MSG_MDS_SNAPUPDATE 0x211
#define MSG_MDS_FRAGMENTNOTIFYACK 0x212
#define MSG_MDS_DENTRYUNLINK_ACK 0x213
#define MSG_MDS_LOCK 0x300 // 0x3xx are for locker of mds
#define MSG_MDS_INODEFILECAPS 0x301
#define MSG_MDS_EXPORTDIRDISCOVER 0x449 // 0x4xx are for migrator of mds
#define MSG_MDS_EXPORTDIRDISCOVERACK 0x450
#define MSG_MDS_EXPORTDIRCANCEL 0x451
#define MSG_MDS_EXPORTDIRPREP 0x452
#define MSG_MDS_EXPORTDIRPREPACK 0x453
#define MSG_MDS_EXPORTDIRWARNING 0x454
#define MSG_MDS_EXPORTDIRWARNINGACK 0x455
#define MSG_MDS_EXPORTDIR 0x456
#define MSG_MDS_EXPORTDIRACK 0x457
#define MSG_MDS_EXPORTDIRNOTIFY 0x458
#define MSG_MDS_EXPORTDIRNOTIFYACK 0x459
#define MSG_MDS_EXPORTDIRFINISH 0x460
#define MSG_MDS_EXPORTCAPS 0x470
#define MSG_MDS_EXPORTCAPSACK 0x471
#define MSG_MDS_GATHERCAPS 0x472
#define MSG_MDS_HEARTBEAT 0x500 // for mds load balancer
#define MSG_MDS_METRICS 0x501 // for mds metric aggregator
#define MSG_MDS_PING 0x502 // for mds pinger
#define MSG_MDS_SCRUB_STATS 0x503 // for mds scrub stack
// *** generic ***
#define MSG_TIMECHECK 0x600
#define MSG_MON_HEALTH 0x601
// *** Message::encode() crcflags bits ***
#define MSG_CRC_DATA (1 << 0)
#define MSG_CRC_HEADER (1 << 1)
#define MSG_CRC_ALL (MSG_CRC_DATA | MSG_CRC_HEADER)
// Special
#define MSG_NOP 0x607
#define MSG_MON_HEALTH_CHECKS 0x608
#define MSG_TIMECHECK2 0x609
// *** ceph-mgr <-> OSD/MDS daemons ***
#define MSG_MGR_OPEN 0x700
#define MSG_MGR_CONFIGURE 0x701
#define MSG_MGR_REPORT 0x702
// *** ceph-mgr <-> ceph-mon ***
#define MSG_MGR_BEACON 0x703
// *** ceph-mon(MgrMonitor) -> OSD/MDS daemons ***
#define MSG_MGR_MAP 0x704
// *** ceph-mon(MgrMonitor) -> ceph-mgr
#define MSG_MGR_DIGEST 0x705
// *** cephmgr -> ceph-mon
#define MSG_MON_MGR_REPORT 0x706
#define MSG_SERVICE_MAP 0x707
#define MSG_MGR_CLOSE 0x708
#define MSG_MGR_COMMAND 0x709
#define MSG_MGR_COMMAND_REPLY 0x70a
// *** ceph-mgr <-> MON daemons ***
#define MSG_MGR_UPDATE 0x70b
// ======================================================
// abstract Message class
/**
 * Message - base class for everything exchanged between Ceph entities.
 *
 * A message carries a fixed-size wire header, up to three buffer sections
 * (payload a.k.a. "front", middle, and data), and a footer holding CRCs.
 * Subclasses implement encode_payload()/decode_payload() to (de)serialize
 * their fields into payload/middle; bulk data rides in the data section.
 * Instances are reference counted (RefCountedObject) and may be charged
 * against byte/message throttlers while in flight.
 */
class Message : public RefCountedObject {
public:
#ifdef WITH_SEASTAR
  // In crimson, conn is independently maintained outside Message.
  using ConnectionRef = void*;
#else
  using ConnectionRef = ::ConnectionRef;
#endif

protected:
  ceph_msg_header header;      // envelope
  ceph_msg_footer footer;
  ceph::buffer::list payload;  // "front" unaligned blob
  ceph::buffer::list middle;   // "middle" unaligned blob
  ceph::buffer::list data;     // data payload (page-alignment will be preserved where possible)

  /* recv_stamp is set when the Messenger starts reading the
   * Message off the wire */
  utime_t recv_stamp;
  /* dispatch_stamp is set when the Messenger starts calling dispatch() on
   * its endpoints */
  utime_t dispatch_stamp;
  /* throttle_stamp is the point at which we got throttle */
  utime_t throttle_stamp;
  /* time at which message was fully read */
  utime_t recv_complete_stamp;

  // Connection the message arrived on / will be sent over (may be null).
  ConnectionRef connection;

  uint32_t magic = 0;

  // Hook used to link this message into a dispatch Queue (see below).
  boost::intrusive::list_member_hook<> dispatch_q;

public:
  // zipkin tracing
  ZTracer::Trace trace;
  void encode_trace(ceph::buffer::list &bl, uint64_t features) const;
  void decode_trace(ceph::buffer::list::const_iterator &p, bool create = false);

  // Callback completed (with r=0) from ~Message once the message is fully
  // processed; the hook object itself is owned by the Messenger.
  class CompletionHook : public Context {
  protected:
    Message *m;
    friend class Message;
  public:
    explicit CompletionHook(Message *_m) : m(_m) {}
    virtual void set_message(Message *_m) { m = _m; }
  };

  // Intrusive dispatch-queue type, threaded through dispatch_q above.
  typedef boost::intrusive::list<Message,
				 boost::intrusive::member_hook<
				   Message,
				   boost::intrusive::list_member_hook<>,
				   &Message::dispatch_q>> Queue;

  // Time at which the message entered a dispatch queue.
  ceph::mono_time queue_start;

protected:
  CompletionHook* completion_hook = nullptr; // owned by Messenger

  // release our size in bytes back to this throttler when our payload
  // is adjusted or when we are destroyed.
  ThrottleInterface *byte_throttler = nullptr;

  // release a count back to this throttler when we are destroyed
  ThrottleInterface *msg_throttler = nullptr;

  // keep track of how big this message was when we reserved space in
  // the msgr dispatch_throttler, so that we can properly release it
  // later.  this is necessary because messages can enter the dispatch
  // queue locally (not via read_message()), and those are not
  // currently throttled.
  uint64_t dispatch_throttle_size = 0;

  friend class Messenger;

public:
  Message() {
    memset(&header, 0, sizeof(header));
    memset(&footer, 0, sizeof(footer));
  }
  // Construct with a given wire type and encoding version information.
  Message(int t, int version=1, int compat_version=0) {
    memset(&header, 0, sizeof(header));
    header.type = t;
    header.version = version;
    header.compat_version = compat_version;
    memset(&footer, 0, sizeof(footer));
  }

  // Covariant convenience wrapper over RefCountedObject::get().
  Message *get() {
    return static_cast<Message *>(RefCountedObject::get());
  }

protected:
  // Protected: destruction happens via put() on the refcount.  Returns any
  // throttle budget held and fires the completion hook.
  ~Message() override {
    if (byte_throttler)
      byte_throttler->put(payload.length() + middle.length() + data.length());
    release_message_throttle();
    trace.event("message destructed");
    /* call completion hooks (if any) */
    if (completion_hook)
      completion_hook->complete(0);
  }
public:
  const ConnectionRef& get_connection() const {
#ifdef WITH_SEASTAR
    ceph_abort("In crimson, conn is independently maintained outside Message");
#endif
    return connection;
  }
  void set_connection(ConnectionRef c) {
#ifdef WITH_SEASTAR
    // In crimson, conn is independently maintained outside Message.
    ceph_assert(c == nullptr);
#endif
    connection = std::move(c);
  }
  CompletionHook* get_completion_hook() { return completion_hook; }
  void set_completion_hook(CompletionHook *hook) { completion_hook = hook; }
  void set_byte_throttler(ThrottleInterface *t) {
    byte_throttler = t;
  }
  void set_message_throttler(ThrottleInterface *t) {
    msg_throttler = t;
  }

  void set_dispatch_throttle_size(uint64_t s) { dispatch_throttle_size = s; }
  uint64_t get_dispatch_throttle_size() const { return dispatch_throttle_size; }

  const ceph_msg_header &get_header() const { return header; }
  ceph_msg_header &get_header() { return header; }
  void set_header(const ceph_msg_header &e) { header = e; }
  void set_footer(const ceph_msg_footer &e) { footer = e; }
  const ceph_msg_footer &get_footer() const { return footer; }
  ceph_msg_footer &get_footer() { return footer; }
  void set_src(const entity_name_t& src) { header.src = src; }

  uint32_t get_magic() const { return magic; }
  void set_magic(int _magic) { magic = _magic; }

  /*
   * If you use get_[data, middle, payload] you shouldn't
   * use it to change those ceph::buffer::lists unless you KNOW
   * there is no throttle being used. The other
   * functions are throttling-aware as appropriate.
   */
  // Drop the front/middle sections, returning their bytes to the throttler.
  void clear_payload() {
    if (byte_throttler) {
      byte_throttler->put(payload.length() + middle.length());
    }
    payload.clear();
    middle.clear();
  }

  // Subclass hook: release any buffers cached outside `data`.
  virtual void clear_buffers() {}
  void clear_data() {
    if (byte_throttler)
      byte_throttler->put(data.length());
    data.clear();
    clear_buffers(); // let subclass drop buffers as well
  }
  // Return our count to the message throttler (at most once).
  void release_message_throttle() {
    if (msg_throttler)
      msg_throttler->put();
    msg_throttler = nullptr;
  }

  bool empty_payload() const { return payload.length() == 0; }
  ceph::buffer::list& get_payload() { return payload; }
  const ceph::buffer::list& get_payload() const { return payload; }
  // Take ownership of bl as the payload, keeping the throttler balanced.
  void set_payload(ceph::buffer::list& bl) {
    if (byte_throttler)
      byte_throttler->put(payload.length());
    payload = std::move(bl);
    if (byte_throttler)
      byte_throttler->take(payload.length());
  }

  void set_middle(ceph::buffer::list& bl) {
    if (byte_throttler)
      byte_throttler->put(middle.length());
    middle = std::move(bl);
    if (byte_throttler)
      byte_throttler->take(middle.length());
  }
  ceph::buffer::list& get_middle() { return middle; }

  // Share (zero-copy) bl as the data section; throttler stays balanced.
  void set_data(const ceph::buffer::list &bl) {
    if (byte_throttler)
      byte_throttler->put(data.length());
    data.share(bl);
    if (byte_throttler)
      byte_throttler->take(data.length());
  }
  const ceph::buffer::list& get_data() const { return data; }
  ceph::buffer::list& get_data() { return data; }
  // Move the data section out into bl, returning its bytes to the throttler.
  void claim_data(ceph::buffer::list& bl) {
    if (byte_throttler)
      byte_throttler->put(data.length());
    bl = std::move(data);
  }
  uint32_t get_data_len() const { return data.length(); }

  void set_recv_stamp(utime_t t) { recv_stamp = t; }
  const utime_t& get_recv_stamp() const { return recv_stamp; }
  void set_dispatch_stamp(utime_t t) { dispatch_stamp = t; }
  const utime_t& get_dispatch_stamp() const { return dispatch_stamp; }
  void set_throttle_stamp(utime_t t) { throttle_stamp = t; }
  const utime_t& get_throttle_stamp() const { return throttle_stamp; }
  void set_recv_complete_stamp(utime_t t) { recv_complete_stamp = t; }
  const utime_t& get_recv_complete_stamp() const { return recv_complete_stamp; }

  // CRC the header contents, excluding the crc field itself.
  void calc_header_crc() {
    header.crc = ceph_crc32c(0, (unsigned char*)&header,
			     sizeof(header) - sizeof(header.crc));
  }
  void calc_front_crc() {
    footer.front_crc = payload.crc32c(0);
    footer.middle_crc = middle.crc32c(0);
  }
  void calc_data_crc() {
    footer.data_crc = data.crc32c(0);
  }

  // Scheduling cost of this message; defaults to the data-section size.
  virtual int get_cost() const {
    return data.length();
  }

  // type
  int get_type() const { return header.type; }
  void set_type(int t) { header.type = t; }

  uint64_t get_tid() const { return header.tid; }
  void set_tid(uint64_t t) { header.tid = t; }

  uint64_t get_seq() const { return header.seq; }
  void set_seq(uint64_t s) { header.seq = s; }

  unsigned get_priority() const { return header.priority; }
  void set_priority(__s16 p) { header.priority = p; }

  // source/dest
  entity_inst_t get_source_inst() const {
    return entity_inst_t(get_source(), get_source_addr());
  }
  entity_name_t get_source() const {
    return entity_name_t(header.src);
  }
  // Peer address from the connection; default-constructed when there is
  // no connection attached.
  entity_addr_t get_source_addr() const {
#ifdef WITH_SEASTAR
    ceph_abort("In crimson, conn is independently maintained outside Message");
#else
    if (connection)
      return connection->get_peer_addr();
#endif
    return entity_addr_t();
  }
  entity_addrvec_t get_source_addrs() const {
#ifdef WITH_SEASTAR
    ceph_abort("In crimson, conn is independently maintained outside Message");
#else
    if (connection)
      return connection->get_peer_addrs();
#endif
    return entity_addrvec_t();
  }

  // forwarded?
  // NOTE: the orig_* accessors currently just mirror the direct source
  // accessors; kept as separate entry points for forwarded messages.
  entity_inst_t get_orig_source_inst() const {
    return get_source_inst();
  }
  entity_name_t get_orig_source() const {
    return get_source();
  }
  entity_addr_t get_orig_source_addr() const {
    return get_source_addr();
  }
  entity_addrvec_t get_orig_source_addrs() const {
    return get_source_addrs();
  }

  // virtual bits
  virtual void decode_payload() = 0;
  virtual void encode_payload(uint64_t features) = 0;
  virtual std::string_view get_type_name() const = 0;
  virtual void print(std::ostream& out) const {
    out << get_type_name() << " magic: " << magic;
  }

  virtual void dump(ceph::Formatter *f) const;

  // Fill payload/middle via encode_payload() and compute CRCs per crcflags.
  void encode(uint64_t features, int crcflags, bool skip_header_crc = false);
};
extern Message *decode_message(CephContext *cct,
int crcflags,
ceph_msg_header& header,
ceph_msg_footer& footer,
ceph::buffer::list& front,
ceph::buffer::list& middle,
ceph::buffer::list& data,
Message::ConnectionRef conn);
// Stream a message via its virtual print(), appending the encoding
// version when the header carries a non-zero one.
inline std::ostream& operator<<(std::ostream& out, const Message& m) {
  m.print(out);
  const auto ver = m.get_header().version;
  if (ver != 0) {
    out << " v" << ver;
  }
  return out;
}
extern void encode_message(Message *m, uint64_t features, ceph::buffer::list& bl);
extern Message *decode_message(CephContext *cct, int crcflags,
ceph::buffer::list::const_iterator& bl);
/// this is a "safe" version of Message. it does not allow calling get/put
/// methods on its derived classes. This is intended to prevent some accidental
/// reference leaks by forcing users to manage references through smart
/// pointer types. Instead, you must either cast the derived class to a
/// RefCountedObject to do the get/put or detach a temporary reference.
class SafeMessage : public Message {
public:
  using Message::Message;
  /// @return true if the peer on this message's connection is a client
  /// entity (crimson builds abort: conn lives outside Message there).
  bool is_a_client() const {
#ifdef WITH_SEASTAR
    ceph_abort("In crimson, conn is independently maintained outside Message");
#else
    return get_connection()->get_peer_type() == CEPH_ENTITY_TYPE_CLIENT;
#endif
  }

private:
  // Hide the raw refcount interface from users of derived classes.
  using RefCountedObject::get;
  using RefCountedObject::put;
};
namespace ceph {
// Construct a message of type T and hand back an intrusive reference that
// becomes the sole owner of the freshly allocated object (the `false`
// means: do not bump the refcount, adopt the initial reference).
template<class T, typename... Args>
ceph::ref_t<T> make_message(Args&&... args) {
  ceph::ref_t<T> ref{new T(std::forward<Args>(args)...), false};
  return ref;
}
}
namespace crimson {
// Construct a message of type T owned by a unique pointer that releases
// it through the messenger's custom deleter.
template<class T, typename... Args>
MURef<T> make_message(Args&&... args) {
  auto *raw = new T(std::forward<Args>(args)...);
  return MURef<T>{raw, TOPNSPC::common::UniquePtrDeleter{}};
}
}
// fmt support for any Message subclass: render via the virtual print(),
// appending " vN" when the header advertises an encoding version (mirrors
// operator<< above).
template <std::derived_from<Message> M>
struct fmt::formatter<M> {
  constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
  template <typename FormatContext>
  auto format(const M& m, FormatContext& ctx) const {
    std::ostringstream rendered;
    m.print(rendered);
    const auto ver = m.get_header().version;
    if (!ver) {
      return fmt::format_to(ctx.out(), "{}", rendered.str());
    }
    return fmt::format_to(ctx.out(), "{} v{}", rendered.str(), ver);
  }
};
#endif
| 18,899 | 29.983607 | 102 |
h
|
null |
ceph-main/src/msg/MessageRef.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat, Inc. <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MESSAGEREF_H
#define CEPH_MESSAGEREF_H
#include <boost/intrusive_ptr.hpp>
#include "common/RefCountedObj.h"
// Intrusive (refcounted) pointer to a message; the standard way messages
// are shared between messenger internals and dispatchers.
template<typename T>
using MRef = boost::intrusive_ptr<T>;
// As MRef, but the pointee is immutable.
template<typename T>
using MConstRef = boost::intrusive_ptr<T const>;
// Unique ownership of a message, released through the messenger's
// custom deleter.
template<typename T>
using MURef = std::unique_ptr<T, TOPNSPC::common::UniquePtrDeleter>;

// Convenience aliases for the Message base class itself.
using MessageRef = MRef<class Message>;
using MessageConstRef = MConstRef<class Message>;
using MessageURef = MURef<class Message>;
/* cd src/messages/ && for f in *; do printf 'class '; basename "$f" .h | tr -d '\n'; printf ';\n'; done >> ../msg/MessageRef.h */
class MAuth;
class MAuthReply;
class MBackfillReserve;
class MCacheExpire;
class MClientCapRelease;
class MClientCaps;
class MClientLease;
class MClientQuota;
class MClientReclaim;
class MClientReclaimReply;
class MClientReconnect;
class MClientReply;
class MClientRequestForward;
class MClientRequest;
class MClientSession;
class MClientSnap;
class MCommand;
class MCommandReply;
class MConfig;
class MDentryLink;
class MDentryUnlink;
class MDirUpdate;
class MDiscover;
class MDiscoverReply;
class MExportCapsAck;
class MExportCaps;
class MExportDirAck;
class MExportDirCancel;
class MExportDirDiscoverAck;
class MExportDirDiscover;
class MExportDirFinish;
class MExportDir;
class MExportDirNotifyAck;
class MExportDirNotify;
class MExportDirPrepAck;
class MExportDirPrep;
class MForward;
class MFSMap;
class MFSMapUser;
class MGatherCaps;
class MGenericMessage;
class MGetConfig;
class MGetPoolStats;
class MGetPoolStatsReply;
class MHeartbeat;
class MInodeFileCaps;
class MLock;
class MLogAck;
class MLog;
class MMDSBeacon;
class MMDSCacheRejoin;
class MMDSFindIno;
class MMDSFindInoReply;
class MMDSFragmentNotifyAck;
class MMDSFragmentNotify;
class MMDSLoadTargets;
class MMDSMap;
class MMDSOpenIno;
class MMDSOpenInoReply;
class MMDSResolveAck;
class MMDSResolve;
class MMDSPeerRequest;
class MMDSSnapUpdate;
class MMDSTableRequest;
class MMgrBeacon;
class MMgrClose;
class MMgrConfigure;
class MMgrDigest;
class MMgrMap;
class MMgrOpen;
class MMgrUpdate;
class MMgrReport;
class MMonCommandAck;
class MMonCommand;
class MMonElection;
class MMonGetMap;
class MMonGetOSDMap;
class MMonGetVersion;
class MMonGetVersionReply;
class MMonGlobalID;
class MMonHealthChecks;
class MMonHealth;
class MMonJoin;
class MMonMap;
class MMonMetadata;
class MMonMgrReport;
class MMonPaxos;
class MMonProbe;
class MMonQuorumService;
class MMonScrub;
class MMonSubscribeAck;
class MMonSubscribe;
class MMonSync;
class MOSDAlive;
class MOSDBackoff;
class MOSDBeacon;
class MOSDBoot;
class MOSDECSubOpRead;
class MOSDECSubOpReadReply;
class MOSDECSubOpWrite;
class MOSDECSubOpWriteReply;
class MOSDFailure;
class MOSDFastDispatchOp;
class MOSDForceRecovery;
class MOSDFull;
class MOSDMap;
class MOSDMarkMeDown;
class MOSDPeeringOp;
class MOSDPGBackfill;
class MOSDPGBackfillRemove;
class MOSDPGCreate2;
class MOSDPGCreated;
class MOSDPGInfo;
class MOSDPGLog;
class MOSDPGNotify;
class MOSDPGPull;
class MOSDPGPush;
class MOSDPGPushReply;
class MOSDPGQuery;
class MOSDPGReadyToMerge;
class MOSDPGRecoveryDelete;
class MOSDPGRecoveryDeleteReply;
class MOSDPGRemove;
class MOSDPGScan;
class MOSDPGTemp;
class MOSDPGTrim;
class MOSDPGUpdateLogMissing;
class MOSDPGUpdateLogMissingReply;
class MOSDPing;
class MOSDRepOp;
class MOSDRepOpReply;
class MOSDRepScrub;
class MOSDRepScrubMap;
class MOSDScrub2;
class MOSDScrubReserve;
class MPGStatsAck;
class MPGStats;
class MPing;
class MPoolOp;
class MPoolOpReply;
class MRecoveryReserve;
class MRemoveSnaps;
class MRoute;
class MServiceMap;
class MStatfs;
class MStatfsReply;
class MTimeCheck2;
class MTimeCheck;
class MWatchNotify;
class PaxosServiceMessage;
#endif
| 4,134 | 21.231183 | 130 |
h
|
null |
ceph-main/src/msg/Messenger.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <netdb.h>
#include "include/types.h"
#include "include/random.h"
#include "Messenger.h"
#include "msg/async/AsyncMessenger.h"
// Build a client-side messenger of the configured public transport type,
// identified by `lname`, with a freshly generated nonce.
Messenger *Messenger::create_client_messenger(CephContext *cct, std::string lname)
{
  // Prefer the explicit public messenger type; fall back to the generic
  // ms_type option when none was configured.
  std::string msgr_type = cct->_conf->ms_public_type;
  if (msgr_type.empty()) {
    msgr_type = cct->_conf.get_val<std::string>("ms_type");
  }
  auto nonce = get_random_nonce();
  return Messenger::create(cct, msgr_type, entity_name_t::CLIENT(),
			   std::move(lname), nonce);
}
// Return a random 64-bit nonce used to distinguish messenger instances
// (e.g. successive daemon incarnations at the same address).
uint64_t Messenger::get_random_nonce()
{
  // in the past the logic here was more complex -- we were trying
  // to use the PID but, in the containerized world, it turned out
  // unreliable. To deal with this, we started guessing whether we
  // run in a container or not, and of course, got manual lever to
  // intervene if guessed wrong (CEPH_USE_RANDOM_NONCE).
  return ceph::util::generate_random_number<uint64_t>();
}
// Factory for concrete messengers.  Every supported transport maps onto
// AsyncMessenger: "random" is accepted as a historical alias and any type
// containing "async" selects it.  Returns nullptr (after logging) for
// anything else.
Messenger *Messenger::create(CephContext *cct, const std::string &type,
			     entity_name_t name, std::string lname,
			     uint64_t nonce)
{
  const bool is_async =
    (type == "random") || (type.find("async") != std::string::npos);
  if (!is_async) {
    lderr(cct) << "unrecognized ms_type '" << type << "'" << dendl;
    return nullptr;
  }
  return new AsyncMessenger(cct, name, type, std::move(lname), nonce);
}
/**
 * Get the default crc flags for this messenger.
 */
static int get_default_crc_flags(const ConfigProxy&);
/**
 * Base-class constructor: record the local identity and snapshot
 * config-derived state (CRC flags, auth and compression settings).
 *
 * @param cct_ ceph context used for configuration and logging
 * @param w entity name of the local daemon (e.g. client.99)
 */
Messenger::Messenger(CephContext *cct_, entity_name_t w)
  : trace_endpoint("0.0.0.0", 0, "Messenger"),  // placeholder; real addr recorded by set_endpoint_addr()
    my_name(w),
    default_send_priority(CEPH_MSG_PRIO_DEFAULT),
    started(false),
    magic(0),
    socket_priority(-1),  // -1: no explicit SO_PRIORITY configured
    cct(cct_),
    crcflags(get_default_crc_flags(cct->_conf)),
    auth_registry(cct),
    comp_registry(cct)
{
  // pull the current config values into both registries up front
  auth_registry.refresh_config();
  comp_registry.refresh_config();
}
/**
 * Record the given address/port in the zipkin trace endpoint.
 *
 * Only IPv4 and IPv6 addresses are converted to a printable host
 * string; for any other family the host part is left untouched and
 * only the port is recorded.
 *
 * @param a the endpoint address to record
 * @param name the entity name (currently unused here)
 */
void Messenger::set_endpoint_addr(const entity_addr_t& a,
                                  const entity_name_t &name)
{
  size_t hostlen;
  if (a.get_family() == AF_INET)
    hostlen = sizeof(struct sockaddr_in);
  else if (a.get_family() == AF_INET6)
    hostlen = sizeof(struct sockaddr_in6);
  else
    hostlen = 0;
  if (hostlen) {
    char buf[NI_MAXHOST] = { 0 };
    // NI_NUMERICHOST: we want the literal address, not a DNS lookup.
    // Only record the host when the conversion succeeds; previously the
    // return value was ignored, so a failure installed an empty string.
    if (getnameinfo(a.get_sockaddr(), hostlen, buf, sizeof(buf),
		    NULL, 0, NI_NUMERICHOST) == 0) {
      trace_endpoint.copy_ip(buf);
    }
  }
  trace_endpoint.set_port(a.get_port());
}
/**
 * Get the default crc flags for this messenger.
 *
 * Pre-calculate desired software CRC settings. CRC computation may
 * be disabled by default for some transports (e.g., those with strong
 * hardware checksum support).
 */
// Translate the ms_crc_* config options into a MSG_CRC_* bitmask.
int get_default_crc_flags(const ConfigProxy& conf)
{
  const int data_flag = conf->ms_crc_data ? MSG_CRC_DATA : 0;
  const int header_flag = conf->ms_crc_header ? MSG_CRC_HEADER : 0;
  return data_flag | header_flag;
}
// Default implementation: collapse the address vector down to the
// legacy (v1) address and defer to the single-address bind().
// Messenger implementations with real multi-address support override
// this virtual.
int Messenger::bindv(const entity_addrvec_t& bind_addrs,
		     std::optional<entity_addrvec_t> public_addrs)
{
  return bind(bind_addrs.legacy_addr(), std::move(public_addrs));
}
| 3,152 | 28.194444 | 142 |
cc
|
null |
ceph-main/src/msg/Messenger.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MESSENGER_H
#define CEPH_MESSENGER_H
#include <deque>
#include <map>
#include <optional>
#include <errno.h>
#include <sstream>
#include <memory>
#include "Message.h"
#include "Dispatcher.h"
#include "Policy.h"
#include "common/Throttle.h"
#include "include/Context.h"
#include "include/types.h"
#include "include/ceph_features.h"
#include "auth/Crypto.h"
#include "common/item_history.h"
#include "auth/AuthRegistry.h"
#include "compressor_registry.h"
#include "include/ceph_assert.h"
#include <errno.h>
#include <sstream>
#include <signal.h>
#define SOCKET_PRIORITY_MIN_DELAY 6
class Timer;
class AuthClient;
class AuthServer;
#ifdef UNIT_TESTS_BUILT
/**
 * Test-only hook (compiled under UNIT_TESTS_BUILT): lets a unit test
 * observe and steer the connection state machine.  At each STEP the
 * implementation's intercept() is invoked, and the returned ACTION
 * decides whether processing continues, fails, or stops.
 */
struct Interceptor {
  std::mutex lock;
  std::condition_variable cond_var;
  // verdict returned by intercept() for the current step
  enum ACTION : uint32_t {
    CONTINUE = 0,
    FAIL,
    STOP
  };
  // protocol checkpoints at which intercept() may be invoked
  enum STEP {
    START_CLIENT_BANNER_EXCHANGE = 1,
    START_SERVER_BANNER_EXCHANGE,
    BANNER_EXCHANGE_BANNER_CONNECTING,
    BANNER_EXCHANGE,
    HANDLE_PEER_BANNER_BANNER_CONNECTING,
    HANDLE_PEER_BANNER,
    HANDLE_PEER_BANNER_PAYLOAD_HELLO_CONNECTING,
    HANDLE_PEER_BANNER_PAYLOAD,
    SEND_AUTH_REQUEST,
    HANDLE_AUTH_REQUEST_ACCEPTING_SIGN,
    SEND_CLIENT_IDENTITY,
    SEND_SERVER_IDENTITY,
    SEND_RECONNECT,
    SEND_RECONNECT_OK,
    READY,
    HANDLE_MESSAGE,
    READ_MESSAGE_COMPLETE,
    SESSION_RETRY,
    SEND_COMPRESSION_REQUEST,
    HANDLE_COMPRESSION_REQUEST
  };
  virtual ~Interceptor() {}
  virtual ACTION intercept(Connection *conn, uint32_t step) = 0;
};
#endif
class Messenger {
private:
std::deque<Dispatcher*> dispatchers;
std::deque<Dispatcher*> fast_dispatchers;
ZTracer::Endpoint trace_endpoint;
protected:
void set_endpoint_addr(const entity_addr_t& a,
const entity_name_t &name);
protected:
/// the "name" of the local daemon. eg client.99
entity_name_t my_name;
/// my addr
safe_item_history<entity_addrvec_t> my_addrs;
int default_send_priority;
/// std::set to true once the Messenger has started, and std::set to false on shutdown
bool started;
uint32_t magic;
int socket_priority;
public:
AuthClient *auth_client = 0;
AuthServer *auth_server = 0;
#ifdef UNIT_TESTS_BUILT
Interceptor *interceptor = nullptr;
#endif
/**
* The CephContext this Messenger uses. Many other components initialize themselves
* from this value.
*/
CephContext *cct;
int crcflags;
using Policy = ceph::net::Policy<Throttle>;
public:
// allow unauthenticated connections. This is needed for
// compatibility with pre-nautilus OSDs, which do not authenticate
// the heartbeat sessions.
bool require_authorizer = true;
protected:
// for authentication
AuthRegistry auth_registry;
public:
/**
* Messenger constructor. Call this from your implementation.
* Messenger users should construct full implementations directly,
* or use the create() function.
*/
Messenger(CephContext *cct_, entity_name_t w);
virtual ~Messenger() {}
/**
* create a new messenger
*
* Create a new messenger instance, with whatever implementation is
* available or specified via the configuration in cct.
*
* @param cct context
* @param type name of messenger type
* @param name entity name to register
* @param lname logical name of the messenger in this process (e.g., "client")
* @param nonce nonce value to uniquely identify this instance on the current host
*/
static Messenger *create(CephContext *cct,
const std::string &type,
entity_name_t name,
std::string lname,
uint64_t nonce);
static uint64_t get_random_nonce();
/**
* create a new messenger
*
* Create a new messenger instance.
* Same as the above, but a slightly simpler interface for clients:
* - Generate a random nonce
* - get the messenger type from cct
* - use the client entity_type
*
* @param cct context
* @param lname logical name of the messenger in this process (e.g., "client")
*/
static Messenger *create_client_messenger(CephContext *cct, std::string lname);
/**
* @defgroup Accessors
* @{
*/
int get_mytype() const { return my_name.type(); }
/**
* Retrieve the Messenger's name
*
* @return A const reference to the name this Messenger
* currently believes to be its own.
*/
const entity_name_t& get_myname() { return my_name; }
/**
* Retrieve the Messenger's address.
*
* @return A const reference to the address this Messenger
* currently believes to be its own.
*/
const entity_addrvec_t& get_myaddrs() {
return *my_addrs;
}
/**
* get legacy addr for myself, suitable for protocol v1
*
* Note that myaddrs might be a proper addrvec with v1 in it, or it might be an
* ANY addr (if i am a pure client).
*/
entity_addr_t get_myaddr_legacy() {
return my_addrs->as_legacy_addr();
}
/**
* std::set messenger's instance
*/
uint32_t get_magic() { return magic; }
void set_magic(int _magic) { magic = _magic; }
void set_auth_client(AuthClient *ac) {
auth_client = ac;
}
void set_auth_server(AuthServer *as) {
auth_server = as;
}
// for compression
CompressorRegistry comp_registry;
protected:
/**
* std::set messenger's address
*/
virtual void set_myaddrs(const entity_addrvec_t& a) {
my_addrs = a;
set_endpoint_addr(a.front(), my_name);
}
public:
/**
* @return the zipkin trace endpoint
*/
const ZTracer::Endpoint* get_trace_endpoint() const {
return &trace_endpoint;
}
/**
* set the name of the local entity. The name is reported to others and
* can be changed while the system is running, but doing so at incorrect
* times may have bad results.
*
* @param m The name to std::set.
*/
void set_myname(const entity_name_t& m) { my_name = m; }
/**
* set the unknown address components for this Messenger.
* This is useful if the Messenger doesn't know its full address just by
* binding, but another Messenger on the same interface has already learned
* its full address. This function does not fill in known address elements,
* cause a rebind, or do anything of that sort.
*
* @param addr The address to use as a template.
*/
virtual bool set_addr_unknowns(const entity_addrvec_t &addrs) = 0;
/// Get the default send priority.
int get_default_send_priority() { return default_send_priority; }
/**
* Get the number of Messages which the Messenger has received
* but not yet dispatched.
*/
virtual int get_dispatch_queue_len() = 0;
/**
* Get age of oldest undelivered message
* (0 if the queue is empty)
*/
virtual double get_dispatch_queue_max_age(utime_t now) = 0;
/**
* @} // Accessors
*/
/**
* @defgroup Configuration
* @{
*/
/**
* set the cluster protocol in use by this daemon.
* This is an init-time function and cannot be called after calling
* start() or bind().
*
* @param p The cluster protocol to use. Defined externally.
*/
virtual void set_cluster_protocol(int p) = 0;
/**
* set a policy which is applied to all peers who do not have a type-specific
* Policy.
* This is an init-time function and cannot be called after calling
* start() or bind().
*
* @param p The Policy to apply.
*/
virtual void set_default_policy(Policy p) = 0;
/**
* set a policy which is applied to all peers of the given type.
* This is an init-time function and cannot be called after calling
* start() or bind().
*
* @param type The peer type this policy applies to.
* @param p The policy to apply.
*/
virtual void set_policy(int type, Policy p) = 0;
/**
* set the Policy associated with a type of peer.
*
* This can be called either on initial setup, or after connections
* are already established. However, the policies for existing
* connections will not be affected; the new policy will only apply
* to future connections.
*
* @param t The peer type to get the default policy for.
* @return A const Policy reference.
*/
virtual Policy get_policy(int t) = 0;
/**
* Get the default Policy
*
* @return A const Policy reference.
*/
virtual Policy get_default_policy() = 0;
/**
* set Throttlers applied to all Messages from the given type of peer
*
* This is an init-time function and cannot be called after calling
* start() or bind().
*
* @param type The peer type the Throttlers will apply to.
* @param bytes The Throttle for the number of bytes carried by the message
* @param msgs The Throttle for the number of messages for this @p type
* @note The Messenger does not take ownership of the Throttle pointers, but
* you must not destroy them before you destroy the Messenger.
*/
virtual void set_policy_throttlers(int type, Throttle *bytes, Throttle *msgs=NULL) = 0;
/**
* set the default send priority
*
* This is an init-time function and must be called *before* calling
* start().
*
* @param p The cluster protocol to use. Defined externally.
*/
void set_default_send_priority(int p) {
ceph_assert(!started);
default_send_priority = p;
}
/**
* set the priority(SO_PRIORITY) for all packets to be sent on this socket.
*
* Linux uses this value to order the networking queues: packets with a higher
* priority may be processed first depending on the selected device queueing
* discipline.
*
* @param prio The priority. Setting a priority outside the range 0 to 6
* requires the CAP_NET_ADMIN capability.
*/
void set_socket_priority(int prio) {
socket_priority = prio;
}
/**
* Get the socket priority
*
* @return the socket priority
*/
int get_socket_priority() {
return socket_priority;
}
/**
* Add a new Dispatcher to the front of the list. If you add
* a Dispatcher which is already included, it will get a duplicate
* entry. This will reduce efficiency but not break anything.
*
* @param d The Dispatcher to insert into the list.
*/
void add_dispatcher_head(Dispatcher *d) {
bool first = dispatchers.empty();
dispatchers.push_front(d);
if (d->ms_can_fast_dispatch_any())
fast_dispatchers.push_front(d);
if (first)
ready();
}
/**
* Add a new Dispatcher to the end of the list. If you add
* a Dispatcher which is already included, it will get a duplicate
* entry. This will reduce efficiency but not break anything.
*
* @param d The Dispatcher to insert into the list.
*/
void add_dispatcher_tail(Dispatcher *d) {
bool first = dispatchers.empty();
dispatchers.push_back(d);
if (d->ms_can_fast_dispatch_any())
fast_dispatchers.push_back(d);
if (first)
ready();
}
/**
* Bind the Messenger to a specific address. If bind_addr
* is not completely filled in the system will use the
* valid portions and cycle through the unset ones (eg, the port)
* in an unspecified order.
*
* @param bind_addr The address to bind to.
* @patam public_addrs The addresses to announce over the network
* @return 0 on success, or -1 on error, or -errno if
* we can be more specific about the failure.
*/
virtual int bind(const entity_addr_t& bind_addr,
std::optional<entity_addrvec_t> public_addrs=std::nullopt) = 0;
virtual int bindv(const entity_addrvec_t& bind_addrs,
std::optional<entity_addrvec_t> public_addrs=std::nullopt);
/**
* This function performs a full restart of the Messenger component,
* whatever that means. Other entities who connect to this
* Messenger post-rebind() should perceive it as a new entity which
* they have not previously contacted, and it MUST bind to a
* different address than it did previously.
*
* @param avoid_ports Additional port to avoid binding to.
*/
virtual int rebind(const std::set<int>& avoid_ports) { return -EOPNOTSUPP; }
/**
* Bind the 'client' Messenger to a specific address.Messenger will bind
* the address before connect to others when option ms_bind_before_connect
* is true.
* @param bind_addr The address to bind to.
* @return 0 on success, or -1 on error, or -errno if
* we can be more specific about the failure.
*/
virtual int client_bind(const entity_addr_t& bind_addr) = 0;
/**
* reset the 'client' Messenger. Mark all the existing Connections down
* and update 'nonce'.
*/
virtual int client_reset() = 0;
virtual bool should_use_msgr2() {
return false;
}
/**
* @} // Configuration
*/
/**
* @defgroup Startup/Shutdown
* @{
*/
/**
* Perform any resource allocation, thread startup, etc
* that is required before attempting to connect to other
* Messengers or transmit messages.
* Once this function completes, started shall be set to true.
*
* @return 0 on success; -errno on failure.
*/
virtual int start() { started = true; return 0; }
// shutdown
/**
* Block until the Messenger has finished shutting down (according
* to the shutdown() function).
* It is valid to call this after calling shutdown(), but it must
* be called before deleting the Messenger.
*/
virtual void wait() = 0;
/**
* Initiate a shutdown of the Messenger.
*
* @return 0 on success, -errno otherwise.
*/
virtual int shutdown() { started = false; return 0; }
/**
* @} // Startup/Shutdown
*/
/**
* @defgroup Messaging
* @{
*/
/**
* Queue the given Message for the given entity.
* Success in this function does not guarantee Message delivery, only
* success in queueing the Message. Other guarantees may be provided based
* on the Connection policy associated with the dest.
*
* @param m The Message to send. The Messenger consumes a single reference
* when you pass it in.
* @param dest The entity to send the Message to.
*
* DEPRECATED: please do not use this interface for any new code;
* use the Connection* variant.
*
* @return 0 on success, or -errno on failure.
*/
virtual int send_to(
Message *m,
int type,
const entity_addrvec_t& addr) = 0;
int send_to_mon(
Message *m, const entity_addrvec_t& addrs) {
return send_to(m, CEPH_ENTITY_TYPE_MON, addrs);
}
int send_to_mds(
Message *m, const entity_addrvec_t& addrs) {
return send_to(m, CEPH_ENTITY_TYPE_MDS, addrs);
}
/**
* @} // Messaging
*/
/**
* @defgroup Connection Management
* @{
*/
/**
* Get the Connection object associated with a given entity. If a
* Connection does not exist, create one and establish a logical connection.
* The caller owns a reference when this returns. Call ->put() when you're
* done!
*
* @param dest The entity to get a connection for.
*/
virtual ConnectionRef connect_to(
int type, const entity_addrvec_t& dest,
bool anon=false, bool not_local_dest=false) = 0;
ConnectionRef connect_to_mon(const entity_addrvec_t& dest,
bool anon=false, bool not_local_dest=false) {
return connect_to(CEPH_ENTITY_TYPE_MON, dest, anon, not_local_dest);
}
ConnectionRef connect_to_mds(const entity_addrvec_t& dest,
bool anon=false, bool not_local_dest=false) {
return connect_to(CEPH_ENTITY_TYPE_MDS, dest, anon, not_local_dest);
}
ConnectionRef connect_to_osd(const entity_addrvec_t& dest,
bool anon=false, bool not_local_dest=false) {
return connect_to(CEPH_ENTITY_TYPE_OSD, dest, anon, not_local_dest);
}
ConnectionRef connect_to_mgr(const entity_addrvec_t& dest,
bool anon=false, bool not_local_dest=false) {
return connect_to(CEPH_ENTITY_TYPE_MGR, dest, anon, not_local_dest);
}
/**
* Get the Connection object associated with ourselves.
*/
virtual ConnectionRef get_loopback_connection() = 0;
/**
* Mark down a Connection to a remote.
*
* This will cause us to discard our outgoing queue for them, and if
* reset detection is enabled in the policy and the endpoint tries
* to reconnect they will discard their queue when we inform them of
* the session reset.
*
* If there is no Connection to the given dest, it is a no-op.
*
* This generates a RESET notification to the Dispatcher.
*
* DEPRECATED: please do not use this interface for any new code;
* use the Connection* variant.
*
* @param a The address to mark down.
*/
virtual void mark_down(const entity_addr_t& a) = 0;
virtual void mark_down_addrs(const entity_addrvec_t& a) {
mark_down(a.legacy_addr());
}
/**
* Mark all the existing Connections down. This is equivalent
* to iterating over all Connections and calling mark_down()
* on each.
*
* This will generate a RESET event for each closed connections.
*/
virtual void mark_down_all() = 0;
/**
* @} // Connection Management
*/
protected:
/**
* @defgroup Subclass Interfacing
* @{
*/
/**
* A courtesy function for Messenger implementations which
* will be called when we receive our first Dispatcher.
*/
virtual void ready() { }
/**
* @} // Subclass Interfacing
*/
public:
#ifdef CEPH_USE_SIGPIPE_BLOCKER
/**
* We need to disable SIGPIPE on all platforms, and if they
* don't give us a better mechanism (read: are on Solaris) that
* means blocking the signal whenever we do a send or sendmsg...
* That means any implementations must invoke MSGR_SIGPIPE_STOPPER in-scope
* whenever doing so. On most systems that's blank, but on systems where
* it's needed we construct an RAII object to plug and un-plug the SIGPIPE.
* See http://www.microhowto.info/howto/ignore_sigpipe_without_affecting_other_threads_in_a_process.html
*/
struct sigpipe_stopper {
bool blocked;
sigset_t existing_mask;
sigset_t pipe_mask;
sigpipe_stopper() {
sigemptyset(&pipe_mask);
sigaddset(&pipe_mask, SIGPIPE);
sigset_t signals;
sigemptyset(&signals);
sigpending(&signals);
if (sigismember(&signals, SIGPIPE)) {
blocked = false;
} else {
blocked = true;
int r = pthread_sigmask(SIG_BLOCK, &pipe_mask, &existing_mask);
ceph_assert(r == 0);
}
}
~sigpipe_stopper() {
if (blocked) {
struct timespec nowait{0};
int r = sigtimedwait(&pipe_mask, 0, &nowait);
ceph_assert(r == EAGAIN || r == 0);
r = pthread_sigmask(SIG_SETMASK, &existing_mask, 0);
ceph_assert(r == 0);
}
}
};
# define MSGR_SIGPIPE_STOPPER Messenger::sigpipe_stopper stopper();
#else
# define MSGR_SIGPIPE_STOPPER
#endif
/**
* @defgroup Dispatcher Interfacing
* @{
*/
/**
* Determine whether a message can be fast-dispatched. We will
* query each Dispatcher in sequence to determine if they are
* capable of handling a particular message via "fast dispatch".
*
* @param m The Message we are testing.
*/
  bool ms_can_fast_dispatch(const ceph::cref_t<Message>& m) {
    // Linear probe of the registered fast dispatchers: the first one
    // that claims the message type decides the answer.
    for (const auto &dispatcher : fast_dispatchers) {
      if (dispatcher->ms_can_fast_dispatch2(m))
	return true;
    }
    return false;
  }
/**
* Deliver a single Message via "fast dispatch".
*
* @param m The Message we are fast dispatching.
* If none of our Dispatchers can handle it, ceph_abort().
*/
void ms_fast_dispatch(const ceph::ref_t<Message> &m) {
m->set_dispatch_stamp(ceph_clock_now());
for (const auto &dispatcher : fast_dispatchers) {
if (dispatcher->ms_can_fast_dispatch2(m)) {
dispatcher->ms_fast_dispatch2(m);
return;
}
}
ceph_abort();
}
void ms_fast_dispatch(Message *m) {
return ms_fast_dispatch(ceph::ref_t<Message>(m, false)); /* consume ref */
}
/**
*
*/
void ms_fast_preprocess(const ceph::ref_t<Message> &m) {
for (const auto &dispatcher : fast_dispatchers) {
dispatcher->ms_fast_preprocess2(m);
}
}
/**
* Deliver a single Message. Send it to each Dispatcher
* in sequence until one of them handles it.
* If none of our Dispatchers can handle it, ceph_abort().
*
* @param m The Message to deliver.
*/
  void ms_deliver_dispatch(const ceph::ref_t<Message> &m) {
    m->set_dispatch_stamp(ceph_clock_now());
    // Offer the message to each dispatcher in registration order;
    // the first one whose ms_dispatch2() returns true consumes it.
    for (const auto &dispatcher : dispatchers) {
      if (dispatcher->ms_dispatch2(m))
	return;
    }
    // Nobody handled it: log, and abort if configured to treat
    // unhandled messages as fatal.
    lsubdout(cct, ms, 0) << "ms_deliver_dispatch: unhandled message " << m << " " << *m << " from "
			 << m->get_source_inst() << dendl;
    ceph_assert(!cct->_conf->ms_die_on_unhandled_msg);
  }
void ms_deliver_dispatch(Message *m) {
return ms_deliver_dispatch(ceph::ref_t<Message>(m, false)); /* consume ref */
}
/**
* Notify each Dispatcher of a new Connection. Call
* this function whenever a new Connection is initiated or
* reconnects.
*
* @param con Pointer to the new Connection.
*/
void ms_deliver_handle_connect(Connection *con) {
for (const auto& dispatcher : dispatchers) {
dispatcher->ms_handle_connect(con);
}
}
/**
* Notify each fast Dispatcher of a new Connection. Call
* this function whenever a new Connection is initiated or
* reconnects.
*
* @param con Pointer to the new Connection.
*/
void ms_deliver_handle_fast_connect(Connection *con) {
for (const auto& dispatcher : fast_dispatchers) {
dispatcher->ms_handle_fast_connect(con);
}
}
/**
* Notify each Dispatcher of a new incoming Connection. Call
* this function whenever a new Connection is accepted.
*
* @param con Pointer to the new Connection.
*/
void ms_deliver_handle_accept(Connection *con) {
for (const auto& dispatcher : dispatchers) {
dispatcher->ms_handle_accept(con);
}
}
/**
* Notify each fast Dispatcher of a new incoming Connection. Call
* this function whenever a new Connection is accepted.
*
* @param con Pointer to the new Connection.
*/
void ms_deliver_handle_fast_accept(Connection *con) {
for (const auto& dispatcher : fast_dispatchers) {
dispatcher->ms_handle_fast_accept(con);
}
}
/**
* Notify each Dispatcher of a Connection which may have lost
* Messages. Call this function whenever you detect that a lossy Connection
* has been disconnected.
*
* @param con Pointer to the broken Connection.
*/
void ms_deliver_handle_reset(Connection *con) {
for (const auto& dispatcher : dispatchers) {
if (dispatcher->ms_handle_reset(con))
return;
}
}
/**
* Notify each Dispatcher of a Connection which has been "forgotten" about
* by the remote end, implying that messages have probably been lost.
* Call this function whenever you detect a reset.
*
* @param con Pointer to the broken Connection.
*/
void ms_deliver_handle_remote_reset(Connection *con) {
for (const auto& dispatcher : dispatchers) {
dispatcher->ms_handle_remote_reset(con);
}
}
/**
* Notify each Dispatcher of a Connection for which reconnection
* attempts are being refused. Call this function whenever you
* detect that a lossy Connection has been disconnected and it's
* impossible to reconnect.
*
* @param con Pointer to the broken Connection.
*/
void ms_deliver_handle_refused(Connection *con) {
for (const auto& dispatcher : dispatchers) {
if (dispatcher->ms_handle_refused(con))
return;
}
}
void set_require_authorizer(bool b) {
require_authorizer = b;
}
/**
* @} // Dispatcher Interfacing
*/
};
#endif
| 23,947 | 27.957678 | 106 |
h
|
null |
ceph-main/src/msg/Policy.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "include/ceph_features.h"
namespace ceph::net {
using peer_type_t = int;
/**
* A Policy describes the rules of a Connection. Is there a limit on how
* much data this Connection can have locally? When the underlying connection
* experiences an error, does the Connection disappear? Can this Messenger
* re-establish the underlying connection?
*/
template<class ThrottleType>
struct Policy {
  /// If true, the Connection is tossed out on errors.
  bool lossy;
  /// If true, the underlying connection can't be re-established from this end.
  bool server;
  /// If true, we will standby when idle
  bool standby;
  /// If true, we will try to detect session resets
  bool resetcheck;
  /// Server: register lossy client connections.
  bool register_lossy_clients = true;
  // The net result of this is that a given client can only have one
  // open connection with the server.  If a new connection is made,
  // the old (registered) one is closed by the messenger during the accept
  // process.
  /**
   * The throttler is used to limit how much data is held by Messages from
   * the associated Connection(s). When reading in a new Message, the Messenger
   * will call throttler->throttle() for the size of the new Message.
   */
  ThrottleType* throttler_bytes;
  ThrottleType* throttler_messages;
  /// Specify features supported locally by the endpoint.
#ifdef MSG_POLICY_UNIT_TESTING
  uint64_t features_supported{CEPH_FEATURES_SUPPORTED_DEFAULT};
#else
  static constexpr uint64_t features_supported{CEPH_FEATURES_SUPPORTED_DEFAULT};
#endif
  /// Specify features any remotes must have to talk to this endpoint.
  uint64_t features_required;
  /// Default: lossless, non-server, no standby, with reset detection.
  Policy()
    : lossy(false), server(false), standby(false), resetcheck(true),
      throttler_bytes(NULL),
      throttler_messages(NULL),
      features_required(0) {}
private:
  // All-flags constructor; only reachable through the named factories
  // below so call sites stay self-describing.
  Policy(bool l, bool s, bool st, bool r, bool rlc, uint64_t req)
    : lossy(l), server(s), standby(st), resetcheck(r),
      register_lossy_clients(rlc),
      throttler_bytes(NULL),
      throttler_messages(NULL),
      features_required(req) {}
public:
  /// lossless server: standby when idle, detects session resets.
  static Policy stateful_server(uint64_t req) {
    return Policy(false, true, true, true, true, req);
  }
  /// lossy server that registers client connections.
  static Policy stateless_registered_server(uint64_t req) {
    return Policy(true, true, false, false, true, req);
  }
  /// lossy server; client connections are not registered.
  static Policy stateless_server(uint64_t req) {
    return Policy(true, true, false, false, false, req);
  }
  /// lossless peer: standby when idle, no reset detection.
  static Policy lossless_peer(uint64_t req) {
    return Policy(false, false, true, false, true, req);
  }
  /// lossless peer with standby and reset detection.
  static Policy lossless_peer_reuse(uint64_t req) {
    return Policy(false, false, true, true, true, req);
  }
  /// lossy client: dropped on error, no standby, no reset detection.
  static Policy lossy_client(uint64_t req) {
    return Policy(true, false, false, false, true, req);
  }
  /// lossless client with reset detection.
  static Policy lossless_client(uint64_t req) {
    return Policy(false, false, false, true, true, req);
  }
};
/**
 * A default Policy plus optional per-peer-type overrides
 * (entity_name_t::type -> Policy).
 */
template<class ThrottleType>
class PolicySet {
  using policy_t = Policy<ThrottleType> ;
  /// fallback policy returned when no per-type entry exists
  policy_t default_policy;
  /// per-peer-type overrides
  std::map<int, policy_t> policy_map;
public:
  /// Look up the policy for a peer type, falling back to the default.
  const policy_t& get(peer_type_t peer_type) const {
    auto it = policy_map.find(peer_type);
    return it != policy_map.end() ? it->second : default_policy;
  }
  policy_t& get(peer_type_t peer_type) {
    auto it = policy_map.find(peer_type);
    return it != policy_map.end() ? it->second : default_policy;
  }
  /// Install (or replace) the policy for one peer type.
  void set(peer_type_t peer_type, const policy_t& p) {
    policy_map[peer_type] = p;
  }
  const policy_t& get_default() const {
    return default_policy;
  }
  void set_default(const policy_t& p) {
    default_policy = p;
  }
  /// Attach byte/message throttlers to the policy for one peer type
  /// (or to the default policy when no per-type entry exists).
  void set_throttlers(peer_type_t peer_type,
		      ThrottleType* byte_throttle,
		      ThrottleType* msg_throttle) {
    auto& p = get(peer_type);
    p.throttler_bytes = byte_throttle;
    p.throttler_messages = msg_throttle;
  }
};
}
| 4,232 | 30.827068 | 80 |
h
|
null |
ceph-main/src/msg/SimplePolicyMessenger.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
* Portions Copyright (C) 2013 CohortFS, LLC
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef SIMPLE_POLICY_MESSENGER_H
#define SIMPLE_POLICY_MESSENGER_H
#include "Messenger.h"
#include "Policy.h"
/**
 * A Messenger that implements the policy-related pure virtuals by
 * storing a ceph::net::PolicySet guarded by a mutex.  Concrete
 * messenger implementations derive from this to inherit uniform
 * policy bookkeeping.
 */
class SimplePolicyMessenger : public Messenger
{
private:
  /// lock protecting policy
  ceph::mutex policy_lock =
    ceph::make_mutex("SimplePolicyMessenger::policy_lock");
  // entity_name_t::type -> Policy
  ceph::net::PolicySet<Throttle> policy_set;
public:
  /// Forwards identity to the Messenger base; no policies are set yet.
  SimplePolicyMessenger(CephContext *cct, entity_name_t name)
    : Messenger(cct, name)
    {
    }
  /**
   * Get the Policy associated with a type of peer.
   * @param t The peer type to get the default policy for.
   *
   * @return A const Policy reference.
   */
  Policy get_policy(int t) override {
    std::lock_guard l{policy_lock};
    return policy_set.get(t);
  }
  Policy get_default_policy() override {
    std::lock_guard l{policy_lock};
    return policy_set.get_default();
  }
  /**
   * Set a policy which is applied to all peers who do not have a type-specific
   * Policy.
   * This is an init-time function and cannot be called after calling
   * start() or bind().
   *
   * @param p The Policy to apply.
   */
  void set_default_policy(Policy p) override {
    std::lock_guard l{policy_lock};
    policy_set.set_default(p);
  }
  /**
   * Set a policy which is applied to all peers of the given type.
   * This is an init-time function and cannot be called after calling
   * start() or bind().
   *
   * @param type The peer type this policy applies to.
   * @param p The policy to apply.
   */
  void set_policy(int type, Policy p) override {
    std::lock_guard l{policy_lock};
    policy_set.set(type, p);
  }
  /**
   * Set a Throttler which is applied to all Messages from the given
   * type of peer.
   * This is an init-time function and cannot be called after calling
   * start() or bind().
   *
   * @param type The peer type this Throttler will apply to.
   * @param t The Throttler to apply. The messenger does not take
   * ownership of this pointer, but you must not destroy it before
   * you destroy messenger.
   */
  void set_policy_throttlers(int type,
			     Throttle* byte_throttle,
			     Throttle* msg_throttle) override {
    std::lock_guard l{policy_lock};
    policy_set.set_throttlers(type, byte_throttle, msg_throttle);
  }
}; /* SimplePolicyMessenger */
#endif /* SIMPLE_POLICY_MESSENGER_H */
| 2,806 | 27.07 | 79 |
h
|
null |
ceph-main/src/msg/compressor_registry.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "compressor_registry.h"
#include "common/dout.h"
#define dout_subsys ceph_subsys_ms
#undef dout_prefix
#define dout_prefix *_dout << "CompressorRegistry(" << this << ") "
// Register as a config observer so the cached compression settings are
// refreshed whenever the tracked ms_* options change at runtime.
CompressorRegistry::CompressorRegistry(CephContext *cct)
  : cct(cct)
{
  cct->_conf.add_observer(this);
}
// Unregister from config observation before the object goes away.
CompressorRegistry::~CompressorRegistry()
{
  cct->_conf.remove_observer(this);
}
/**
 * Config options this observer wants change notifications for.
 * The returned array is nullptr-terminated and has static storage.
 */
const char** CompressorRegistry::get_tracked_conf_keys() const
{
  static const char *tracked[] = {
    "ms_osd_compress_mode",
    "ms_osd_compression_algorithm",
    "ms_osd_compress_min_size",
    "ms_compress_secure",
    nullptr
  };
  return tracked;
}
// md_config_obs_t hook: one or more tracked keys changed.  The specific
// changed set is ignored -- _refresh_config() simply re-reads every
// setting under the registry lock.
void CompressorRegistry::handle_conf_change(
  const ConfigProxy& conf,
  const std::set<std::string>& changed)
{
  std::scoped_lock l(lock);
  _refresh_config();
}
/**
 * Parse a delimiter-separated list of compression algorithm names
 * (separators: ";,= \t") into Compressor algorithm ids.
 *
 * Unknown names are logged and skipped.  An empty result degenerates to
 * a single COMP_ALG_NONE entry, so callers always get a non-empty list.
 */
std::vector<uint32_t> CompressorRegistry::_parse_method_list(const std::string& s)
{
  std::vector<uint32_t> methods;
  for_each_substr(s, ";,= \t", [&] (auto method) {
    ldout(cct,20) << "adding algorithm method: " << method << dendl;
    auto alg_type = Compressor::get_comp_alg_type(method);
    if (alg_type) {
      methods.push_back(*alg_type);
    } else {
      ldout(cct,5) << "WARNING: unknown algorithm method " << method << dendl;
    }
  });
  if (methods.empty()) {
    methods.push_back(Compressor::COMP_ALG_NONE);
  }
  ldout(cct,20) << __func__ << " " << s << " -> " << methods << dendl;
  return methods;
}
// Re-read all tracked ms_* compression options from the config proxy
// into the cached members.  Caller must hold 'lock'.
void CompressorRegistry::_refresh_config()
{
  auto c_mode = Compressor::get_comp_mode_type(cct->_conf.get_val<std::string>("ms_osd_compress_mode"));
  if (c_mode) {
    ms_osd_compress_mode = *c_mode;
  } else {
    // unparseable mode string: fall back to no compression
    ldout(cct,1) << __func__ << " failed to identify ms_osd_compress_mode "
		 << ms_osd_compress_mode << dendl;
    ms_osd_compress_mode = Compressor::COMP_NONE;
  }
  ms_osd_compression_methods = _parse_method_list(cct->_conf.get_val<std::string>("ms_osd_compression_algorithm"));
  ms_osd_compress_min_size = cct->_conf.get_val<std::uint64_t>("ms_osd_compress_min_size");
  ms_compress_secure = cct->_conf.get_val<bool>("ms_compress_secure");
  ldout(cct,10) << __func__ << " ms_osd_compression_mode " << ms_osd_compress_mode
		<< " ms_osd_compression_methods " << ms_osd_compression_methods
		<< " ms_osd_compress_above_min_size " << ms_osd_compress_min_size
		<< " ms_compress_secure " << ms_compress_secure
		<< dendl;
}
/**
 * Choose a compression algorithm for a session.
 *
 * Scans the peer's preference list in order and returns the first entry
 * that is also allowed for this peer type; falls back to COMP_ALG_NONE
 * (logging at level 1) when there is no overlap.
 */
Compressor::CompressionAlgorithm
CompressorRegistry::pick_method(uint32_t peer_type,
				const std::vector<uint32_t>& preferred_methods)
{
  const std::vector<uint32_t> allowed_methods = get_methods(peer_type);
  auto it = std::find_first_of(preferred_methods.begin(),
			       preferred_methods.end(),
			       allowed_methods.begin(),
			       allowed_methods.end());
  if (it != preferred_methods.end()) {
    return static_cast<Compressor::CompressionAlgorithm>(*it);
  }
  ldout(cct,1) << "failed to pick compression method from client's "
               << preferred_methods
               << " and our " << allowed_methods << dendl;
  return Compressor::COMP_ALG_NONE;
}
// Compression mode for a new connection: COMP_NONE when the connection
// is secure and ms_compress_secure is off, the configured OSD mode for
// OSD peers, and COMP_NONE for every other peer type.
Compressor::CompressionMode
CompressorRegistry::get_mode(uint32_t peer_type, bool is_secure)
{
  std::scoped_lock guard(lock);
  ldout(cct, 20) << __func__ << " peer_type " << peer_type
                 << " is_secure " << is_secure << dendl;
  if (is_secure && !ms_compress_secure) {
    return Compressor::COMP_NONE;
  }
  if (peer_type == CEPH_ENTITY_TYPE_OSD) {
    return static_cast<Compressor::CompressionMode>(ms_osd_compress_mode);
  }
  return Compressor::COMP_NONE;
}
| 3,797 | 28.905512 | 115 |
cc
|
null |
ceph-main/src/msg/compressor_registry.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <map>
#include <vector>
#include "compressor/Compressor.h"
#include "common/ceph_mutex.h"
#include "common/ceph_context.h"
#include "common/config_cacher.h"
/**
 * CompressorRegistry caches the messenger (ms_*) on-wire compression
 * settings and answers policy questions for new connections: which
 * compression mode/algorithms apply to a given peer type, the minimum
 * message size worth compressing, and whether compression is permitted
 * on secure (encrypted) connections.  It observes the relevant config
 * keys and refreshes the cached values when they change.
 */
class CompressorRegistry : public md_config_obs_t {
public:
  CompressorRegistry(CephContext *cct);
  ~CompressorRegistry();
  /// Re-read all tracked config values under the registry lock.
  void refresh_config() {
    std::scoped_lock l(lock);
    _refresh_config();
  }
  const char** get_tracked_conf_keys() const override;
  void handle_conf_change(const ConfigProxy& conf,
                          const std::set<std::string>& changed) override;
  /// First entry of @p preferred_methods also allowed for @p peer_type,
  /// or COMP_ALG_NONE if there is no overlap.
  TOPNSPC::Compressor::CompressionAlgorithm pick_method(uint32_t peer_type,
                                     const std::vector<uint32_t>& preferred_methods);
  TOPNSPC::Compressor::CompressionMode get_mode(uint32_t peer_type, bool is_secure);
  // returns by value; the former `const` qualifier on the return type
  // only inhibited move semantics and has been dropped
  std::vector<uint32_t> get_methods(uint32_t peer_type) {
    std::scoped_lock l(lock);
    switch (peer_type) {
      case CEPH_ENTITY_TYPE_OSD:
        return ms_osd_compression_methods;
      default:
        return {};
    }
  }
  uint64_t get_min_compression_size(uint32_t peer_type) const {
    std::scoped_lock l(lock);
    switch (peer_type) {
      case CEPH_ENTITY_TYPE_OSD:
        return ms_osd_compress_min_size;
      default:
        return 0;
    }
  }
  bool get_is_compress_secure() const {
    std::scoped_lock l(lock);
    return ms_compress_secure;
  }
private:
  CephContext *cct;
  mutable ceph::mutex lock = ceph::make_mutex("CompressorRegistry::lock");
  // cached config values, guarded by `lock`; in-class defaults are safe
  // fallbacks in case a getter runs before the first _refresh_config()
  uint32_t ms_osd_compress_mode = 0;          // Compressor::COMP_NONE
  bool ms_compress_secure = false;
  std::uint64_t ms_osd_compress_min_size = 0;
  std::vector<uint32_t> ms_osd_compression_methods;
  void _refresh_config();
  std::vector<uint32_t> _parse_method_list(const std::string& s);
};
| 1,838 | 25.271429 | 84 |
h
|
null |
ceph-main/src/msg/msg_fmt.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
/**
* \file fmtlib formatters for some msg_types.h classes
*/
#include <fmt/format.h>
#include "msg/msg_types.h"
/**
 * fmtlib formatter for entity_name_t, mirroring operator<<:
 * "<type>.<id>", or "<type>.?" when the id is unassigned.
 */
template <>
struct fmt::formatter<entity_name_t> {
  constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
  template <typename FormatContext>
  // const-qualified: fmt >= 10 requires format() to be callable on a
  // const formatter; this is also accepted by older fmt versions
  auto format(const entity_name_t& addr, FormatContext& ctx) const
  {
    if (addr.is_new() || addr.num() < 0) {
      return fmt::format_to(ctx.out(), "{}.?", addr.type_str());
    }
    return fmt::format_to(ctx.out(), "{}.{}", addr.type_str(), addr.num());
  }
};
| 654 | 24.192308 | 75 |
h
|
null |
ceph-main/src/msg/msg_types.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "msg_types.h"
#include <arpa/inet.h>
#include <stdlib.h>
#include <string.h>
#include <netdb.h>
#include <fmt/format.h>
#include "common/Formatter.h"
/*
 * Parse "<type>.<id>" (e.g. "osd.3", "client.4112") into *this.
 * Returns false on an unknown type prefix, whitespace where the id
 * should start, a missing number, or trailing garbage after it.
 */
bool entity_name_t::parse(std::string_view s)
{
  const char* start = s.data();
  if (s.find("mon.") == 0) {
    _type = TYPE_MON;
    start += 4;
  } else if (s.find("osd.") == 0) {
    _type = TYPE_OSD;
    start += 4;
  } else if (s.find("mds.") == 0) {
    _type = TYPE_MDS;
    start += 4;
  } else if (s.find("client.") == 0) {
    _type = TYPE_CLIENT;
    start += 7;
  } else if (s.find("mgr.") == 0) {
    _type = TYPE_MGR;
    start += 4;
  } else {
    return false;
  }
  // cast to unsigned char: passing a plain (possibly negative) char to
  // isspace() is undefined behavior
  if (isspace(static_cast<unsigned char>(*start)))
    return false;
  // NOTE(review): strtoll() and the *start deref above can read the byte
  // after the view; this assumes the underlying buffer is NUL-terminated
  // right past the view (true for std::string/c-string callers) -- confirm.
  char *end = nullptr;
  _num = strtoll(start, &end, 10);
  if (end == nullptr || end == start) {
    return false;
  } else {
    // accept only if the number consumed the rest of the input
    return end == s.data() + s.size();
  }
}
// Formatter output: { "type": <type name string>, "num": <id> }.
void entity_name_t::dump(ceph::Formatter *f) const
{
  f->dump_string("type", type_str());
  f->dump_unsigned("num", num());
}
// Formatter output: type name, printable sockaddr, and the nonce.
void entity_addr_t::dump(ceph::Formatter *f) const
{
  f->dump_string("type", get_type_name(type));
  f->dump_stream("addr") << get_sockaddr();
  f->dump_unsigned("nonce", nonce);
}
// Formatter output: nested "name" and "addr" objects.
void entity_inst_t::dump(ceph::Formatter *f) const
{
  f->dump_object("name", name);
  f->dump_object("addr", addr);
}
// Test fixtures: an unassigned mon plus one assigned mon/osd/client.
void entity_name_t::generate_test_instances(std::list<entity_name_t*>& o)
{
  for (const auto& n : {entity_name_t::MON(), entity_name_t::MON(1),
                        entity_name_t::OSD(1), entity_name_t::CLIENT(1)}) {
    o.push_back(new entity_name_t(n));
  }
}
// Test fixtures: a blank address, a nonce-only address, and a fully
// populated legacy 127.0.1.2:2 address.
void entity_addr_t::generate_test_instances(std::list<entity_addr_t*>& o)
{
  o.push_back(new entity_addr_t());
  auto with_nonce = new entity_addr_t();
  with_nonce->set_nonce(1);
  o.push_back(with_nonce);
  auto full = new entity_addr_t();
  full->set_type(entity_addr_t::TYPE_LEGACY);
  full->set_nonce(5);
  full->set_family(AF_INET);
  full->set_in4_quad(0, 127);
  full->set_in4_quad(1, 0);
  full->set_in4_quad(2, 1);
  full->set_in4_quad(3, 2);
  full->set_port(2);
  o.push_back(full);
}
// Test fixtures: a default-constructed instance and one built from
// default name/addr components.
void entity_inst_t::generate_test_instances(std::list<entity_inst_t*>& o)
{
  o.push_back(new entity_inst_t());
  o.push_back(new entity_inst_t(entity_name_t(), entity_addr_t()));
}
// string_view wrapper around the char* parser: succeed only if parsing
// consumed the entire view.
bool entity_addr_t::parse(const std::string_view s, int default_type)
{
  const char* begin = s.data();
  const char* stop = nullptr;
  if (!parse(begin, &stop, default_type)) {
    return false;
  }
  return stop == begin + s.size();
}
/*
 * Parse one address of the form
 *   [v1:|v2:|any:][ [ ]ip[ ] ][:port][/nonce]     or "-" (TYPE_NONE)
 * On success fills *this, sets *end (if given) past the consumed input,
 * and returns true.  On failure *this is left default-constructed and
 * *end points at s.
 */
bool entity_addr_t::parse(const char *s, const char **end, int default_type)
{
  *this = entity_addr_t();
  const char *start = s;
  if (end) {
    *end = s;
  }
  // optional explicit protocol prefix; otherwise use default_type
  int newtype = default_type;
  if (strncmp("v1:", s, 3) == 0) {
    start += 3;
    newtype = TYPE_LEGACY;
  } else if (strncmp("v2:", s, 3) == 0) {
    start += 3;
    newtype = TYPE_MSGR2;
  } else if (strncmp("any:", s, 4) == 0) {
    start += 4;
    newtype = TYPE_ANY;
  } else if (*s == '-') {
    newtype = TYPE_NONE;
    if (end) {
      *end = s + 1;
    }
    return true;
  }
  bool brackets = false;
  if (*start == '[') {
    start++;
    brackets = true;
  }
  // inet_pton() requires a null terminated input, so let's fill two
  // buffers, one with ipv4 allowed characters, and one with ipv6, and
  // then see which parses.
  char buf4[39];
  char *o = buf4;
  const char *p = start;
  // bound is sizeof-1 to leave room for the terminating NUL; the old
  // bound allowed *o = 0 to write one byte past the end of the buffer
  // for overlong all-digit/dot input (stack overflow)
  while (o < buf4 + sizeof(buf4) - 1 &&
         *p && ((*p == '.') ||
                (*p >= '0' && *p <= '9'))) {
    *o++ = *p++;
  }
  *o = 0;
  char buf6[64];  // 45 chars + NUL suffice for any valid IPv6 text form
  o = buf6;
  p = start;
  // same off-by-one fix as above: reserve space for the NUL terminator
  while (o < buf6 + sizeof(buf6) - 1 &&
         *p && ((*p == ':') ||
                (*p >= '0' && *p <= '9') ||
                (*p >= 'a' && *p <= 'f') ||
                (*p >= 'A' && *p <= 'F'))) {
    *o++ = *p++;
  }
  *o = 0;
  // try ipv4 first, then ipv6
  struct in_addr a4;
  struct in6_addr a6;
  if (inet_pton(AF_INET, buf4, &a4)) {
    u.sin.sin_addr.s_addr = a4.s_addr;
    u.sa.sa_family = AF_INET;
    p = start + strlen(buf4);
  } else if (inet_pton(AF_INET6, buf6, &a6)) {
    u.sa.sa_family = AF_INET6;
    memcpy(&u.sin6.sin6_addr, &a6, sizeof(a6));
    p = start + strlen(buf6);
  } else {
    return false;
  }
  if (brackets) {
    if (*p != ']')
      return false;
    p++;
  }
  if (*p == ':') {
    // parse a port, too!
    p++;
    int port = atoi(p);
    if (port > MAX_PORT_NUMBER) {
      return false;
    }
    set_port(port);
    while (*p && *p >= '0' && *p <= '9')
      p++;
  }
  if (*p == '/') {
    // parse nonce, too
    p++;
    int non = atoi(p);
    set_nonce(non);
    while (*p && *p >= '0' && *p <= '9')
      p++;
  }
  if (end)
    *end = p;
  type = newtype;
  return true;
}
// "-" for TYPE_NONE; otherwise "[<type>:]<sockaddr>/<nonce>", where the
// type prefix is omitted for TYPE_ANY.
std::ostream& operator<<(std::ostream& out, const entity_addr_t &addr)
{
  switch (addr.type) {
  case entity_addr_t::TYPE_NONE:
    return out << "-";
  case entity_addr_t::TYPE_ANY:
    break;  // no prefix
  default:
    out << entity_addr_t::get_type_name(addr.type) << ":";
    break;
  }
  return out << addr.get_sockaddr() << '/' << addr.nonce;
}
// Render an IPv4 sockaddr as "a.b.c.d:port", an IPv6 sockaddr as
// "[x:y::z]:port", and anything else as an unrecognized-family note.
std::ostream& operator<<(std::ostream& out, const sockaddr *psa)
{
  char buf[NI_MAXHOST] = { 0 };
  if (psa->sa_family == AF_INET) {
    auto sin = reinterpret_cast<const sockaddr_in*>(psa);
    inet_ntop(AF_INET, &sin->sin_addr, buf, NI_MAXHOST);
    return out << buf << ':' << ntohs(sin->sin_port);
  }
  if (psa->sa_family == AF_INET6) {
    auto sin6 = reinterpret_cast<const sockaddr_in6*>(psa);
    inet_ntop(AF_INET6, &sin6->sin6_addr, buf, NI_MAXHOST);
    return out << '[' << buf << "]:" << ntohs(sin6->sin6_port);
  }
  return out << "(unrecognized address family " << psa->sa_family << ")";
}
// Delegate to the sockaddr* printer above.
std::ostream& operator<<(std::ostream& out, const sockaddr_storage &ss)
{
  return out << reinterpret_cast<const sockaddr*>(&ss);
}
// entity_addrvec_t
// Parse either a single address or a bracketed, comma-separated list of
// addresses ("[a,b,...]").  On success *end points past the consumed
// input; on failure v is cleared and *end is reset to the start.
bool entity_addrvec_t::parse(const char *s, const char **end)
{
  const char *orig_s = s;
  const char *static_end;  // scratch target when the caller passed no end
  if (!end) {
    end = &static_end;
  } else {
    *end = s;
  }
  v.clear();
  bool brackets = false;
  if (*s == '[') {
    // weirdness: make sure this isn't an IPV6 addr!
    entity_addr_t a;
    const char *p;
    if (!a.parse(s, &p) || !a.is_ipv6()) {
      // it's not
      brackets = true;
      ++s;
    }
  }
  // parse one address; inside brackets keep going across commas
  while (*s) {
    entity_addr_t a;
    bool r = a.parse(s, end);
    if (!r) {
      if (brackets) {
        v.clear();
        *end = orig_s;
        return false;
      }
      break;
    }
    v.push_back(a);
    s = *end;
    if (!brackets) {
      break;
    }
    if (*s != ',') {
      break;
    }
    ++s;
  }
  // a bracketed list must close with ']'
  if (brackets) {
    if (*s == ']') {
      ++s;
      *end = s;
    } else {
      *end = orig_s;
      v.clear();
      return false;
    }
  }
  return !v.empty();
}
// Wire encoding: peers without MSG_ADDR2 get a single bare legacy
// entity_addr_t (the marker==0 path on decode); modern peers get a
// marker byte 2 followed by the encoded vector.
void entity_addrvec_t::encode(ceph::buffer::list& bl, uint64_t features) const
{
  using ceph::encode;
  if ((features & CEPH_FEATURE_MSG_ADDR2) == 0) {
    // encode a single legacy entity_addr_t for unfeatured peers
    encode(legacy_addr(), bl, 0);
    return;
  }
  encode((__u8)2, bl);
  encode(v, bl, features);
}
// Wire decoding.  The leading marker byte selects the format:
//   0 -> pre-ADDR2 legacy single entity_addr_t
//   1 -> single entity_addr_t in its own versioned encoding
//   2 -> vector of entity_addr_t (the modern form)
// Anything above 2 is rejected as malformed.
void entity_addrvec_t::decode(ceph::buffer::list::const_iterator& bl)
{
  using ceph::decode;
  __u8 marker;
  decode(marker, bl);
  if (marker == 0) {
    // legacy!
    entity_addr_t addr;
    addr.decode_legacy_addr_after_marker(bl);
    v.clear();
    v.push_back(addr);
    return;
  }
  if (marker == 1) {
    entity_addr_t addr;
    DECODE_START(1, bl);
    decode(addr.type, bl);
    decode(addr.nonce, bl);
    __u32 elen;  // length of the encoded sockaddr payload
    decode(elen, bl);
    if (elen) {
      // const_cast via C-style cast: we are filling the union in place
      struct sockaddr *sa = (struct sockaddr *)addr.get_sockaddr();
#if defined(__FreeBSD__) || defined(__APPLE__)
      sa->sa_len = 0;
#endif
      uint16_t ss_family;
      if (elen < sizeof(ss_family)) {
        throw ceph::buffer::malformed_input("elen smaller than family len");
      }
      decode(ss_family, bl);
      sa->sa_family = ss_family;
      elen -= sizeof(ss_family);
      // guard against writing past the in-memory sockaddr union
      if (elen > addr.get_sockaddr_len() - sizeof(sa->sa_family)) {
        throw ceph::buffer::malformed_input("elen exceeds sockaddr len");
      }
      bl.copy(elen, sa->sa_data);
    }
    DECODE_FINISH(bl);
    v.clear();
    v.push_back(addr);
    return;
  }
  if (marker > 2)
    throw ceph::buffer::malformed_input("entity_addrvec_marker > 2");
  decode(v, bl);
}
// Formatter output: an "addrvec" array of address objects.
void entity_addrvec_t::dump(ceph::Formatter *f) const
{
  f->open_array_section("addrvec");
  for (const auto& a : v) {
    f->dump_object("addr", a);
  }
  f->close_section();
}
// Test fixtures: empty vector, one blank address, two blank addresses.
void entity_addrvec_t::generate_test_instances(std::list<entity_addrvec_t*>& ls)
{
  ls.push_back(new entity_addrvec_t());
  auto one = new entity_addrvec_t();
  one->v.push_back(entity_addr_t());
  ls.push_back(one);
  auto two = new entity_addrvec_t();
  two->v.push_back(entity_addr_t());
  two->v.push_back(entity_addr_t());
  ls.push_back(two);
}
// Printable IP without port/nonce; empty string for non-IP families or
// on inet_ntop() failure.
std::string entity_addr_t::ip_only_to_str() const
{
  char addr_buf[INET6_ADDRSTRLEN];
  const char *txt = nullptr;
  if (get_family() == AF_INET) {
    txt = inet_ntop(AF_INET, &in4_addr().sin_addr,
                    addr_buf, INET_ADDRSTRLEN);
  } else if (get_family() == AF_INET6) {
    txt = inet_ntop(AF_INET6, &in6_addr().sin6_addr,
                    addr_buf, INET6_ADDRSTRLEN);
  }
  return txt ? txt : "";
}
// "ip:port", with the IPv6 form bracketed as "[ip]:port".
std::string entity_addr_t::ip_n_port_to_str() const
{
  const auto ip = ip_only_to_str();
  return is_ipv6() ? fmt::format("[{}]:{}", ip, get_port())
                   : fmt::format("{}:{}", ip, get_port());
}
| 9,696 | 21.870283 | 80 |
cc
|
null |
ceph-main/src/msg/msg_types.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MSG_TYPES_H
#define CEPH_MSG_TYPES_H
#include <sstream>
#include <netinet/in.h>
#include <fmt/format.h>
#if FMT_VERSION >= 90000
#include <fmt/ostream.h>
#endif
#include "include/ceph_features.h"
#include "include/types.h"
#include "include/blobhash.h"
#include "include/encoding.h"
#define MAX_PORT_NUMBER 65535
#ifdef _WIN32
// ceph_sockaddr_storage matches the Linux format.
#define AF_INET6_LINUX 10
#endif
namespace ceph {
class Formatter;
}
std::ostream& operator<<(std::ostream& out, const sockaddr_storage &ss);
std::ostream& operator<<(std::ostream& out, const sockaddr *sa);
typedef uint8_t entity_type_t;
/**
 * Logical identity of a ceph entity: an entity type (mon/mds/osd/
 * client/mgr) plus a per-type numeric id.  A negative id (NEW) means
 * "not yet assigned".
 */
class entity_name_t {
public:
  entity_type_t _type;  // one of the CEPH_ENTITY_TYPE_* values
  int64_t _num;         // id within the type; < 0 means unassigned (NEW)
public:
  static const int TYPE_MON = CEPH_ENTITY_TYPE_MON;
  static const int TYPE_MDS = CEPH_ENTITY_TYPE_MDS;
  static const int TYPE_OSD = CEPH_ENTITY_TYPE_OSD;
  static const int TYPE_CLIENT = CEPH_ENTITY_TYPE_CLIENT;
  static const int TYPE_MGR = CEPH_ENTITY_TYPE_MGR;
  static const int64_t NEW = -1;
  // cons
  entity_name_t() : _type(0), _num(0) { }
  entity_name_t(int t, int64_t n) : _type(t), _num(n) { }
  explicit entity_name_t(const ceph_entity_name &n) :
    _type(n.type), _num(n.num) { }
  // static cons
  static entity_name_t MON(int64_t i=NEW) { return entity_name_t(TYPE_MON, i); }
  static entity_name_t MDS(int64_t i=NEW) { return entity_name_t(TYPE_MDS, i); }
  static entity_name_t OSD(int64_t i=NEW) { return entity_name_t(TYPE_OSD, i); }
  static entity_name_t CLIENT(int64_t i=NEW) { return entity_name_t(TYPE_CLIENT, i); }
  static entity_name_t MGR(int64_t i=NEW) { return entity_name_t(TYPE_MGR, i); }
  int64_t num() const { return _num; }
  int type() const { return _type; }
  const char *type_str() const {
    return ceph_entity_type_name(type());
  }
  bool is_new() const { return num() < 0; }
  bool is_client() const { return type() == TYPE_CLIENT; }
  bool is_mds() const { return type() == TYPE_MDS; }
  bool is_osd() const { return type() == TYPE_OSD; }
  bool is_mon() const { return type() == TYPE_MON; }
  bool is_mgr() const { return type() == TYPE_MGR; }
  // conversion to the on-wire struct; _num is stored little-endian
  operator ceph_entity_name() const {
    ceph_entity_name n = { _type, ceph_le64(_num) };
    return n;
  }
  // parse "<type>.<id>" strings such as "osd.3"; see msg_types.cc
  bool parse(std::string_view s);
  DENC(entity_name_t, v, p) {
    denc(v._type, p);
    denc(v._num, p);
  }
  void dump(ceph::Formatter *f) const;
  static void generate_test_instances(std::list<entity_name_t*>& o);
};
WRITE_CLASS_DENC(entity_name_t)
// compare (type, num) pairs; operator< orders lexicographically
inline bool operator== (const entity_name_t& l, const entity_name_t& r) {
  return (l.type() == r.type()) && (l.num() == r.num()); }
inline bool operator!= (const entity_name_t& l, const entity_name_t& r) {
  return (l.type() != r.type()) || (l.num() != r.num()); }
inline bool operator< (const entity_name_t& l, const entity_name_t& r) {
  return (l.type() < r.type()) || (l.type() == r.type() && l.num() < r.num()); }
// print as "<type>.<id>", or "<type>.?" when the id is unassigned
inline std::ostream& operator<<(std::ostream& out, const entity_name_t& addr) {
  out << addr.type_str() << '.';
  if (addr.is_new() || addr.num() < 0) {
    return out << '?';
  }
  return out << addr.num();
}
inline std::ostream& operator<<(std::ostream& out, const ceph_entity_name& addr) {
return out << entity_name_t{addr.type, static_cast<int64_t>(addr.num)};
}
namespace std {
  template<> struct hash< entity_name_t >
  {
    // hash of (type XOR num); names whose type and num xor to the same
    // value collide, which affects only hash quality, not correctness
    size_t operator()( const entity_name_t &m ) const
    {
      return rjhash32(m.type() ^ m.num());
    }
  };
} // namespace std
// define a wire format for sockaddr that matches Linux's.
// 128-byte packed struct mirroring the Linux sockaddr_storage layout;
// the family field is passed through htons()/ntohs() across
// encode()/decode() so the raw copy round-trips consistently.
struct ceph_sockaddr_storage {
  ceph_le16 ss_family;
  __u8 __ss_padding[128 - sizeof(ceph_le16)];
  void encode(ceph::buffer::list& bl) const {
    struct ceph_sockaddr_storage ss = *this;
    ss.ss_family = htons(ss.ss_family);
    ceph::encode_raw(ss, bl);
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    struct ceph_sockaddr_storage ss;
    ceph::decode_raw(ss, bl);
    ss.ss_family = ntohs(ss.ss_family);
    *this = ss;
  }
} __attribute__ ((__packed__));
WRITE_CLASS_ENCODER(ceph_sockaddr_storage)
/*
* encode sockaddr.ss_family as network byte order
*/
static inline void encode(const sockaddr_storage& a, ceph::buffer::list& bl) {
#if defined(__linux__)
  // native layout already matches the wire format; just swap the family
  struct sockaddr_storage ss = a;
  ss.ss_family = htons(ss.ss_family);
  ceph::encode_raw(ss, bl);
#elif defined(__FreeBSD__) || defined(__APPLE__)
  // BSD sockaddr_storage carries a leading ss_len byte that the wire
  // (Linux-layout) format does not; skip it while copying the payload
  ceph_sockaddr_storage ss{};
  auto src = (unsigned char const *)&a;
  auto dst = (unsigned char *)&ss;
  src += sizeof(a.ss_len);
  ss.ss_family = a.ss_family;
  src += sizeof(a.ss_family);
  dst += sizeof(ss.ss_family);
  const auto copy_size = std::min((unsigned char*)(&a + 1) - src,
                                  (unsigned char*)(&ss + 1) - dst);
  ::memcpy(dst, src, copy_size);
  encode(ss, bl);
#elif defined(_WIN32)
  ceph_sockaddr_storage ss{};
  ::memcpy(&ss, &a, std::min(sizeof(ss), sizeof(a)));
  // The Windows AF_INET6 definition doesn't match the Linux one.
  if (a.ss_family == AF_INET6) {
    ss.ss_family = AF_INET6_LINUX;
  }
  encode(ss, bl);
#else
  ceph_sockaddr_storage ss;
  ::memset(&ss, '\0', sizeof(ss));
  ::memcpy(&ss, &a, std::min(sizeof(ss), sizeof(a)));
  encode(ss, bl);
#endif
}
// inverse of the encode() above; each branch mirrors its platform quirks
static inline void decode(sockaddr_storage& a,
                          ceph::buffer::list::const_iterator& bl) {
#if defined(__linux__)
  ceph::decode_raw(a, bl);
  a.ss_family = ntohs(a.ss_family);
#elif defined(__FreeBSD__) || defined(__APPLE__)
  // re-insert the BSD-only ss_len byte (zeroed) ahead of the payload
  ceph_sockaddr_storage ss{};
  decode(ss, bl);
  auto src = (unsigned char const *)&ss;
  auto dst = (unsigned char *)&a;
  a.ss_len = 0;
  dst += sizeof(a.ss_len);
  a.ss_family = ss.ss_family;
  src += sizeof(ss.ss_family);
  dst += sizeof(a.ss_family);
  auto const copy_size = std::min((unsigned char*)(&ss + 1) - src,
                                  (unsigned char*)(&a + 1) - dst);
  ::memcpy(dst, src, copy_size);
#elif defined(_WIN32)
  ceph_sockaddr_storage ss{};
  decode(ss, bl);
  ::memcpy(&a, &ss, std::min(sizeof(ss), sizeof(a)));
  // map the Linux wire value for AF_INET6 back to the Windows one
  if (a.ss_family == AF_INET6_LINUX) {
    a.ss_family = AF_INET6;
  }
#else
  ceph_sockaddr_storage ss{};
  decode(ss, bl);
  ::memcpy(&a, &ss, std::min(sizeof(ss), sizeof(a)));
#endif
}
/*
* an entity's network address.
* includes a random value that prevents it from being reused.
* thus identifies a particular process instance.
*
* This also happens to work to support cidr ranges, in which
* case the nonce contains the netmask. It's great!
*/
struct entity_addr_t {
  typedef enum {
    TYPE_NONE = 0,
    TYPE_LEGACY = 1, ///< legacy msgr1 protocol (ceph jewel and older)
    TYPE_MSGR2 = 2, ///< msgr2 protocol (new in ceph kraken)
    TYPE_ANY = 3, ///< ambiguous
    TYPE_CIDR = 4,
  } type_t;
  static const type_t TYPE_DEFAULT = TYPE_MSGR2;
  static std::string_view get_type_name(int t) {
    switch (t) {
    case TYPE_NONE: return "none";
    case TYPE_LEGACY: return "v1";
    case TYPE_MSGR2: return "v2";
    case TYPE_ANY: return "any";
    case TYPE_CIDR: return "cidr";
    default: return "???";
    }
  };
  __u32 type;   // one of the type_t values above
  __u32 nonce;  // random per-process value (or netmask for CIDR use)
  // sockaddr union; always fully zeroed by the constructors, which is
  // what makes the raw memcmp comparisons/hashes on this type valid
  union {
    sockaddr sa;
    sockaddr_in sin;
    sockaddr_in6 sin6;
  } u;
  entity_addr_t() : type(0), nonce(0) {
    memset(&u, 0, sizeof(u));
  }
  entity_addr_t(__u32 _type, __u32 _nonce) : type(_type), nonce(_nonce) {
    memset(&u, 0, sizeof(u));
  }
  explicit entity_addr_t(const ceph_entity_addr &o) {
    type = o.type;
    nonce = o.nonce;
    memcpy(&u, &o.in_addr, sizeof(u));
#if !defined(__FreeBSD__)
    u.sa.sa_family = ntohs(u.sa.sa_family);
#endif
  }
  uint32_t get_type() const { return type; }
  void set_type(uint32_t t) { type = t; }
  bool is_legacy() const { return type == TYPE_LEGACY; }
  bool is_msgr2() const { return type == TYPE_MSGR2; }
  bool is_any() const { return type == TYPE_ANY; }
  // this isn't a guarantee; some client addrs will match it
  bool maybe_cidr() const { return get_port() == 0 && nonce != 0; }
  __u32 get_nonce() const { return nonce; }
  void set_nonce(__u32 n) { nonce = n; }
  int get_family() const {
    return u.sa.sa_family;
  }
  void set_family(int f) {
    u.sa.sa_family = f;
  }
  bool is_ipv4() const {
    return u.sa.sa_family == AF_INET;
  }
  bool is_ipv6() const {
    return u.sa.sa_family == AF_INET6;
  }
  sockaddr_in &in4_addr() {
    return u.sin;
  }
  const sockaddr_in &in4_addr() const{
    return u.sin;
  }
  sockaddr_in6 &in6_addr(){
    return u.sin6;
  }
  const sockaddr_in6 &in6_addr() const{
    return u.sin6;
  }
  const sockaddr *get_sockaddr() const {
    return &u.sa;
  }
  // size of the family-specific sockaddr; whole union if family unknown
  size_t get_sockaddr_len() const {
    switch (u.sa.sa_family) {
    case AF_INET:
      return sizeof(u.sin);
    case AF_INET6:
      return sizeof(u.sin6);
    }
    return sizeof(u);
  }
  // copy a sockaddr in; returns false for unsupported families
  bool set_sockaddr(const struct sockaddr *sa)
  {
    switch (sa->sa_family) {
    case AF_INET:
      // pre-zero, since we're only copying a portion of the source
      memset(&u, 0, sizeof(u));
      memcpy(&u.sin, sa, sizeof(u.sin));
      break;
    case AF_INET6:
      // pre-zero, since we're only copying a portion of the source
      memset(&u, 0, sizeof(u));
      memcpy(&u.sin6, sa, sizeof(u.sin6));
      break;
    case AF_UNSPEC:
      memset(&u, 0, sizeof(u));
      break;
    default:
      return false;
    }
    return true;
  }
  sockaddr_storage get_sockaddr_storage() const {
    sockaddr_storage ss;
    memcpy(&ss, &u, sizeof(u));
    memset((char*)&ss + sizeof(u), 0, sizeof(ss) - sizeof(u));
    return ss;
  }
  // set one byte of the IPv4 address (pos 0..3); forces family to AF_INET
  void set_in4_quad(int pos, int val) {
    u.sin.sin_family = AF_INET;
    unsigned char *ipq = (unsigned char*)&u.sin.sin_addr.s_addr;
    ipq[pos] = val;
  }
  // aborts if the family has not been set to AF_INET/AF_INET6 first
  void set_port(int port) {
    switch (u.sa.sa_family) {
    case AF_INET:
      u.sin.sin_port = htons(port);
      break;
    case AF_INET6:
      u.sin6.sin6_port = htons(port);
      break;
    default:
      ceph_abort();
    }
  }
  int get_port() const {
    switch (u.sa.sa_family) {
    case AF_INET:
      return ntohs(u.sin.sin_port);
    case AF_INET6:
      return ntohs(u.sin6.sin6_port);
    }
    return 0;
  }
  // conversion to the wire struct; note: the type field is dropped (0)
  operator ceph_entity_addr() const {
    ceph_entity_addr a;
    a.type = 0;
    a.nonce = nonce;
    a.in_addr = get_sockaddr_storage();
#if !defined(__FreeBSD__)
    a.in_addr.ss_family = htons(a.in_addr.ss_family);
#endif
    return a;
  }
  // loose equality: same port and nonce, and either a blank IP on one
  // side or byte-identical sockaddr unions
  bool probably_equals(const entity_addr_t &o) const {
    if (get_port() != o.get_port())
      return false;
    if (get_nonce() != o.get_nonce())
      return false;
    if (is_blank_ip() || o.is_blank_ip())
      return true;
    if (memcmp(&u, &o.u, sizeof(u)) == 0)
      return true;
    return false;
  }
  bool is_same_host(const entity_addr_t &o) const {
    if (u.sa.sa_family != o.u.sa.sa_family)
      return false;
    if (u.sa.sa_family == AF_INET)
      return u.sin.sin_addr.s_addr == o.u.sin.sin_addr.s_addr;
    if (u.sa.sa_family == AF_INET6)
      return memcmp(u.sin6.sin6_addr.s6_addr,
                    o.u.sin6.sin6_addr.s6_addr,
                    sizeof(u.sin6.sin6_addr.s6_addr)) == 0;
    return false;
  }
  // true for INADDR_ANY / in6addr_any, and for any non-IP family
  bool is_blank_ip() const {
    switch (u.sa.sa_family) {
    case AF_INET:
      return u.sin.sin_addr.s_addr == INADDR_ANY;
    case AF_INET6:
      return memcmp(&u.sin6.sin6_addr, &in6addr_any, sizeof(in6addr_any)) == 0;
    default:
      return true;
    }
  }
  bool is_ip() const {
    switch (u.sa.sa_family) {
    case AF_INET:
    case AF_INET6:
      return true;
    default:
      return false;
    }
  }
  std::string ip_only_to_str() const;
  std::string ip_n_port_to_str() const;
  std::string get_legacy_str() const {
    std::ostringstream ss;
    ss << get_sockaddr() << "/" << get_nonce();
    return ss.str();
  }
  bool parse(const std::string_view s, int default_type=TYPE_DEFAULT);
  bool parse(const char *s, const char **end = 0, int default_type=TYPE_DEFAULT);
  // decode body of the pre-ADDR2 format, after the caller consumed the
  // leading marker byte
  void decode_legacy_addr_after_marker(ceph::buffer::list::const_iterator& bl)
  {
    using ceph::decode;
    __u8 marker;
    __u16 rest;
    decode(marker, bl);
    decode(rest, bl);
    decode(nonce, bl);
    sockaddr_storage ss;
    decode(ss, bl);
    set_sockaddr((sockaddr*)&ss);
    if (get_family() == AF_UNSPEC) {
      type = TYPE_NONE;
    } else {
      type = TYPE_LEGACY;
    }
  }
  // Right now, these only deal with sockaddr_storage that have only family and content.
  // Apparently on BSD there is also an ss_len that we need to handle; this requires
  // broader study
  void encode(ceph::buffer::list& bl, uint64_t features) const {
    using ceph::encode;
    if ((features & CEPH_FEATURE_MSG_ADDR2) == 0) {
      // pre-ADDR2 peers: legacy marker 0 format
      encode((__u32)0, bl);
      encode(nonce, bl);
      sockaddr_storage ss = get_sockaddr_storage();
      encode(ss, bl);
      return;
    }
    encode((__u8)1, bl);
    ENCODE_START(1, 1, bl);
    if (HAVE_FEATURE(features, SERVER_NAUTILUS)) {
      encode(type, bl);
    } else {
      // map any -> legacy for old clients. this is primary for the benefit
      // of OSDMap's blocklist, but is reasonable in general since any: is
      // meaningless for pre-nautilus clients or daemons.
      auto t = type;
      if (t == TYPE_ANY) {
        t = TYPE_LEGACY;
      }
      encode(t, bl);
    }
    encode(nonce, bl);
    __u32 elen = get_sockaddr_len();
#if (__FreeBSD__) || defined(__APPLE__)
    elen -= sizeof(u.sa.sa_len);
#endif
    encode(elen, bl);
    if (elen) {
      uint16_t ss_family = u.sa.sa_family;
#if defined(_WIN32)
      if (ss_family == AF_INET6) {
        ss_family = AF_INET6_LINUX;
      }
#endif
      encode(ss_family, bl);
      elen -= sizeof(u.sa.sa_family);
      bl.append(u.sa.sa_data, elen);
    }
    ENCODE_FINISH(bl);
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    using ceph::decode;
    __u8 marker;
    decode(marker, bl);
    if (marker == 0) {
      decode_legacy_addr_after_marker(bl);
      return;
    }
    if (marker != 1)
      throw ceph::buffer::malformed_input("entity_addr_t marker != 1");
    DECODE_START(1, bl);
    decode(type, bl);
    decode(nonce, bl);
    __u32 elen;
    decode(elen, bl);
    if (elen) {
#if defined(__FreeBSD__) || defined(__APPLE__)
      u.sa.sa_len = 0;
#endif
      uint16_t ss_family;
      if (elen < sizeof(ss_family)) {
        throw ceph::buffer::malformed_input("elen smaller than family len");
      }
      decode(ss_family, bl);
#if defined(_WIN32)
      if (ss_family == AF_INET6_LINUX) {
        ss_family = AF_INET6;
      }
#endif
      u.sa.sa_family = ss_family;
      elen -= sizeof(ss_family);
      // bound check before copying into the fixed-size union
      if (elen > get_sockaddr_len() - sizeof(u.sa.sa_family)) {
        throw ceph::buffer::malformed_input("elen exceeds sockaddr len");
      }
      bl.copy(elen, u.sa.sa_data);
    }
    DECODE_FINISH(bl);
  }
  void dump(ceph::Formatter *f) const;
  static void generate_test_instances(std::list<entity_addr_t*>& o);
};
WRITE_CLASS_ENCODER_FEATURES(entity_addr_t)
std::ostream& operator<<(std::ostream& out, const entity_addr_t &addr);
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<entity_addr_t> : fmt::ostream_formatter {};
#endif
// byte-wise comparisons over the whole struct (type, nonce, sockaddr
// union); this relies on the constructors zeroing the union so unused
// trailing bytes compare equal
inline bool operator==(const entity_addr_t& a, const entity_addr_t& b) { return memcmp(&a, &b, sizeof(a)) == 0; }
inline bool operator!=(const entity_addr_t& a, const entity_addr_t& b) { return memcmp(&a, &b, sizeof(a)) != 0; }
inline bool operator<(const entity_addr_t& a, const entity_addr_t& b) { return memcmp(&a, &b, sizeof(a)) < 0; }
inline bool operator<=(const entity_addr_t& a, const entity_addr_t& b) { return memcmp(&a, &b, sizeof(a)) <= 0; }
inline bool operator>(const entity_addr_t& a, const entity_addr_t& b) { return memcmp(&a, &b, sizeof(a)) > 0; }
inline bool operator>=(const entity_addr_t& a, const entity_addr_t& b) { return memcmp(&a, &b, sizeof(a)) >= 0; }
namespace std {
template<> struct hash<entity_addr_t> {
  // byte-wise hash of the full struct; valid because the constructors
  // zero the sockaddr union before any fields are filled in
  size_t operator()( const entity_addr_t& x ) const {
    static blobhash H;
    return H(&x, sizeof(x));
  }
};
} // namespace std
/*
 * The set of addresses for one entity, typically one per protocol
 * flavor (e.g. a v1 and a v2 address for the same daemon).
 */
struct entity_addrvec_t {
  std::vector<entity_addr_t> v;
  entity_addrvec_t() {}
  explicit entity_addrvec_t(const entity_addr_t& a) : v({ a }) {}
  unsigned size() const { return v.size(); }
  bool empty() const { return v.empty(); }
  /// the TYPE_LEGACY entry, or a blank address if there is none
  entity_addr_t legacy_addr() const {
    return addr_of_type(entity_addr_t::TYPE_LEGACY);
  }
  /// best-effort legacy address: a legacy entry, an 'any' entry
  /// relabeled legacy, or (lying) the front entry relabeled legacy
  entity_addr_t as_legacy_addr() const {
    for (auto& a : v) {
      if (a.is_legacy()) {
        return a;
      }
      if (a.is_any()) {
        auto b = a;
        b.set_type(entity_addr_t::TYPE_LEGACY);
        return b;
      }
    }
    // hrm... lie!
    auto a = front();
    a.set_type(entity_addr_t::TYPE_LEGACY);
    return a;
  }
  entity_addr_t front() const {
    if (!v.empty()) {
      return v.front();
    }
    return entity_addr_t();
  }
  entity_addr_t legacy_or_front_addr() const {
    for (auto& a : v) {
      if (a.type == entity_addr_t::TYPE_LEGACY) {
        return a;
      }
    }
    return front();
  }
  std::string get_legacy_str() const {
    return legacy_or_front_addr().get_legacy_str();
  }
  entity_addr_t msgr2_addr() const {
    return addr_of_type(entity_addr_t::TYPE_MSGR2);
  }
  bool has_msgr2() const {
    for (auto& a : v) {
      if (a.is_msgr2()) {
        return true;
      }
    }
    return false;
  }
  /// entry of the requested type, falling back to a TYPE_ANY entry when
  /// the typed entry is blank; TYPE_ANY requests return the front entry
  entity_addr_t pick_addr(uint32_t type) const {
    entity_addr_t picked_addr;
    switch (type) {
    case entity_addr_t::TYPE_LEGACY:
      [[fallthrough]];
    case entity_addr_t::TYPE_MSGR2:
      picked_addr = addr_of_type(type);
      break;
    case entity_addr_t::TYPE_ANY:
      return front();
    default:
      return {};
    }
    if (!picked_addr.is_blank_ip()) {
      return picked_addr;
    } else {
      return addr_of_type(entity_addr_t::TYPE_ANY);
    }
  }
  entity_addr_t addr_of_type(uint32_t type) const {
    for (auto &a : v) {
      if (a.type == type) {
        return a;
      }
    }
    return entity_addr_t();
  }
  bool parse(const char *s, const char **end = 0);
  void get_ports(std::set<int> *ports) const {
    for (auto& a : v) {
      ports->insert(a.get_port());
    }
  }
  std::set<int> get_ports() const {
    std::set<int> r;
    get_ports(&r);
    return r;
  }
  void encode(ceph::buffer::list& bl, uint64_t features) const;
  void decode(ceph::buffer::list::const_iterator& bl);
  void dump(ceph::Formatter *f) const;
  static void generate_test_instances(std::list<entity_addrvec_t*>& ls);
  /// equality that also accepts a single legacy addr matching the other
  /// side's legacy entry
  bool legacy_equals(const entity_addrvec_t& o) const {
    if (v == o.v) {
      return true;
    }
    if (v.size() == 1 &&
        front().is_legacy() &&
        front() == o.legacy_addr()) {
      return true;
    }
    if (o.v.size() == 1 &&
        o.front().is_legacy() &&
        o.front() == legacy_addr()) {
      return true;
    }
    return false;
  }
  /// element-wise probably_equals() over both vectors
  bool probably_equals(const entity_addrvec_t& o) const {
    // size check first: the loop below indexes o.v[i] in lockstep and
    // would read out of bounds if o.v were shorter than v
    if (v.size() != o.v.size()) {
      return false;
    }
    for (unsigned i = 0; i < v.size(); ++i) {
      if (!v[i].probably_equals(o.v[i])) {
        return false;
      }
    }
    return true;
  }
  bool contains(const entity_addr_t& a) const {
    for (auto& i : v) {
      if (a == i) {
        return true;
      }
    }
    return false;
  }
  bool is_same_host(const entity_addr_t& a) const {
    for (auto& i : v) {
      if (i.is_same_host(a)) {
        return true;
      }
    }
    return false;
  }
  friend std::ostream& operator<<(std::ostream& out, const entity_addrvec_t& av) {
    if (av.v.empty()) {
      return out;
    } else if (av.v.size() == 1) {
      return out << av.v[0];
    } else {
      return out << av.v;
    }
  }
  friend bool operator==(const entity_addrvec_t& l, const entity_addrvec_t& r) {
    return l.v == r.v;
  }
  friend bool operator!=(const entity_addrvec_t& l, const entity_addrvec_t& r) {
    return l.v != r.v;
  }
  friend bool operator<(const entity_addrvec_t& l, const entity_addrvec_t& r) {
    return l.v < r.v; // see lexicographical_compare()
  }
  friend bool operator>(const entity_addrvec_t& l, const entity_addrvec_t& r) {
    return l.v > r.v; // see lexicographical_compare()
  }
};
WRITE_CLASS_ENCODER_FEATURES(entity_addrvec_t);
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<entity_addrvec_t> : fmt::ostream_formatter {};
#endif
namespace std {
template<> struct hash<entity_addrvec_t> {
  // sum of the byte-wise hashes of each entry; summation makes the
  // combined value insensitive to entry order
  size_t operator()( const entity_addrvec_t& x) const {
    static blobhash H;
    size_t r = 0;
    for (auto& i : x.v) {
      r += H((const char*)&i, sizeof(i));
    }
    return r;
  }
};
} // namespace std
/*
* a particular entity instance
*/
// A particular entity instance: logical name plus network address.
struct entity_inst_t {
  entity_name_t name;
  entity_addr_t addr;
  entity_inst_t() {}
  entity_inst_t(entity_name_t n, const entity_addr_t& a) : name(n), addr(a) {}
  // cppcheck-suppress noExplicitConstructor
  entity_inst_t(const ceph_entity_inst& i) : name(i.name), addr(i.addr) { }
  entity_inst_t(const ceph_entity_name& n, const ceph_entity_addr &a) : name(n), addr(a) {}
  // conversion to the wire struct
  operator ceph_entity_inst() {
    ceph_entity_inst i = {name, addr};
    return i;
  }
  void encode(ceph::buffer::list& bl, uint64_t features) const {
    using ceph::encode;
    encode(name, bl);
    encode(addr, bl, features);
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    using ceph::decode;
    decode(name, bl);
    decode(addr, bl);
  }
  void dump(ceph::Formatter *f) const;
  static void generate_test_instances(std::list<entity_inst_t*>& o);
};
WRITE_CLASS_ENCODER_FEATURES(entity_inst_t)
// compare (name, addr) pairs; ordering is lexicographic on name, addr
inline bool operator==(const entity_inst_t& a, const entity_inst_t& b) {
  return a.name == b.name && a.addr == b.addr;
}
inline bool operator!=(const entity_inst_t& a, const entity_inst_t& b) {
  return a.name != b.name || a.addr != b.addr;
}
inline bool operator<(const entity_inst_t& a, const entity_inst_t& b) {
  return a.name < b.name || (a.name == b.name && a.addr < b.addr);
}
inline bool operator<=(const entity_inst_t& a, const entity_inst_t& b) {
  return a.name < b.name || (a.name == b.name && a.addr <= b.addr);
}
inline bool operator>(const entity_inst_t& a, const entity_inst_t& b) { return b < a; }
inline bool operator>=(const entity_inst_t& a, const entity_inst_t& b) { return b <= a; }
namespace std {
// Hash an instance by XOR-combining the independent hashes of its two
// members (name and address).
template<> struct hash<entity_inst_t> {
  size_t operator()(const entity_inst_t& inst) const {
    static hash<entity_name_t> name_hasher;
    static hash<entity_addr_t> addr_hasher;
    return name_hasher(inst.name) ^ addr_hasher(inst.addr);
  }
};
} // namespace std
// Render an instance as "<name> <addr>".
inline std::ostream& operator<<(std::ostream& out, const entity_inst_t &i)
{
  out << i.name << " " << i.addr;
  return out;
}
// Render the packed wire struct by converting to entity_inst_t and reusing
// its formatter.
inline std::ostream& operator<<(std::ostream& out, const ceph_entity_inst &i)
{
  return out << entity_inst_t(i);
}
#endif
| 22,865 | 26.417266 | 113 |
h
|
null |
ceph-main/src/msg/async/AsyncConnection.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 UnitedStack <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <unistd.h>
#include "include/Context.h"
#include "include/random.h"
#include "common/errno.h"
#include "AsyncMessenger.h"
#include "AsyncConnection.h"
#include "ProtocolV1.h"
#include "ProtocolV2.h"
#include "messages/MOSDOp.h"
#include "messages/MOSDOpReply.h"
#include "common/EventTrace.h"
// Constant to limit starting sequence number to 2^31. Nothing special about it, just a big number. PLR
#define SEQ_MASK 0x7fffffff
#define dout_subsys ceph_subsys_ms
#undef dout_prefix
#define dout_prefix _conn_prefix(_dout)
// Build the per-connection log prefix: local addrs, peer addrs, protocol
// flavor (msgr2 vs legacy), auth mode, port, state and lossy flag.
std::ostream& AsyncConnection::_conn_prefix(std::ostream *_dout) {
  return *_dout << "-- " << async_msgr->get_myaddrs() << " >> "
                << *peer_addrs << " conn(" << this
                << (msgr2 ? " msgr2=" : " legacy=")
                << protocol.get()
                << " " << ceph_con_mode_name(protocol->auth_meta->con_mode)
                << " :" << port
                << " s=" << get_state_name(state)
                << " l=" << policy.lossy
                << ").";
}
// Notes:
// 1. Don't dispatch any event when closed! It may cause AsyncConnection alive even if AsyncMessenger dead
const uint32_t AsyncConnection::TCP_PREFETCH_MIN_SIZE = 512;
// Event adapter: forwards a timer wakeup to AsyncConnection::wakeup_from().
class C_time_wakeup : public EventCallback {
  AsyncConnectionRef conn;
 public:
  explicit C_time_wakeup(AsyncConnectionRef c): conn(c) {}
  void do_request(uint64_t fd_or_id) override {
    conn->wakeup_from(fd_or_id);
  }
};
// Event adapter: drives the connection state machine on readable events
// (see AsyncConnection::process()).
class C_handle_read : public EventCallback {
  AsyncConnectionRef conn;
 public:
  explicit C_handle_read(AsyncConnectionRef c): conn(c) {}
  void do_request(uint64_t fd_or_id) override {
    conn->process();
  }
};
// Event adapter: forwards writable events to AsyncConnection::handle_write().
class C_handle_write : public EventCallback {
  AsyncConnectionRef conn;
 public:
  explicit C_handle_write(AsyncConnectionRef c): conn(c) {}
  void do_request(uint64_t fd) override {
    conn->handle_write();
  }
};
// Event adapter: invokes the pending write-completion callback
// (see AsyncConnection::handle_write_callback()).
class C_handle_write_callback : public EventCallback {
  AsyncConnectionRef conn;
 public:
  explicit C_handle_write_callback(AsyncConnectionRef c) : conn(c) {}
  void do_request(uint64_t fd) override { conn->handle_write_callback(); }
};
// One-shot event adapter used to tear a connection down from within its own
// event center; deletes itself after dispatch (queued exactly once by _stop()).
class C_clean_handler : public EventCallback {
  AsyncConnectionRef conn;
 public:
  explicit C_clean_handler(AsyncConnectionRef c): conn(c) {}
  void do_request(uint64_t id) override {
    conn->cleanup();
    delete this;
  }
};
// Event adapter: forwards periodic tick timer events to
// AsyncConnection::tick() for connect/idle timeout checks.
class C_tick_wakeup : public EventCallback {
  AsyncConnectionRef conn;
 public:
  explicit C_tick_wakeup(AsyncConnectionRef c): conn(c) {}
  void do_request(uint64_t fd_or_id) override {
    conn->tick(fd_or_id);
  }
};
AsyncConnection::AsyncConnection(CephContext *cct, AsyncMessenger *m, DispatchQueue *q,
                                 Worker *w, bool m2, bool local)
  : Connection(cct, m),
    delay_state(NULL), async_msgr(m), conn_id(q->get_id()),
    logger(w->get_perf_counter()),
    labeled_logger(w->get_labeled_perf_counter()),
    state(STATE_NONE), port(-1),
    dispatch_queue(q), recv_buf(NULL),
    recv_max_prefetch(std::max<int64_t>(msgr->cct->_conf->ms_tcp_prefetch_max_size, TCP_PREFETCH_MIN_SIZE)),
    recv_start(0), recv_end(0),
    last_active(ceph::coarse_mono_clock::now()),
    connect_timeout_us(cct->_conf->ms_connection_ready_timeout*1000*1000),
    inactive_timeout_us(cct->_conf->ms_connection_idle_timeout*1000*1000),
    msgr2(m2), state_offset(0),
    worker(w), center(&w->center),read_buffer(nullptr)
{
#ifdef UNIT_TESTS_BUILT
  this->interceptor = m->interceptor;
#endif
  // Allocate the fixed set of event-callback adapters; they are freed in
  // cleanup(), not in the destructor.
  read_handler = new C_handle_read(this);
  write_handler = new C_handle_write(this);
  write_callback_handler = new C_handle_write_callback(this);
  wakeup_handler = new C_time_wakeup(this);
  tick_handler = new C_tick_wakeup(this);
  // Prefetch buffer is twice recv_max_prefetch; see read_until() for why.
  recv_buf = new char[2*recv_max_prefetch];
  // Select the wire protocol: loopback connections always use the loopback
  // v1 variant; otherwise pick msgr2 (v2) or legacy (v1).
  if (local) {
    protocol = std::unique_ptr<Protocol>(new LoopbackProtocolV1(this));
  } else if (m2) {
    protocol = std::unique_ptr<Protocol>(new ProtocolV2(this));
  } else {
    protocol = std::unique_ptr<Protocol>(new ProtocolV1(this));
  }
  logger->inc(l_msgr_created_connections);
}
AsyncConnection::~AsyncConnection()
{
  // delete[] on a null pointer is a no-op, so no guard is needed.
  delete[] recv_buf;
  // delay_state must already have been torn down in cleanup().
  ceph_assert(!delay_state);
}
// Auth/crypto connection mode, as negotiated by the active protocol.
int AsyncConnection::get_con_mode() const
{
  return protocol->get_con_mode();
}
// True when the active protocol is msgr2 (v2) rather than legacy v1.
bool AsyncConnection::is_msgr2() const
{
  return protocol->proto_type == 2;
}
// Lazily create the DelayedDelivery queue if ms_inject_delay_type includes
// this peer's entity type. Used only for fault-injection testing.
void AsyncConnection::maybe_start_delay_thread()
{
  if (!delay_state) {
    async_msgr->cct->_conf.with_val<std::string>(
      "ms_inject_delay_type",
      [this](const std::string& s) {
	if (s.find(ceph_entity_type_name(peer_type)) != std::string::npos) {
	  ldout(msgr->cct, 1) << __func__ << " setting up a delay queue"
			      << dendl;
	  delay_state = new DelayedDelivery(async_msgr, center, dispatch_queue,
					    conn_id);
	}
      });
  }
}
// Read exactly `len` bytes into `buffer`. Returns 0 when the read completed
// synchronously, <0 on error, and >0 (bytes still missing) when the read is
// incomplete — in that case the callback/len/buffer are stashed so process()
// can resume and invoke `callback` once the remainder arrives.
ssize_t AsyncConnection::read(unsigned len, char *buffer,
                              std::function<void(char *, ssize_t)> callback) {
  ldout(async_msgr->cct, 20) << __func__
                             << (pendingReadLen ? " continue" : " start")
                             << " len=" << len << dendl;
  ssize_t r = read_until(len, buffer);
  if (r > 0) {
    // Partial read: remember continuation state for process().
    readCallback = callback;
    pendingReadLen = len;
    read_buffer = buffer;
  }
  return r;
}
// Because this func will be called multi times to populate
// the needed buffer, so the passed in bufferptr must be the same.
// Normally, only "read_message" will pass existing bufferptr in
//
// And it will uses readahead method to reduce small read overhead,
// "recv_buf" is used to store read buffer
//
// return the remaining bytes, 0 means this buffer is finished
// else return < 0 means error
// Fill `p` with `len` bytes, resuming across calls via state_offset.
// Small reads (<= recv_max_prefetch) go through the prefetch buffer
// (recv_buf / recv_start / recv_end) to amortize syscall overhead; larger
// reads bypass it. Returns 0 when `p` is complete, <0 on error, otherwise
// the number of bytes still needed.
ssize_t AsyncConnection::read_until(unsigned len, char *p)
{
  ldout(async_msgr->cct, 25) << __func__ << " len is " << len << " state_offset is "
                             << state_offset << dendl;
  if (async_msgr->cct->_conf->ms_inject_socket_failures && cs) {
    if (rand() % async_msgr->cct->_conf->ms_inject_socket_failures == 0) {
      ldout(async_msgr->cct, 0) << __func__ << " injecting socket failure" << dendl;
      cs.shutdown();
    }
  }
  ssize_t r = 0;
  uint64_t left = len - state_offset;
  // First drain any bytes already sitting in the prefetch buffer.
  if (recv_end > recv_start) {
    uint64_t to_read = std::min<uint64_t>(recv_end - recv_start, left);
    memcpy(p, recv_buf+recv_start, to_read);
    recv_start += to_read;
    left -= to_read;
    ldout(async_msgr->cct, 25) << __func__ << " got " << to_read << " in buffer "
                               << " left is " << left << " buffer still has "
                               << recv_end - recv_start << dendl;
    if (left == 0) {
      state_offset = 0;
      return 0;
    }
    state_offset += to_read;
  }
  recv_end = recv_start = 0;
  /* nothing left in the prefetch buffer */
  if (left > (uint64_t)recv_max_prefetch) {
    /* this was a large read, we don't prefetch for these */
    do {
      r = read_bulk(p+state_offset, left);
      ldout(async_msgr->cct, 25) << __func__ << " read_bulk left is " << left << " got " << r << dendl;
      if (r < 0) {
        ldout(async_msgr->cct, 1) << __func__ << " read failed" << dendl;
        return -1;
      } else if (r == static_cast<int>(left)) {
        // Target fully satisfied.
        state_offset = 0;
        return 0;
      }
      state_offset += r;
      left -= r;
    } while (r > 0);
  } else {
    // Small read: pull up to recv_max_prefetch into recv_buf, then copy out
    // only what the caller asked for; the surplus stays buffered for the
    // next call.
    do {
      r = read_bulk(recv_buf+recv_end, recv_max_prefetch);
      ldout(async_msgr->cct, 25) << __func__ << " read_bulk recv_end is " << recv_end
                                 << " left is " << left << " got " << r << dendl;
      if (r < 0) {
        ldout(async_msgr->cct, 1) << __func__ << " read failed" << dendl;
        return -1;
      }
      recv_end += r;
      if (r >= static_cast<int>(left)) {
        // Enough arrived: copy the needed prefix, keep the rest prefetched.
        recv_start = len - state_offset;
        memcpy(p+state_offset, recv_buf, recv_start);
        state_offset = 0;
        return 0;
      }
      left -= r;
    } while (r > 0);
    // Socket drained before the target was met; consume what we buffered.
    memcpy(p+state_offset, recv_buf, recv_end-recv_start);
    state_offset += (recv_end - recv_start);
    recv_end = recv_start = 0;
  }
  ldout(async_msgr->cct, 25) << __func__ << " need len " << len << " remaining "
                             << len - state_offset << " bytes" << dendl;
  return len - state_offset;
}
/* return -1 means `fd` occurs error or closed, it should be closed
* return 0 means EAGAIN or EINTR */
/* Read up to `len` bytes from the socket into `buf`.
 * Returns the number of bytes read (>0), 0 on EAGAIN (no data available),
 * or -1 when the fd errored or the peer closed the connection.
 * EINTR is retried transparently. */
ssize_t AsyncConnection::read_bulk(char *buf, unsigned len)
{
  ssize_t nread;
  do {
    nread = cs.read(buf, len);
  } while (nread == -EINTR);  // retry interrupted reads
  if (nread < 0) {
    if (nread == -EAGAIN) {
      nread = 0;
    } else {
      // cpp_strerror handles the negative errno convention used here;
      // plain strerror(nread) would just print "Unknown error" (and is
      // not thread-safe).
      ldout(async_msgr->cct, 1) << __func__ << " reading from fd=" << cs.fd()
                                << " : "<< nread << " " << cpp_strerror(nread) << dendl;
      return -1;
    }
  } else if (nread == 0) {
    ldout(async_msgr->cct, 1) << __func__ << " peer close file descriptor "
                              << cs.fd() << dendl;
    return -1;
  }
  return nread;
}
// Queue `bl` for sending and try to flush immediately. Returns <0 on error,
// 0 when everything was sent, or the number of bytes still queued — in the
// latter case `callback` is stashed and fired (via handle_write_callback)
// once the queue drains.
ssize_t AsyncConnection::write(ceph::buffer::list &bl,
                               std::function<void(ssize_t)> callback,
                               bool more) {

    std::unique_lock<std::mutex> l(write_lock);
    outgoing_bl.claim_append(bl);
    ssize_t r = _try_send(more);
    if (r > 0) {
      // Partial send: remember the completion callback.
      writeCallback = callback;
    }
    return r;
}
// return the remaining bytes, it may larger than the length of ptr
// else return < 0 means error
// Flush outgoing_bl to the socket. Must run in the owning event-center
// thread (asserted). Registers/unregisters the writable event based on
// whether data remains queued, and schedules the write-completion callback
// once the queue fully drains. Returns <0 on send error, otherwise the
// number of bytes still queued.
ssize_t AsyncConnection::_try_send(bool more)
{
  if (async_msgr->cct->_conf->ms_inject_socket_failures && cs) {
    if (rand() % async_msgr->cct->_conf->ms_inject_socket_failures == 0) {
      ldout(async_msgr->cct, 0) << __func__ << " injecting socket failure" << dendl;
      cs.shutdown();
    }
  }

  ceph_assert(center->in_thread());
  ldout(async_msgr->cct, 25) << __func__ << " cs.send " << outgoing_bl.length()
                             << " bytes" << dendl;
  // A blocked network would make ::send return EAGAIN; to the logic below
  // that looks the same as not calling cs.send() at all (r = 0).
  ssize_t r = 0;
  if (likely(!inject_network_congestion())) {
    r = cs.send(outgoing_bl, more);
  }
  if (r < 0) {
    ldout(async_msgr->cct, 1) << __func__ << " send error: " << cpp_strerror(r) << dendl;
    return r;
  }

  ldout(async_msgr->cct, 10) << __func__ << " sent bytes " << r
                             << " remaining bytes " << outgoing_bl.length() << dendl;

  // Keep the writable-event registration in sync with queue state.
  if (!open_write && is_queued()) {
    center->create_file_event(cs.fd(), EVENT_WRITABLE, write_handler);
    open_write = true;
  }

  if (open_write && !is_queued()) {
    center->delete_file_event(cs.fd(), EVENT_WRITABLE);
    open_write = false;
    if (writeCallback) {
      center->dispatch_event_external(write_callback_handler);
    }
  }

  return outgoing_bl.length();
}
// Fault injection: sleep for ms_inject_internal_delays seconds if configured.
void AsyncConnection::inject_delay() {
  if (async_msgr->cct->_conf->ms_inject_internal_delays) {
    ldout(async_msgr->cct, 10) << __func__ << " sleep for " <<
      async_msgr->cct->_conf->ms_inject_internal_delays << dendl;
    utime_t t;
    t.set_from_double(async_msgr->cct->_conf->ms_inject_internal_delays);
    t.sleep();
  }
}

// Fault injection: with ms_inject_network_congestion = n (> 0), report
// congestion (skip the send) with probability (n-1)/n.
bool AsyncConnection::inject_network_congestion() const {
  return (async_msgr->cct->_conf->ms_inject_network_congestion > 0 &&
	  rand() % async_msgr->cct->_conf->ms_inject_network_congestion != 0);
}
// Main readable-event entry point: advance the connection state machine,
// then hand control to the protocol's read_event(). Also resumes a pending
// partial read (see read()) when the connection is established.
void AsyncConnection::process() {
  std::lock_guard<std::mutex> l(lock);
  last_active = ceph::coarse_mono_clock::now();
  recv_start_time = ceph::mono_clock::now();

  ldout(async_msgr->cct, 20) << __func__ << dendl;

  switch (state) {
    case STATE_NONE: {
      ldout(async_msgr->cct, 20) << __func__ << " enter none state" << dendl;
      return;
    }
    case STATE_CLOSED: {
      ldout(async_msgr->cct, 20) << __func__ << " socket closed" << dendl;
      return;
    }
    case STATE_CONNECTING: {
      ceph_assert(!policy.server);

      // clear timer (if any) since we are connecting/re-connecting
      if (last_tick_id) {
        center->delete_time_event(last_tick_id);
      }
      last_connect_started = ceph::coarse_mono_clock::now();
      last_tick_id = center->create_time_event(
          connect_timeout_us, tick_handler);

      // Drop any stale socket from a previous attempt before reconnecting.
      if (cs) {
        center->delete_file_event(cs.fd(), EVENT_READABLE | EVENT_WRITABLE);
        cs.close();
      }

      SocketOptions opts;
      opts.priority = async_msgr->get_socket_priority();
      if (async_msgr->cct->_conf->mon_use_min_delay_socket) {
        if (async_msgr->get_mytype() == CEPH_ENTITY_TYPE_MON &&
            peer_is_mon()) {
          opts.priority = SOCKET_PRIORITY_MIN_DELAY;
        }
      }
      opts.connect_bind_addr = msgr->get_myaddrs().front();
      ssize_t r = worker->connect(target_addr, opts, &cs);
      if (r < 0) {
        protocol->fault();
        return;
      }

      center->create_file_event(cs.fd(), EVENT_READABLE, read_handler);
      state = STATE_CONNECTING_RE;
      // NOTE: intentional fall-through — check connect progress immediately.
    }
    case STATE_CONNECTING_RE: {
      ssize_t r = cs.is_connected();
      if (r < 0) {
        ldout(async_msgr->cct, 1) << __func__ << " reconnect failed to "
                                  << target_addr << dendl;
        if (r == -ECONNREFUSED) {
          ldout(async_msgr->cct, 2)
              << __func__ << " connection refused!" << dendl;
          dispatch_queue->queue_refused(this);
        }
        protocol->fault();
        return;
      } else if (r == 0) {
        // Nonblocking connect still in progress; wait for writability if the
        // stack needs it, then return and try again on the next event.
        ldout(async_msgr->cct, 10)
            << __func__ << " nonblock connect inprogress" << dendl;
        if (async_msgr->get_stack()->nonblock_connect_need_writable_event()) {
          center->create_file_event(cs.fd(), EVENT_WRITABLE,
                                    read_handler);
        }
        logger->tinc(l_msgr_running_recv_time,
               ceph::mono_clock::now() - recv_start_time);
        return;
      }

      center->delete_file_event(cs.fd(), EVENT_WRITABLE);
      ldout(async_msgr->cct, 10)
          << __func__ << " connect successfully, ready to send banner" << dendl;
      state = STATE_CONNECTION_ESTABLISHED;
      break;
    }

    case STATE_ACCEPTING: {
      center->create_file_event(cs.fd(), EVENT_READABLE, read_handler);
      state = STATE_CONNECTION_ESTABLISHED;
      if (async_msgr->cct->_conf->mon_use_min_delay_socket) {
        if (async_msgr->get_mytype() == CEPH_ENTITY_TYPE_MON &&
            peer_is_mon()) {
          cs.set_priority(cs.fd(), SOCKET_PRIORITY_MIN_DELAY,
                          target_addr.get_family());
        }
      }
      break;
    }

    case STATE_CONNECTION_ESTABLISHED: {
      // Resume a partial read started by read(); on completion (or error)
      // clear the continuation state and invoke the stored callback.
      if (pendingReadLen) {
        ssize_t r = read(*pendingReadLen, read_buffer, readCallback);
        if (r <= 0) { // read all bytes, or an error occurred
          pendingReadLen.reset();
          char *buf_tmp = read_buffer;
          read_buffer = nullptr;
          readCallback(buf_tmp, r);
        }
        logger->tinc(l_msgr_running_recv_time,
            ceph::mono_clock::now() - recv_start_time);
        return;
      }
      break;
    }
  }

  protocol->read_event();

  logger->tinc(l_msgr_running_recv_time,
               ceph::mono_clock::now() - recv_start_time);
}
bool AsyncConnection::is_connected() {
  return protocol->is_connected();
}

// Initiate an outgoing connection; only called when the AsyncConnection is
// first constructed (see header comment).
void AsyncConnection::connect(const entity_addrvec_t &addrs, int type,
                              entity_addr_t &target) {

  std::lock_guard<std::mutex> l(lock);
  set_peer_type(type);
  set_peer_addrs(addrs);
  policy = msgr->get_policy(type);
  target_addr = target;
  _connect();
}
// Kick off the connect state machine. Caller holds `lock`.
void AsyncConnection::_connect()
{
  ldout(async_msgr->cct, 10) << __func__ << dendl;

  state = STATE_CONNECTING;
  protocol->connect();
  // Reschedule onto the event center to avoid lock dependencies — this may
  // be called from an external thread (e.g. send_message).
  center->dispatch_event_external(read_handler);
}
// Adopt an already-accepted socket; only called when the AsyncConnection is
// first constructed (see header comment).
void AsyncConnection::accept(ConnectedSocket socket,
			     const entity_addr_t &listen_addr,
			     const entity_addr_t &peer_addr)
{
  ldout(async_msgr->cct, 10) << __func__ << " sd=" << socket.fd()
			     << " listen_addr " << listen_addr
			     << " peer_addr " << peer_addr << dendl;
  ceph_assert(socket.fd() >= 0);

  std::lock_guard<std::mutex> l(lock);
  cs = std::move(socket);
  socket_addr = listen_addr;
  target_addr = peer_addr; // until we know better
  state = STATE_ACCEPTING;
  protocol->accept();
  // Reschedule onto the event center to avoid lock dependencies.
  center->dispatch_event_external(read_handler);
}
// Queue a message for delivery on this connection. Always returns 0; the
// message reference is consumed (dropped on blackhole/closed-loopback).
int AsyncConnection::send_message(Message *m)
{
  FUNCTRACE(async_msgr->cct);
  lgeneric_subdout(async_msgr->cct, ms,
		   1) << "-- " << async_msgr->get_myaddrs() << " --> "
		      << get_peer_addrs() << " -- "
		      << *m << " -- " << m << " con "
		      << this
		      << dendl;

  // Fault injection: silently drop the message for blackholed peer types.
  if (is_blackhole()) {
    lgeneric_subdout(async_msgr->cct, ms, 0) << __func__ << ceph_entity_type_name(peer_type)
      << " blackhole " << *m << dendl;
    m->put();
    return 0;
  }

  // optimistic think it's ok to encode(actually may broken now)
  if (!m->get_priority())
    m->set_priority(async_msgr->get_default_send_priority());

  m->get_header().src = async_msgr->get_myname();
  m->set_connection(this);

#if defined(WITH_EVENTTRACE)
  if (m->get_type() == CEPH_MSG_OSD_OP)
    OID_EVENT_TRACE_WITH_MSG(m, "SEND_MSG_OSD_OP_BEGIN", true);
  else if (m->get_type() == CEPH_MSG_OSD_OPREPLY)
    OID_EVENT_TRACE_WITH_MSG(m, "SEND_MSG_OSD_OPREPLY_BEGIN", true);
#endif

  if (is_loopback) { //loopback connection
    ldout(async_msgr->cct, 20) << __func__ << " " << *m << " local" << dendl;
    std::lock_guard<std::mutex> l(write_lock);
    if (protocol->is_connected()) {
      dispatch_queue->local_delivery(m, m->get_priority());
    } else {
      ldout(async_msgr->cct, 10) << __func__ << " loopback connection closed."
                                 << " Drop message " << m << dendl;
      m->put();
    }
    return 0;
  }

  // we don't want to consider local message here, it's too lightweight which
  // may disturb users
  logger->inc(l_msgr_send_messages);

  protocol->send_message(m);
  return 0;
}
// Pick the peer address to connect to from an address vector: the first
// non-legacy addr matching our socket's address family; empty addr if none.
entity_addr_t AsyncConnection::_infer_target_addr(const entity_addrvec_t& av)
{
  // pick the first addr of the same address family as socket_addr. it could be
  // an any: or v2: addr, we don't care. it should not be a v1 addr.
  for (auto& i : av.v) {
    if (i.is_legacy()) {
      continue;
    }
    if (i.get_family() == socket_addr.get_family()) {
      ldout(async_msgr->cct,10) << __func__ << " " << av << " -> " << i << dendl;
      return i;
    }
  }
  ldout(async_msgr->cct,10) << __func__ << " " << av << " -> nothing to match "
			    << socket_addr << dendl;
  return {};
}
// Reset transport-level state after a failure: close the socket, flush any
// delayed messages, and discard all buffered read/write progress. The
// protocol layer decides whether to reconnect.
void AsyncConnection::fault()
{
  shutdown_socket();
  open_write = false;

  // queue delayed items immediately
  if (delay_state)
    delay_state->flush();

  recv_start = recv_end = 0;
  state_offset = 0;
  outgoing_bl.clear();
}
// Final teardown: unregister from the messenger/dispatch queue, mark CLOSED,
// and schedule cleanup() on the event center so in-flight events still see a
// live object.
void AsyncConnection::_stop() {
  writeCallback.reset();
  dispatch_queue->discard_queue(conn_id);
  async_msgr->unregister_conn(this);
  worker->release_worker();

  state = STATE_CLOSED;
  open_write = false;

  state_offset = 0;
  // Make sure in-queue events will been processed
  center->dispatch_event_external(EventCallbackRef(new C_clean_handler(this)));
}
// True when outgoing data is still queued for sending.
bool AsyncConnection::is_queued() const {
  // Explicit comparison instead of the implicit unsigned->bool conversion.
  return outgoing_bl.length() > 0;
}
// Cancel all timers registered by this connection and shut down/close the
// socket (after removing its file events).
void AsyncConnection::shutdown_socket() {
  for (auto &&t : register_time_events) center->delete_time_event(t);
  register_time_events.clear();
  if (last_tick_id) {
    center->delete_time_event(last_tick_id);
    last_tick_id = 0;
  }
  if (cs) {
    center->delete_file_event(cs.fd(), EVENT_READABLE | EVENT_WRITABLE);
    cs.shutdown();
    cs.close();
  }
}
// Timer callback: deliver the oldest delayed message (if any). The message
// is popped under delay_lock but dispatched outside it to avoid holding the
// lock across dispatch.
void AsyncConnection::DelayedDelivery::do_request(uint64_t id)
{
  Message *m = nullptr;
  {
    std::lock_guard<std::mutex> l(delay_lock);
    register_time_events.erase(id);
    if (stop_dispatch)
      return ;
    if (delay_queue.empty())
      return ;
    m = delay_queue.front();
    delay_queue.pop_front();
  }
  if (msgr->ms_can_fast_dispatch(m)) {
    dispatch_queue->fast_dispatch(m);
  } else {
    dispatch_queue->enqueue(m, m->get_priority(), conn_id);
  }
}
// Drop all delayed messages (releasing their throttle budget) and cancel
// pending timers. Runs synchronously on the event center thread
// (submit_to(..., true)); stop_dispatch blocks do_request() meanwhile.
void AsyncConnection::DelayedDelivery::discard() {
  stop_dispatch = true;
  center->submit_to(center->get_id(),
		    [this]() mutable {
		      std::lock_guard<std::mutex> l(delay_lock);
		      while (!delay_queue.empty()) {
			Message *m = delay_queue.front();
			dispatch_queue->dispatch_throttle_release(
			    m->get_dispatch_throttle_size());
			m->put();
			delay_queue.pop_front();
		      }
		      for (auto i : register_time_events)
			center->delete_time_event(i);
		      register_time_events.clear();
		      stop_dispatch = false;
		    },
		    true);
}
// Deliver all delayed messages immediately (instead of dropping them) and
// cancel pending timers. Same synchronous submit/stop_dispatch pattern as
// discard().
void AsyncConnection::DelayedDelivery::flush() {
  stop_dispatch = true;
  center->submit_to(
      center->get_id(), [this] () mutable {
    std::lock_guard<std::mutex> l(delay_lock);
    while (!delay_queue.empty()) {
      Message *m = delay_queue.front();
      if (msgr->ms_can_fast_dispatch(m)) {
	dispatch_queue->fast_dispatch(m);
      } else {
	dispatch_queue->enqueue(m, m->get_priority(), conn_id);
      }
      delay_queue.pop_front();
    }
    for (auto i : register_time_events)
      center->delete_time_event(i);
    register_time_events.clear();
    stop_dispatch = false;
  }, true);
}
// Ask the protocol to emit a keepalive frame.
void AsyncConnection::send_keepalive()
{
  protocol->send_keepalive();
}

// Administratively close the connection (protocol-level stop).
void AsyncConnection::mark_down()
{
  ldout(async_msgr->cct, 1) << __func__ << dendl;
  std::lock_guard<std::mutex> l(lock);
  protocol->stop();
}

// Writable-event entry point: let the protocol flush pending output.
void AsyncConnection::handle_write()
{
  ldout(async_msgr->cct, 10) << __func__ << dendl;
  protocol->write_event();
}
// Fire the stored write-completion callback (set by write()). The callback
// is copied out and write_lock released before invoking it, so the callback
// may itself call back into this connection without deadlocking.
void AsyncConnection::handle_write_callback() {
  std::lock_guard<std::mutex> l(lock);
  last_active = ceph::coarse_mono_clock::now();
  recv_start_time = ceph::mono_clock::now();
  write_lock.lock();
  if (writeCallback) {
    auto callback = *writeCallback;
    writeCallback.reset();
    write_lock.unlock();
    callback(0);
    return;
  }
  write_lock.unlock();
}
// Stop the connection; optionally queue a reset notification for dispatchers
// (only if the connection was not already closed). The reset is queued after
// releasing `lock` to avoid lock-ordering issues with the dispatch queue.
void AsyncConnection::stop(bool queue_reset) {
  lock.lock();
  bool need_queue_reset = (state != STATE_CLOSED) && queue_reset;
  protocol->stop();
  lock.unlock();
  if (need_queue_reset) dispatch_queue->queue_reset(this);
}
// Final cleanup, run on the event center via C_clean_handler: close the
// socket and free all event-callback adapters and the delay queue.
void AsyncConnection::cleanup() {
  shutdown_socket();
  delete read_handler;
  delete write_handler;
  delete write_callback_handler;
  delete wakeup_handler;
  delete tick_handler;
  // delete on a null pointer is a no-op, so no guard is needed; null it so
  // the destructor's ceph_assert(!delay_state) holds.
  delete delay_state;
  delay_state = nullptr;
}
// Timer wakeup: deregister the fired event id, then re-enter the state
// machine. `lock` is released before process() since process() takes it.
void AsyncConnection::wakeup_from(uint64_t id)
{
  lock.lock();
  register_time_events.erase(id);
  lock.unlock();
  process();
}
// Periodic health check armed via tick_handler. While connecting, fault the
// connection if no progress within connect_timeout_us; once connected, fault
// it after inactive_timeout_us of idleness. Otherwise re-arm the timer.
void AsyncConnection::tick(uint64_t id)
{
  auto now = ceph::coarse_mono_clock::now();
  ldout(async_msgr->cct, 20) << __func__ << " last_id=" << last_tick_id
                             << " last_active=" << last_active << dendl;
  std::lock_guard<std::mutex> l(lock);
  last_tick_id = 0;
  if (!is_connected()) {
    if (connect_timeout_us <=
        (uint64_t)std::chrono::duration_cast<std::chrono::microseconds>
          (now - last_connect_started).count()) {
      ldout(async_msgr->cct, 1) << __func__ << " see no progress in more than "
                                << connect_timeout_us
                                << " us during connecting to "
                                << target_addr << ", fault."
                                << dendl;
      protocol->fault();
      labeled_logger->inc(l_msgr_connection_ready_timeouts);
    } else {
      last_tick_id = center->create_time_event(connect_timeout_us, tick_handler);
    }
  } else {
    auto idle_period = std::chrono::duration_cast<std::chrono::microseconds>
      (now - last_active).count();
    if (inactive_timeout_us < (uint64_t)idle_period) {
      ldout(async_msgr->cct, 1) << __func__ << " idle (" << idle_period
                                << ") for more than " << inactive_timeout_us
                                << " us, fault."
                                << dendl;
      protocol->fault();
      labeled_logger->inc(l_msgr_connection_idle_timeouts);
    } else {
      last_tick_id = center->create_time_event(inactive_timeout_us, tick_handler);
    }
  }
}
| 24,932 | 29.630221 | 108 |
cc
|
null |
ceph-main/src/msg/async/AsyncConnection.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 UnitedStack <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MSG_ASYNCCONNECTION_H
#define CEPH_MSG_ASYNCCONNECTION_H
#include <atomic>
#include <pthread.h>
#include <climits>
#include <list>
#include <mutex>
#include <map>
#include <functional>
#include <optional>
#include "auth/AuthSessionHandler.h"
#include "common/ceph_time.h"
#include "common/perf_counters.h"
#include "include/buffer.h"
#include "msg/Connection.h"
#include "msg/Messenger.h"
#include "Event.h"
#include "Stack.h"
class AsyncMessenger;
class DispatchQueue;
class Worker;
class Protocol;
static const int ASYNC_IOV_MAX = (IOV_MAX >= 1024 ? IOV_MAX / 4 : IOV_MAX);
/*
* AsyncConnection maintains a logic session between two endpoints. In other
* word, a pair of addresses can find the only AsyncConnection. AsyncConnection
* will handle with network fault or read/write transactions. If one file
* descriptor broken, AsyncConnection will maintain the message queue and
* sequence, try to reconnect peer endpoint.
*/
class AsyncConnection : public Connection {
  // Read exactly `len` bytes; returns 0 when complete, <0 on error, >0 when
  // partial (continuation state is stashed; see AsyncConnection.cc).
  ssize_t read(unsigned len, char *buffer,
               std::function<void(char *, ssize_t)> callback);
  ssize_t read_until(unsigned needed, char *p);
  ssize_t read_bulk(char *buf, unsigned len);
  // Queue `bl` and try to flush; callback fires when the queue drains.
  ssize_t write(ceph::buffer::list &bl, std::function<void(ssize_t)> callback,
                bool more=false);
  ssize_t _try_send(bool more=false);
  void _connect();
  void _stop();
  void fault();
  void inject_delay();
  bool inject_network_congestion() const;
  bool is_queued() const;
  void shutdown_socket();
  /**
   * The DelayedDelivery is for injecting delays into Message delivery off
   * the socket. It is only enabled if delays are requested, and if they
   * are then it pulls Messages off the DelayQueue and puts them into the
   * AsyncMessenger event queue.
   */
  class DelayedDelivery : public EventCallback {
    std::set<uint64_t> register_time_events; // need to delete it if stop
    std::deque<Message*> delay_queue;
    std::mutex delay_lock;
    AsyncMessenger *msgr;
    EventCenter *center;
    DispatchQueue *dispatch_queue;
    uint64_t conn_id;
    std::atomic_bool stop_dispatch;
   public:
    explicit DelayedDelivery(AsyncMessenger *omsgr, EventCenter *c,
                             DispatchQueue *q, uint64_t cid)
      : msgr(omsgr), center(c), dispatch_queue(q), conn_id(cid),
        stop_dispatch(false) { }
    ~DelayedDelivery() override {
      ceph_assert(register_time_events.empty());
      ceph_assert(delay_queue.empty());
    }
    void set_center(EventCenter *c) { center = c; }
    void do_request(uint64_t id) override;
    // Queue `m` for delivery after `delay_period` seconds.
    void queue(double delay_period, Message *m) {
      std::lock_guard<std::mutex> l(delay_lock);
      delay_queue.push_back(m);
      register_time_events.insert(center->create_time_event(delay_period*1000000, this));
    }
    void discard();
    bool ready() const { return !stop_dispatch && delay_queue.empty() && register_time_events.empty(); }
    void flush();
  } *delay_state;
 private:
  FRIEND_MAKE_REF(AsyncConnection);
  AsyncConnection(CephContext *cct, AsyncMessenger *m, DispatchQueue *q,
		  Worker *w, bool is_msgr2, bool local);
  ~AsyncConnection() override;
  bool unregistered = false;
 public:
  void maybe_start_delay_thread();
  std::ostream& _conn_prefix(std::ostream *_dout);
  bool is_connected() override;
  // Only call when the AsyncConnection is first constructed.
  void connect(const entity_addrvec_t& addrs, int type, entity_addr_t& target);
  // Only call when the AsyncConnection is first constructed.
  void accept(ConnectedSocket socket,
              const entity_addr_t &listen_addr,
              const entity_addr_t &peer_addr);
  int send_message(Message *m) override;
  void send_keepalive() override;
  void mark_down() override;
  void mark_disposable() override {
    std::lock_guard<std::mutex> l(lock);
    policy.lossy = true;
  }
  entity_addr_t get_peer_socket_addr() const override {
    return target_addr;
  }
  int get_con_mode() const override;
  bool is_unregistered() const {
    return unregistered;
  }
  void unregister() {
    unregistered = true;
  }
 private:
  // Connection state machine states; see process() in AsyncConnection.cc.
  enum {
    STATE_NONE,
    STATE_CONNECTING,
    STATE_CONNECTING_RE,
    STATE_ACCEPTING,
    STATE_CONNECTION_ESTABLISHED,
    STATE_CLOSED
  };
  static const uint32_t TCP_PREFETCH_MIN_SIZE;
  // NOTE(review): no bounds check — callers must pass a valid enum value;
  // an out-of-range `state` indexes past the array.
  static const char *get_state_name(int state) {
    const char* const statenames[] = {"STATE_NONE",
                                      "STATE_CONNECTING",
                                      "STATE_CONNECTING_RE",
                                      "STATE_ACCEPTING",
                                      "STATE_CONNECTION_ESTABLISHED",
                                      "STATE_CLOSED"};
    return statenames[state];
  }
  AsyncMessenger *async_msgr;
  uint64_t conn_id;
  PerfCounters *logger;
  PerfCounters *labeled_logger;
  int state;
  ConnectedSocket cs;
  int port;
 public:
  Messenger::Policy policy;
 private:
  DispatchQueue *dispatch_queue;
  // lockfree, only used in own thread
  ceph::buffer::list outgoing_bl;
  bool open_write = false;
  std::mutex write_lock;
  std::mutex lock;
  EventCallbackRef read_handler;
  EventCallbackRef write_handler;
  EventCallbackRef write_callback_handler;
  EventCallbackRef wakeup_handler;
  EventCallbackRef tick_handler;
  char *recv_buf;
  uint32_t recv_max_prefetch;
  uint32_t recv_start;
  uint32_t recv_end;
  std::set<uint64_t> register_time_events; // need to delete it if stop
  ceph::coarse_mono_clock::time_point last_connect_started;
  ceph::coarse_mono_clock::time_point last_active;
  ceph::mono_clock::time_point recv_start_time;
  uint64_t last_tick_id = 0;
  const uint64_t connect_timeout_us;
  const uint64_t inactive_timeout_us;
  // This section contains temp variables used by state transition
  // Accepting state
  bool msgr2 = false;
  entity_addr_t socket_addr;  ///< local socket addr
  entity_addr_t target_addr;  ///< which of the peer_addrs we're connecting to (as client) or should reconnect to (as peer)
  entity_addr_t _infer_target_addr(const entity_addrvec_t& av);
  // used only by "read_until"
  uint64_t state_offset;
  Worker *worker;
  EventCenter *center;
  std::unique_ptr<Protocol> protocol;
  std::optional<std::function<void(ssize_t)>> writeCallback;
  std::function<void(char *, ssize_t)> readCallback;
  std::optional<unsigned> pendingReadLen;
  char *read_buffer;
 public:
  // used by eventcallback
  void handle_write();
  void handle_write_callback();
  void process();
  void wakeup_from(uint64_t id);
  void tick(uint64_t id);
  void stop(bool queue_reset);
  void cleanup();
  PerfCounters *get_perf_counter() {
    return logger;
  }
  bool is_msgr2() const override;
  friend class Protocol;
  friend class ProtocolV1;
  friend class ProtocolV2;
}; /* AsyncConnection */
using AsyncConnectionRef = ceph::ref_t<AsyncConnection>;
#endif
| 7,342 | 27.909449 | 124 |
h
|
null |
ceph-main/src/msg/async/AsyncMessenger.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 UnitedStack <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "acconfig.h"
#include <iostream>
#include <fstream>
#include "AsyncMessenger.h"
#include "common/config.h"
#include "common/Timer.h"
#include "common/errno.h"
#include "messages/MOSDOp.h"
#include "messages/MOSDOpReply.h"
#include "common/EventTrace.h"
#define dout_subsys ceph_subsys_ms
#undef dout_prefix
#define dout_prefix _prefix(_dout, this)
// Log prefix for messenger-level messages: local addresses.
static std::ostream& _prefix(std::ostream *_dout, AsyncMessenger *m) {
  return *_dout << "-- " << m->get_myaddrs() << " ";
}

// Log prefix for Processor messages (the Processor pointer is unused; only
// overload resolution selects this variant).
static std::ostream& _prefix(std::ostream *_dout, Processor *p) {
  return *_dout << " Processor -- ";
}
/*******************
* Processor
*/
// Event adapter: forwards listen-socket readable events to
// Processor::accept().
class Processor::C_processor_accept : public EventCallback {
  Processor *pro;

 public:
  explicit C_processor_accept(Processor *p): pro(p) {}
  void do_request(uint64_t id) override {
    pro->accept();
  }
};
// A Processor owns the listening sockets of one worker and hands accepted
// connections back to the messenger.
Processor::Processor(AsyncMessenger *r, Worker *w, CephContext *c)
  : msgr(r), net(c), worker(w),
    listen_handler(new C_processor_accept(this)) {}
// Bind a listening socket for every address in `bind_addrs`, retrying up to
// ms_bind_retry_count times per address. An address with port 0 scans the
// configured [ms_bind_port_min, ms_bind_port_max] range, skipping
// `avoid_ports`. On success the actually-bound addresses (including chosen
// ports) are written to *bound_addrs; on failure earlier sockets are closed
// and the error is returned.
int Processor::bind(const entity_addrvec_t &bind_addrs,
		    const std::set<int>& avoid_ports,
		    entity_addrvec_t* bound_addrs)
{
  const auto& conf = msgr->cct->_conf;
  // bind to socket(s)
  ldout(msgr->cct, 10) << __func__ << " " << bind_addrs << dendl;

  SocketOptions opts;
  opts.nodelay = msgr->cct->_conf->ms_tcp_nodelay;
  opts.rcbuf_size = msgr->cct->_conf->ms_tcp_rcvbuf;

  listen_sockets.resize(bind_addrs.v.size());
  *bound_addrs = bind_addrs;

  for (unsigned k = 0; k < bind_addrs.v.size(); ++k) {
    auto& listen_addr = bound_addrs->v[k];

    /* bind to port */
    int r = -1;

    for (int i = 0; i < conf->ms_bind_retry_count; i++) {
      if (i > 0) {
        lderr(msgr->cct) << __func__ << " was unable to bind. Trying again in "
                         << conf->ms_bind_retry_delay << " seconds " << dendl;
        sleep(conf->ms_bind_retry_delay);
      }

      if (listen_addr.get_port()) {
        // Explicit port: bind it on the worker's event-center thread.
        worker->center.submit_to(
          worker->center.get_id(),
          [this, k, &listen_addr, &opts, &r]() {
            r = worker->listen(listen_addr, k, opts, &listen_sockets[k]);
          }, false);
        if (r < 0) {
          lderr(msgr->cct) << __func__ << " unable to bind to " << listen_addr
                           << ": " << cpp_strerror(r) << dendl;
          continue;
        }
      } else {
        // try a range of ports
        for (int port = msgr->cct->_conf->ms_bind_port_min;
             port <= msgr->cct->_conf->ms_bind_port_max;
             port++) {
          if (avoid_ports.count(port))
            continue;

          listen_addr.set_port(port);
          worker->center.submit_to(
            worker->center.get_id(),
            [this, k, &listen_addr, &opts, &r]() {
              r = worker->listen(listen_addr, k, opts, &listen_sockets[k]);
            }, false);
          if (r == 0)
            break;
        }
        if (r < 0) {
          lderr(msgr->cct) << __func__ << " unable to bind to " << listen_addr
                           << " on any port in range "
                           << msgr->cct->_conf->ms_bind_port_min
                           << "-" << msgr->cct->_conf->ms_bind_port_max << ": "
                           << cpp_strerror(r) << dendl;
          listen_addr.set_port(0); // Clear port before retry, otherwise we shall fail again.
          continue;
        }
        ldout(msgr->cct, 10) << __func__ << " bound on random port "
                             << listen_addr << dendl;
      }
      if (r == 0) {
        break;
      }
    }

    // It seems that binding completely failed, return with that exit status
    if (r < 0) {
      lderr(msgr->cct) << __func__ << " was unable to bind after "
                       << conf->ms_bind_retry_count
                       << " attempts: " << cpp_strerror(r) << dendl;
      for (unsigned j = 0; j < k; ++j) {
        // clean up previous bind
        listen_sockets[j].abort_accept();
      }
      return r;
    }
  }

  ldout(msgr->cct, 10) << __func__ << " bound to " << *bound_addrs << dendl;
  return 0;
}
// Register the accept handler (listen_handler) for every live listen socket
// on the owning worker's event center.  Bails out if a socket's fd was
// already closed (restart after stop()).
void Processor::start()
{
  ldout(msgr->cct, 1) << __func__ << dendl;

  // start thread
  worker->center.submit_to(worker->center.get_id(), [this]() {
    for (auto& listen_socket : listen_sockets) {
      if (listen_socket) {
        if (listen_socket.fd() == -1) {
          ldout(msgr->cct, 1) << __func__
                              << " Error: processor restart after listen_socket.fd closed. "
                              << this << dendl;
          return;
        }
        worker->center.create_file_event(listen_socket.fd(), EVENT_READABLE,
                                         listen_handler);
      }
    }
  }, false);
}
// Accept-loop callback: drain all pending connections from every listen
// socket.  Loops per socket until accept() reports EAGAIN; EINTR and
// ECONNABORTED are retried, and more than ms_max_accept_failures consecutive
// hard failures abort the process.
void Processor::accept()
{
  SocketOptions opts;
  opts.nodelay = msgr->cct->_conf->ms_tcp_nodelay;
  opts.rcbuf_size = msgr->cct->_conf->ms_tcp_rcvbuf;
  opts.priority = msgr->get_socket_priority();

  for (auto& listen_socket : listen_sockets) {
    ldout(msgr->cct, 10) << __func__ << " listen_fd=" << listen_socket.fd()
                         << dendl;
    unsigned accept_error_num = 0;

    while (true) {
      entity_addr_t addr;
      ConnectedSocket cli_socket;
      // choose the worker that will own the new connection: with a shared
      // (kernel) listen table any worker may take it; with per-worker local
      // listen tables the accepting worker keeps it and gains a reference
      Worker *w = worker;
      if (!msgr->get_stack()->support_local_listen_table())
        w = msgr->get_stack()->get_worker();
      else
        ++w->references;
      int r = listen_socket.accept(&cli_socket, opts, &addr, w);
      if (r == 0) {
        ldout(msgr->cct, 10) << __func__ << " accepted incoming on sd "
                             << cli_socket.fd() << dendl;

        msgr->add_accept(
          w, std::move(cli_socket),
          msgr->get_myaddrs().v[listen_socket.get_addr_slot()],
          addr);
        accept_error_num = 0;
        continue;
      } else {
        --w->references;
        if (r == -EINTR) {
          continue;
        } else if (r == -EAGAIN) {
          // no more pending connections on this socket
          break;
        } else if (r == -EMFILE || r == -ENFILE) {
          lderr(msgr->cct) << __func__ << " open file descriptions limit reached sd = " << listen_socket.fd()
                           << " errno " << r << " " << cpp_strerror(r) << dendl;
          if (++accept_error_num > msgr->cct->_conf->ms_max_accept_failures) {
            lderr(msgr->cct) << "Proccessor accept has encountered enough error numbers, just do ceph_abort()." << dendl;
            ceph_abort();
          }
          continue;
        } else if (r == -ECONNABORTED) {
          ldout(msgr->cct, 0) << __func__ << " it was closed because of rst arrived sd = " << listen_socket.fd()
                              << " errno " << r << " " << cpp_strerror(r) << dendl;
          continue;
        } else {
          lderr(msgr->cct) << __func__ << " no incoming connection?"
                           << " errno " << r << " " << cpp_strerror(r) << dendl;
          if (++accept_error_num > msgr->cct->_conf->ms_max_accept_failures) {
            lderr(msgr->cct) << "Proccessor accept has encountered enough error numbers, just do ceph_abort()." << dendl;
            ceph_abort();
          }
          continue;
        }
      }
    }
  }
}
void Processor::stop()
{
ldout(msgr->cct,10) << __func__ << dendl;
worker->center.submit_to(worker->center.get_id(), [this]() {
for (auto& listen_socket : listen_sockets) {
if (listen_socket) {
worker->center.delete_file_event(listen_socket.fd(), EVENT_READABLE);
listen_socket.abort_accept();
}
}
}, false);
}
// Process-wide singleton wrapper holding one NetworkStack per transport
// type (see lookup_or_create_singleton_object in the AsyncMessenger ctor).
struct StackSingleton {
  CephContext *cct;
  std::shared_ptr<NetworkStack> stack;

  explicit StackSingleton(CephContext *c): cct(c) {}

  // Lazily create the NetworkStack of the given type on first use.
  void ready(std::string &type) {
    if (!stack)
      stack = NetworkStack::create(cct, type);
  }

  ~StackSingleton() {
    // FIX: guard against ready() never having been called, in which case
    // stack is still null and stop() would dereference a null pointer.
    if (stack)
      stack->stop();
  }
};
class C_handle_reap : public EventCallback {
AsyncMessenger *msgr;
public:
explicit C_handle_reap(AsyncMessenger *m): msgr(m) {}
void do_request(uint64_t id) override {
// judge whether is a time event
msgr->reap_dead();
}
};
/*******************
* AsyncMessenger
*/
// Construct the messenger: pick the transport backend from the type string,
// attach to (or create) the shared NetworkStack singleton for that backend,
// start it, set up the loopback connection, and create the Processor(s).
AsyncMessenger::AsyncMessenger(CephContext *cct, entity_name_t name,
                               const std::string &type, std::string mname, uint64_t _nonce)
  : SimplePolicyMessenger(cct, name),
    dispatch_queue(cct, this, mname),
    nonce(_nonce)
{
  // anything that is not rdma/dpdk uses the posix stack
  std::string transport_type = "posix";
  if (type.find("rdma") != std::string::npos)
    transport_type = "rdma";
  else if (type.find("dpdk") != std::string::npos)
    transport_type = "dpdk";

  // one NetworkStack per transport type is shared process-wide
  auto single = &cct->lookup_or_create_singleton_object<StackSingleton>(
    "AsyncMessenger::NetworkStack::" + transport_type, true, cct);
  single->ready(transport_type);
  stack = single->stack.get();
  stack->start();
  local_worker = stack->get_worker();
  // loopback connection delivers messages we send to ourselves
  local_connection = ceph::make_ref<AsyncConnection>(cct, this, &dispatch_queue,
                                                     local_worker, true, true);
  init_local_connection();
  reap_handler = new C_handle_reap(this);
  // with per-worker (local) listen tables each worker needs its own Processor
  unsigned processor_num = 1;
  if (stack->support_local_listen_table())
    processor_num = stack->get_num_worker();
  for (unsigned i = 0; i < processor_num; ++i)
    processors.push_back(new Processor(this, stack->get_worker(i), cct));
}
/**
* Destroy the AsyncMessenger. Pretty simple since all the work is done
* elsewhere.
*/
// Destructor: the real teardown happens in shutdown()/wait(); by this point
// we must no longer be bound (did_bind was cleared by shutdown()).
AsyncMessenger::~AsyncMessenger()
{
  delete reap_handler;
  ceph_assert(!did_bind); // either we didn't bind or we shut down the Processor
  for (auto &&p : processors)
    delete p;
}
// Called once a dispatcher is registered: finish any bind that was postponed
// because the stack was not yet ready, then start accepting and dispatching.
void AsyncMessenger::ready()
{
  ldout(cct,10) << __func__ << " " << get_myaddrs() << dendl;

  stack->ready();
  if (pending_bind) {
    int err = bindv(pending_bind_addrs, saved_public_addrs);
    if (err) {
      lderr(cct) << __func__ << " postponed bind failed" << dendl;
      ceph_abort();
    }
  }

  std::lock_guard l{lock};
  for (auto &&p : processors)
    p->start();
  dispatch_queue.start();
}
// Stop accepting, close every connection, break the loopback ref cycle and
// wake up wait().  The stack is drained so no event is still in flight.
int AsyncMessenger::shutdown()
{
  ldout(cct,10) << __func__ << " " << get_myaddrs() << dendl;

  // done!  clean up.
  for (auto &&p : processors)
    p->stop();
  mark_down_all();
  // break ref cycles on the loopback connection
  local_connection->clear_priv();
  local_connection->mark_down();
  did_bind = false;
  lock.lock();
  stop_cond.notify_all();
  stopped = true;
  lock.unlock();
  stack->drain();
  return 0;
}
int AsyncMessenger::bind(const entity_addr_t &bind_addr,
std::optional<entity_addrvec_t> public_addrs)
{
ldout(cct, 10) << __func__ << " " << bind_addr
<< " public " << public_addrs << dendl;
// old bind() can take entity_addr_t(). new bindv() can take a
// 0.0.0.0-like address but needs type and family to be set.
auto a = bind_addr;
if (a == entity_addr_t()) {
a.set_type(entity_addr_t::TYPE_LEGACY);
if (cct->_conf->ms_bind_ipv6) {
a.set_family(AF_INET6);
} else {
a.set_family(AF_INET);
}
}
return bindv(entity_addrvec_t(a), public_addrs);
}
// Bind every Processor to bind_addrs.  If the stack is not yet ready the
// bind is postponed until ready().  public_addrs, when different from
// bind_addrs, is remembered for _finish_bind()/rebind().
int AsyncMessenger::bindv(const entity_addrvec_t &bind_addrs,
                          std::optional<entity_addrvec_t> public_addrs)
{
  lock.lock();

  if (!pending_bind && started) {
    ldout(cct,10) << __func__ << " already started" << dendl;
    lock.unlock();
    return -1;
  }

  ldout(cct, 10) << __func__ << " " << bind_addrs
                 << " public " << public_addrs << dendl;
  if (public_addrs && bind_addrs != public_addrs) {
    // for the sake of rebind() and the is-not-ready case let's
    // store public_addrs. there is no point in that if public
    // addrs are indifferent from bind_addrs.
    saved_public_addrs = std::move(public_addrs);
  }

  if (!stack->is_ready()) {
    ldout(cct, 10) << __func__ << " Network Stack is not ready for bind yet - postponed" << dendl;
    pending_bind_addrs = bind_addrs;
    pending_bind = true;
    lock.unlock();
    return 0;
  }
  lock.unlock();

  // bind to a socket
  std::set<int> avoid_ports;
  entity_addrvec_t bound_addrs;
  unsigned i = 0;
  for (auto &&p : processors) {
    int r = p->bind(bind_addrs, avoid_ports, &bound_addrs);
    if (r) {
      // Note: this is related to local tcp listen table problem.
      // Posix(default kernel implementation) backend shares listen table
      // in the kernel, so all threads can use the same listen table naturally
      // and only one thread need to bind. But other backends(like dpdk) uses local
      // listen table, we need to bind/listen tcp port for each worker. So if the
      // first worker failed to bind, it could be think the normal error then handle
      // it, like port is used case. But if the first worker successfully to bind
      // but the second worker failed, it's not expected and we need to assert
      // here
      ceph_assert(i == 0);
      return r;
    }
    ++i;
  }
  _finish_bind(bind_addrs, bound_addrs);
  return 0;
}
// Rebind all processors to fresh ports, avoiding both the caller-supplied
// ports and the ports we are currently bound to (so the new address is
// guaranteed to differ).  The nonce is bumped to keep our entity_addr_t
// globally unique across the rebind.
int AsyncMessenger::rebind(const std::set<int>& avoid_ports)
{
  ldout(cct,1) << __func__ << " rebind avoid " << avoid_ports << dendl;
  ceph_assert(did_bind);

  for (auto &&p : processors)
    p->stop();
  mark_down_all();

  // adjust the nonce; we want our entity_addr_t to be truly unique.
  nonce += 1000000;
  ldout(cct, 10) << __func__ << " new nonce " << nonce
                 << " and addr " << get_myaddrs() << dendl;

  entity_addrvec_t bound_addrs;
  entity_addrvec_t bind_addrs = get_myaddrs();
  // also avoid the ports we were just bound to, otherwise we may simply
  // rebind to the same ones
  std::set<int> new_avoid(avoid_ports);
  for (auto& a : bind_addrs.v) {
    new_avoid.insert(a.get_port());
    a.set_port(0);
  }
  ldout(cct, 10) << __func__ << " will try " << bind_addrs
                 << " and avoid ports " << new_avoid << dendl;
  unsigned i = 0;
  for (auto &&p : processors) {
    // FIX: pass new_avoid (was avoid_ports), so the current ports collected
    // above are actually excluded, as the log line above promises.
    int r = p->bind(bind_addrs, new_avoid, &bound_addrs);
    if (r) {
      // see bindv(): with local listen tables only the first worker may
      // legitimately fail; a later failure is unexpected
      ceph_assert(i == 0);
      return r;
    }
    ++i;
  }
  _finish_bind(bind_addrs, bound_addrs);
  for (auto &&p : processors) {
    p->start();
  }
  return 0;
}
// Record a client-side source address (no listening socket is created).
// No-op unless ms_bind_before_connect is enabled or if we already bound;
// fails with -1 once the messenger has started.
int AsyncMessenger::client_bind(const entity_addr_t &bind_addr)
{
  if (!cct->_conf->ms_bind_before_connect)
    return 0;

  std::lock_guard guard{lock};
  if (did_bind)
    return 0;
  if (started) {
    ldout(cct, 10) << __func__ << " already started" << dendl;
    return -1;
  }

  ldout(cct, 10) << __func__ << " " << bind_addr << dendl;
  set_myaddrs(entity_addrvec_t(bind_addr));
  return 0;
}
// Record the outcome of a successful bind: fold the actually-bound listen
// addresses into my_addrs, apply the saved public addresses (if any) with
// missing ports filled in from the bound ones, stamp the nonce, and refresh
// the loopback connection.
void AsyncMessenger::_finish_bind(const entity_addrvec_t& bind_addrs,
                                  const entity_addrvec_t& listen_addrs)
{
  set_myaddrs(bind_addrs);
  for (auto& a : bind_addrs.v) {
    if (!a.is_blank_ip()) {
      learned_addr(a);
    }
  }

  // a wildcard port means the kernel/stack picked one; take the addresses
  // the listen sockets actually ended up on
  if (get_myaddrs().front().get_port() == 0) {
    set_myaddrs(listen_addrs);
  }
  entity_addrvec_t newaddrs;
  if (saved_public_addrs) {
    newaddrs = *saved_public_addrs;
    for (auto& public_addr : newaddrs.v) {
      public_addr.set_nonce(nonce);
      if (public_addr.is_ip() && public_addr.get_port() == 0) {
        // port is not explicitly set. This is fine as it can be figured
        // out by msgr. For instance, the low-level `Processor::bind`
        // scans for free ports in a range controlled by ms_bind_port_min
        // and ms_bind_port_max.
        for (const auto& a : my_addrs->v) {
          if (public_addr.get_type() == a.get_type() && a.is_ip()) {
            public_addr.set_port(a.get_port());
          }
        }
      }
    }
  } else {
    newaddrs = *my_addrs;
    for (auto& a : newaddrs.v) {
      a.set_nonce(nonce);
    }
  }
  set_myaddrs(newaddrs);

  init_local_connection();

  ldout(cct,1) << __func__ << " bind my_addrs is " << get_myaddrs() << dendl;
  did_bind = true;
}
int AsyncMessenger::client_reset()
{
mark_down_all();
std::scoped_lock l{lock};
// adjust the nonce; we want our entity_addr_t to be truly unique.
nonce += 1000000;
ldout(cct, 10) << __func__ << " new nonce " << nonce << dendl;
entity_addrvec_t newaddrs = *my_addrs;
for (auto& a : newaddrs.v) {
a.set_nonce(nonce);
}
set_myaddrs(newaddrs);
_init_local_connection();
return 0;
}
// Mark the messenger started.  If we never bound, our (blank) addresses
// still get the nonce so our identity stays unique.
int AsyncMessenger::start()
{
  std::scoped_lock l{lock};
  ldout(cct,1) << __func__ << " start" << dendl;

  // register at least one entity, first!
  ceph_assert(my_name.type() >= 0);

  ceph_assert(!started);
  started = true;
  stopped = false;

  if (!did_bind) {
    entity_addrvec_t newaddrs = *my_addrs;
    for (auto& a : newaddrs.v) {
      a.nonce = nonce;
    }
    set_myaddrs(newaddrs);
    _init_local_connection();
  }

  return 0;
}
// Block until shutdown() signals stop_cond, then finish teardown: stop the
// dispatch queue, close all remaining connections and drain the stack.
void AsyncMessenger::wait()
{
  {
    std::unique_lock locker{lock};
    if (!started) {
      return;
    }
    if (!stopped)
      stop_cond.wait(locker);
  }
  dispatch_queue.shutdown();
  if (dispatch_queue.is_started()) {
    ldout(cct, 10) << __func__ << ": waiting for dispatch queue" << dendl;
    dispatch_queue.wait();
    dispatch_queue.discard_local();
    ldout(cct, 10) << __func__ << ": dispatch queue is stopped" << dendl;
  }

  // close all connections
  shutdown_connections(false);
  stack->drain();

  ldout(cct, 10) << __func__ << ": done." << dendl;
  ldout(cct, 1) << __func__ << " complete." << dendl;
  started = false;
}
// Wrap a freshly-accepted socket in an AsyncConnection (msgr2 iff the
// listening address is msgr2) and track it in accepting_conns until the
// handshake registers it via accept_conn().
void AsyncMessenger::add_accept(Worker *w, ConnectedSocket cli_socket,
                                const entity_addr_t &listen_addr,
                                const entity_addr_t &peer_addr)
{
  std::lock_guard guard{lock};
  const bool is_msgr2 = listen_addr.is_msgr2();
  auto conn = ceph::make_ref<AsyncConnection>(cct, this, &dispatch_queue, w,
                                              is_msgr2, false);
  conn->accept(std::move(cli_socket), listen_addr, peer_addr);
  accepting_conns.insert(conn);
}
// Create an outgoing connection to addrs (caller holds `lock`).  Picks the
// first msgr2/legacy address as the target, registers the connection in
// either anon_conns or conns, and bumps the active-connection perf counter.
AsyncConnectionRef AsyncMessenger::create_connect(
  const entity_addrvec_t& addrs, int type, bool anon)
{
  ceph_assert(ceph_mutex_is_locked(lock));

  ldout(cct, 10) << __func__ << " " << addrs
                 << ", creating connection and registering" << dendl;

  // here is where we decide which of the addrs to connect to.  always prefer
  // the first one, if we support it.
  entity_addr_t target;
  for (auto& a : addrs.v) {
    if (!a.is_msgr2() && !a.is_legacy()) {
      continue;
    }
    // FIXME: for ipv4 vs ipv6, check whether local host can handle ipv6 before
    // trying it?  for now, just pick whichever is listed first.
    target = a;
    break;
  }

  // create connection
  Worker *w = stack->get_worker();
  auto conn = ceph::make_ref<AsyncConnection>(cct, this, &dispatch_queue, w,
                                              target.is_msgr2(), false);
  conn->anon = anon;
  conn->connect(addrs, type, target);
  if (anon) {
    anon_conns.insert(conn);
  } else {
    ceph_assert(!conns.count(addrs));
    ldout(cct, 10) << __func__ << " " << conn << " " << addrs << " "
                   << *conn->peer_addrs << dendl;
    conns[addrs] = conn;
  }
  w->get_perf_counter()->inc(l_msgr_active_connections);

  return conn;
}
// Return the loopback connection used for messages addressed to ourselves.
ConnectionRef AsyncMessenger::get_loopback_connection()
{
  return local_connection;
}
// Whether we may dial peers over msgr2: yes if we never bound (pure client)
// or if at least one of our own addresses is msgr2.
bool AsyncMessenger::should_use_msgr2()
{
  // if we are bound to v1 only, and we are connecting to a v2 peer,
  // we cannot use the peer's v2 address. otherwise the connection
  // is assymetrical, because they would have to use v1 to connect
  // to us, and we would use v2, and connection race detection etc
  // would totally break down (among other things).  or, the other
  // end will be confused that we advertise ourselve with a v1
  // address only (that we bound to) but connected with protocol v2.
  return !did_bind || get_myaddrs().has_msgr2();
}
// Strip msgr2 addresses from the target list when we are not allowed to
// speak msgr2 (see should_use_msgr2()); otherwise return addrs unchanged.
entity_addrvec_t AsyncMessenger::_filter_addrs(const entity_addrvec_t& addrs)
{
  if (should_use_msgr2()) {
    return addrs;
  }

  ldout(cct, 10) << __func__ << " " << addrs << " limiting to v1 ()" << dendl;
  entity_addrvec_t v1_only;
  for (auto& a : addrs.v) {
    if (!a.is_msgr2()) {
      v1_only.v.push_back(a);
    }
  }
  return v1_only;
}
// Send message m to the entity of the given type at addrs.  Consumes a
// reference to m even on error (empty addrs -> -EINVAL after m->put()).
int AsyncMessenger::send_to(Message *m, int type, const entity_addrvec_t& addrs)
{
  FUNCTRACE(cct);
  ceph_assert(m);

#if defined(WITH_EVENTTRACE)
  if (m->get_type() == CEPH_MSG_OSD_OP)
    OID_EVENT_TRACE(((MOSDOp *)m)->get_oid().name.c_str(), "SEND_MSG_OSD_OP");
  else if (m->get_type() == CEPH_MSG_OSD_OPREPLY)
    OID_EVENT_TRACE(((MOSDOpReply *)m)->get_oid().name.c_str(), "SEND_MSG_OSD_OP_REPLY");
#endif

  ldout(cct, 1) << __func__ << "--> " << ceph_entity_type_name(type) << " "
                << addrs << " -- " << *m << " -- ?+"
                << m->get_data().length() << " " << m << dendl;

  if (addrs.empty()) {
    ldout(cct,0) << __func__ << " message " << *m
                 << " with empty dest " << addrs << dendl;
    m->put();
    return -EINVAL;
  }

  // debugging aid: hexdump payload and data before sending
  if (cct->_conf->ms_dump_on_send) {
    m->encode(-1, MSG_CRC_ALL);
    ldout(cct, 0) << __func__ << " submit_message " << *m << "\n";
    m->get_payload().hexdump(*_dout);
    if (m->get_data().length() > 0) {
      *_dout << " data:\n";
      m->get_data().hexdump(*_dout);
    }
    *_dout << dendl;
    m->clear_payload();
  }

  connect_to(type, addrs, false)->send_message(m);
  return 0;
}
// Get a connection to addrs: the loopback connection if the target is
// ourselves (unless not_local_dest), a fresh anonymous connection if anon,
// otherwise the registered connection (creating one on a miss).
ConnectionRef AsyncMessenger::connect_to(int type,
                                         const entity_addrvec_t& addrs,
                                         bool anon, bool not_local_dest)
{
  if (!not_local_dest) {
    if (*my_addrs == addrs ||
        (addrs.v.size() == 1 &&
         my_addrs->contains(addrs.front()))) {
      // local
      return local_connection;
    }
  }

  // drop msgr2 addrs if we cannot speak msgr2
  auto av = _filter_addrs(addrs);
  std::lock_guard l{lock};
  if (anon) {
    return create_connect(av, type, anon);
  }

  AsyncConnectionRef conn = _lookup_conn(av);
  if (conn) {
    ldout(cct, 10) << __func__ << " " << av << " existing " << conn << dendl;
  } else {
    conn = create_connect(av, type, false);
    ldout(cct, 10) << __func__ << " " << av << " new " << conn << dendl;
  }

  return conn;
}
/**
* If my_addr doesn't have an IP set, this function
* will fill it in from the passed addr. Otherwise it does nothing and returns.
*/
// Fill each of our blank-IP addresses from the first provided address of the
// same family, preserving our original type/port/nonce.  Returns true if
// anything changed (in which case the loopback connection is refreshed).
bool AsyncMessenger::set_addr_unknowns(const entity_addrvec_t &addrs)
{
  ldout(cct,1) << __func__ << " " << addrs << dendl;
  bool ret = false;
  std::lock_guard l{lock};

  entity_addrvec_t newaddrs = *my_addrs;
  for (auto& a : newaddrs.v) {
    if (a.is_blank_ip()) {
      // keep our own type/port/nonce, take only the IP from the match
      int type = a.get_type();
      int port = a.get_port();
      uint32_t nonce = a.get_nonce();
      for (auto& b : addrs.v) {
        if (a.get_family() == b.get_family()) {
          ldout(cct,1) << __func__ << " assuming my addr " << a
                       << " matches provided addr " << b << dendl;
          a = b;
          a.set_nonce(nonce);
          a.set_type(type);
          a.set_port(port);
          ret = true;
          break;
        }
      }
    }
  }
  set_myaddrs(newaddrs);
  if (ret) {
    _init_local_connection();
  }
  ldout(cct,1) << __func__ << " now " << *my_addrs << dendl;
  return ret;
}
// Stop every tracked connection (accepting, registered and anonymous);
// queue_reset=true additionally queues a RESET event for dispatchers.
// Connections already parked in deleted_conns are released here too.
void AsyncMessenger::shutdown_connections(bool queue_reset)
{
  ldout(cct,1) << __func__ << " " << dendl;
  std::lock_guard l{lock};
  for (const auto& c : accepting_conns) {
    ldout(cct, 5) << __func__ << " accepting_conn " << c << dendl;
    c->stop(queue_reset);
  }
  accepting_conns.clear();

  for (const auto& [e, c] : conns) {
    ldout(cct, 5) << __func__ << " mark down " << e << " " << c << dendl;
    c->stop(queue_reset);
  }
  conns.clear();

  for (const auto& c : anon_conns) {
    ldout(cct, 5) << __func__ << " mark down " << c << dendl;
    c->stop(queue_reset);
  }
  anon_conns.clear();

  {
    std::lock_guard l{deleted_lock};
    for (const auto& c : deleted_conns) {
      ldout(cct, 5) << __func__ << " delete " << c << dendl;
      c->get_perf_counter()->dec(l_msgr_active_connections);
    }
    deleted_conns.clear();
  }
}
void AsyncMessenger::mark_down_addrs(const entity_addrvec_t& addrs)
{
std::lock_guard l{lock};
const AsyncConnectionRef& conn = _lookup_conn(addrs);
if (conn) {
ldout(cct, 1) << __func__ << " " << addrs << " -- " << conn << dendl;
conn->stop(true);
} else {
ldout(cct, 1) << __func__ << " " << addrs << " -- connection dne" << dendl;
}
}
// Protocol version to speak with a peer: the cluster protocol for same-type
// peers; otherwise the public protocol of the peer (when we connect) or of
// ourselves (when we accept).  Returns 0 for unknown entity types.
int AsyncMessenger::get_proto_version(int peer_type, bool connect) const
{
  const int my_type = my_name.type();

  if (peer_type == my_type) {
    // internal
    return cluster_protocol;
  }

  // public
  switch (connect ? peer_type : my_type) {
  case CEPH_ENTITY_TYPE_OSD: return CEPH_OSDC_PROTOCOL;
  case CEPH_ENTITY_TYPE_MDS: return CEPH_MDSC_PROTOCOL;
  case CEPH_ENTITY_TYPE_MON: return CEPH_MONC_PROTOCOL;
  default:                   return 0;
  }
}
// Register an accepted connection once its handshake identified the peer.
// Lossy server-side connections (unless register_lossy_clients) go to
// anon_conns.  If a connection for the same peer already exists, a lazily
// deleted one is replaced; otherwise we return -1 and the caller loses the
// connection race.
int AsyncMessenger::accept_conn(const AsyncConnectionRef& conn)
{
  std::lock_guard l{lock};
  if (conn->policy.server &&
      conn->policy.lossy &&
      !conn->policy.register_lossy_clients) {
    anon_conns.insert(conn);
    conn->get_perf_counter()->inc(l_msgr_active_connections);
    return 0;
  }
  auto it = conns.find(*conn->peer_addrs);
  if (it != conns.end()) {
    auto& existing = it->second;

    // lazy delete, see "deleted_conns"
    // If conn already in, we will return 0
    std::lock_guard l{deleted_lock};
    if (deleted_conns.erase(existing)) {
      it->second->get_perf_counter()->dec(l_msgr_active_connections);
      conns.erase(it);
    } else if (conn != existing) {
      return -1;
    }
  }
  ldout(cct, 10) << __func__ << " " << conn << " " << *conn->peer_addrs << dendl;
  conns[*conn->peer_addrs] = conn;
  conn->get_perf_counter()->inc(l_msgr_active_connections);
  accepting_conns.erase(conn);
  return 0;
}
// Learn our own externally-visible address from what a peer saw us as.
// Fills blank-IP entries of the matching family (preserving type/port/nonce
// when bound).  Returns true only on the first successful learn; the
// need_addr flag makes this a one-shot, double-checked under `lock`.
bool AsyncMessenger::learned_addr(const entity_addr_t &peer_addr_for_me)
{
  // be careful here: multiple threads may block here, and readers of
  // my_addr do NOT hold any lock.

  // this always goes from true -> false under the protection of the
  // mutex.  if it is already false, we need not retake the mutex at
  // all.
  if (!need_addr)
    return false;

  std::lock_guard l(lock);
  if (need_addr) {
    if (my_addrs->empty()) {
      auto a = peer_addr_for_me;
      a.set_type(entity_addr_t::TYPE_ANY);
      a.set_nonce(nonce);
      if (!did_bind) {
        a.set_port(0);
      }
      set_myaddrs(entity_addrvec_t(a));
      ldout(cct,10) << __func__ << " had no addrs" << dendl;
    } else {
      // fix all addrs of the same family, regardless of type (msgr2 vs legacy)
      entity_addrvec_t newaddrs = *my_addrs;
      for (auto& a : newaddrs.v) {
        if (a.is_blank_ip() &&
            a.get_family() == peer_addr_for_me.get_family()) {
          entity_addr_t t = peer_addr_for_me;
          if (!did_bind) {
            t.set_type(entity_addr_t::TYPE_ANY);
            t.set_port(0);
          } else {
            t.set_type(a.get_type());
            t.set_port(a.get_port());
          }
          t.set_nonce(a.get_nonce());
          ldout(cct,10) << __func__ << " " << a << " -> " << t << dendl;
          a = t;
        }
      }
      set_myaddrs(newaddrs);
    }
    ldout(cct, 1) << __func__ << " learned my addr " << *my_addrs
                  << " (peer_addr_for_me " << peer_addr_for_me << ")" << dendl;
    _init_local_connection();
    need_addr = false;
    return true;
  }
  return false;
}
// Finalize lazily-deleted connections: remove them from all tracking maps
// and decrement the active-connection perf counter.  Triggered via
// reap_handler once deleted_conns crosses ms_async_reap_threshold.
void AsyncMessenger::reap_dead()
{
  ldout(cct, 1) << __func__ << " start" << dendl;

  std::lock_guard l1{lock};

  {
    std::lock_guard l2{deleted_lock};
    for (auto& c : deleted_conns) {
      ldout(cct, 5) << __func__ << " delete " << c << dendl;
      auto conns_it = conns.find(*c->peer_addrs);
      // only erase if the map still points at this exact connection;
      // a replacement may already have been registered for the same peer
      if (conns_it != conns.end() && conns_it->second == c)
        conns.erase(conns_it);
      accepting_conns.erase(c);
      anon_conns.erase(c);
      c->get_perf_counter()->dec(l_msgr_active_connections);
    }
    deleted_conns.clear();
  }
}
| 26,845 | 26.848548 | 114 |
cc
|
null |
ceph-main/src/msg/async/AsyncMessenger.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 UnitedStack <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_ASYNCMESSENGER_H
#define CEPH_ASYNCMESSENGER_H
#include <map>
#include <optional>
#include "include/types.h"
#include "include/xlist.h"
#include "include/spinlock.h"
#include "include/unordered_map.h"
#include "include/unordered_set.h"
#include "common/ceph_mutex.h"
#include "common/Cond.h"
#include "common/Thread.h"
#include "msg/SimplePolicyMessenger.h"
#include "msg/DispatchQueue.h"
#include "AsyncConnection.h"
#include "Event.h"
#include "include/ceph_assert.h"
class AsyncMessenger;
/**
* If the Messenger binds to a specific address, the Processor runs
* and listens for incoming connections.
*/
class Processor {
  AsyncMessenger *msgr;                      // owning messenger
  ceph::NetHandler net;                      // low-level socket helpers
  Worker *worker;                            // worker whose event center drives accepts
  std::vector<ServerSocket> listen_sockets;  // one listening socket per bound address
  EventCallbackRef listen_handler;           // registered for EVENT_READABLE on listen fds
  class C_processor_accept;

 public:
  Processor(AsyncMessenger *r, Worker *w, CephContext *c);
  ~Processor() { delete listen_handler; };

  void stop();
  int bind(const entity_addrvec_t &bind_addrs,
           const std::set<int>& avoid_ports,
           entity_addrvec_t* bound_addrs);
  void start();
  void accept();
};
/*
* AsyncMessenger is represented for maintaining a set of asynchronous connections,
* it may own a bind address and the accepted connections will be managed by
* AsyncMessenger.
*
*/
class AsyncMessenger : public SimplePolicyMessenger {
  // First we have the public Messenger interface implementation...
public:
  /**
   * Initialize the AsyncMessenger!
   *
   * @param cct The CephContext to use
   * @param name The name to assign ourselves
   * _nonce A unique ID to use for this AsyncMessenger. It should not
   * be a value that will be repeated if the daemon restarts.
   */
  AsyncMessenger(CephContext *cct, entity_name_t name, const std::string &type,
                 std::string mname, uint64_t _nonce);

  /**
   * Destroy the AsyncMessenger. Pretty simple since all the work is done
   * elsewhere.
   */
  ~AsyncMessenger() override;

  /** @defgroup Accessors
   * @{
   */
  bool set_addr_unknowns(const entity_addrvec_t &addr) override;

  int get_dispatch_queue_len() override {
    return dispatch_queue.get_queue_len();
  }

  double get_dispatch_queue_max_age(utime_t now) override {
    return dispatch_queue.get_max_age(now);
  }
  /** @} Accessors */

  /**
   * @defgroup Configuration functions
   * @{
   */
  void set_cluster_protocol(int p) override {
    ceph_assert(!started && !did_bind);
    cluster_protocol = p;
  }

  int bind(const entity_addr_t& bind_addr,
           std::optional<entity_addrvec_t> public_addrs=std::nullopt) override;
  int rebind(const std::set<int>& avoid_ports) override;
  int bindv(const entity_addrvec_t& bind_addrs,
            std::optional<entity_addrvec_t> public_addrs=std::nullopt) override;
  int client_bind(const entity_addr_t& bind_addr) override;

  int client_reset() override;

  bool should_use_msgr2() override;

  /** @} Configuration functions */

  /**
   * @defgroup Startup/Shutdown
   * @{
   */
  int start() override;
  void wait() override;
  int shutdown() override;

  /** @} // Startup/Shutdown */

  /**
   * @defgroup Messaging
   * @{
   */
  int send_to(Message *m, int type, const entity_addrvec_t& addrs) override;

  /** @} // Messaging */

  /**
   * @defgroup Connection Management
   * @{
   */
  ConnectionRef connect_to(int type,
                           const entity_addrvec_t& addrs,
                           bool anon, bool not_local_dest=false) override;
  ConnectionRef get_loopback_connection() override;
  void mark_down(const entity_addr_t& addr) override {
    mark_down_addrs(entity_addrvec_t(addr));
  }
  void mark_down_addrs(const entity_addrvec_t& addrs) override;
  void mark_down_all() override {
    shutdown_connections(true);
  }
  /** @} // Connection Management */

  /**
   * @defgroup Inner classes
   * @{
   */

  /**
   * @} // Inner classes
   */

protected:
  /**
   * @defgroup Messenger Interfaces
   * @{
   */
  /**
   * Start up the DispatchQueue thread once we have somebody to dispatch to.
   */
  void ready() override;
  /** @} // Messenger Interfaces */

private:
  /**
   * @defgroup Utility functions
   * @{
   */

  /**
   * Create a connection associated with the given entity (of the given type).
   * Initiate the connection. (This function returning does not guarantee
   * connection success.)
   *
   * @param addrs The address(es) of the entity to connect to.
   * @param type The peer type of the entity at the address.
   *
   * @return a pointer to the newly-created connection. Caller does not own a
   * reference; take one if you need it.
   */
  AsyncConnectionRef create_connect(const entity_addrvec_t& addrs, int type,
                                    bool anon);

  void _finish_bind(const entity_addrvec_t& bind_addrs,
                    const entity_addrvec_t& listen_addrs);

  /// drop msgr2 addresses when should_use_msgr2() is false
  entity_addrvec_t _filter_addrs(const entity_addrvec_t& addrs);

private:
  NetworkStack *stack;
  std::vector<Processor*> processors;
  friend class Processor;
  DispatchQueue dispatch_queue;

  // the worker run messenger's cron jobs
  Worker *local_worker;

  std::string ms_type;

  /// overall lock used for AsyncMessenger data structures
  ceph::mutex lock = ceph::make_mutex("AsyncMessenger::lock");

  // AsyncMessenger stuff
  /// approximately unique ID set by the Constructor for use in entity_addr_t
  uint64_t nonce;

  /// true, specifying we haven't learned our addr; set false when we find it.
  // maybe this should be protected by the lock?
  bool need_addr = true;

  /**
   * set to bind addresses if bind or bindv were called before NetworkStack
   * was ready to bind
   */
  entity_addrvec_t pending_bind_addrs;

  /**
   * set to public addresses (those announced by the msgr's protocols).
   * they are stored to handle the cases when either:
   *  a) bind or bindv were called before NetworkStack was ready to bind,
   *  b) rebind is called down the road.
   */
  std::optional<entity_addrvec_t> saved_public_addrs;

  /**
   * false; set to true if a pending bind exists
   */
  bool pending_bind = false;

  /**
   *  The following aren't lock-protected since you shouldn't be able to race
   *  the only writers.
   */

  /**
   *  false; set to true if the AsyncMessenger bound to a specific address;
   *  and set false again by Accepter::stop().
   */
  bool did_bind = false;

  /// counter for the global seq our connection protocol uses
  __u32 global_seq = 0;

  /// lock to protect the global_seq
  ceph::spinlock global_seq_lock;

  /**
   * hash map of addresses to Asyncconnection
   *
   * NOTE: a Asyncconnection* with state CLOSED may still be in the map but is considered
   * invalid and can be replaced by anyone holding the msgr lock
   */
  ceph::unordered_map<entity_addrvec_t, AsyncConnectionRef> conns;

  /**
   * list of connection are in the process of accepting
   *
   * These are not yet in the conns map.
   */
  std::set<AsyncConnectionRef> accepting_conns;

  /// anonymous outgoing connections
  std::set<AsyncConnectionRef> anon_conns;

  /**
   * list of connection are closed which need to be clean up
   *
   * Because AsyncMessenger and AsyncConnection follow a lock rule that
   * we can lock AsyncMesenger::lock firstly then lock AsyncConnection::lock
   * but can't reversed. This rule is aimed to avoid dead lock.
   * So if AsyncConnection want to unregister itself from AsyncMessenger,
   * we pick up this idea that just queue itself to this set and do lazy
   * deleted for AsyncConnection. "_lookup_conn" must ensure not return a
   * AsyncConnection in this set.
   */
  ceph::mutex deleted_lock = ceph::make_mutex("AsyncMessenger::deleted_lock");
  std::set<AsyncConnectionRef> deleted_conns;

  /// callback that runs reap_dead() on the local worker
  EventCallbackRef reap_handler;

  /// internal cluster protocol version, if any, for talking to entities of the same type.
  int cluster_protocol = 0;

  ceph::condition_variable stop_cond;
  bool stopped = true;

  /* You must hold this->lock for the duration of use! */
  const auto& _lookup_conn(const entity_addrvec_t& k) {
    static const AsyncConnectionRef nullref;
    ceph_assert(ceph_mutex_is_locked(lock));
    auto p = conns.find(k);
    if (p == conns.end()) {
      return nullref;
    }

    // lazy delete, see "deleted_conns"
    // don't worry omit, Connection::send_message can handle this case.
    if (p->second->is_unregistered()) {
      std::lock_guard l{deleted_lock};
      if (deleted_conns.erase(p->second)) {
        p->second->get_perf_counter()->dec(l_msgr_active_connections);
        conns.erase(p);
        return nullref;
      }
    }

    return p->second;
  }

  void _init_local_connection() {
    ceph_assert(ceph_mutex_is_locked(lock));
    local_connection->peer_addrs = *my_addrs;
    local_connection->peer_type = my_name.type();
    local_connection->set_features(CEPH_FEATURES_ALL);
    ms_deliver_handle_fast_connect(local_connection.get());
  }

  void shutdown_connections(bool queue_reset);

public:
  /// con used for sending messages to ourselves
  AsyncConnectionRef local_connection;

  /**
   * @defgroup AsyncMessenger internals
   * @{
   */
  /**
   * This wraps _lookup_conn.
   */
  AsyncConnectionRef lookup_conn(const entity_addrvec_t& k) {
    std::lock_guard l{lock};
    return _lookup_conn(k); /* make new ref! */
  }

  int accept_conn(const AsyncConnectionRef& conn);
  bool learned_addr(const entity_addr_t &peer_addr_for_me);
  void add_accept(Worker *w, ConnectedSocket cli_socket,
                  const entity_addr_t &listen_addr,
                  const entity_addr_t &peer_addr);
  NetworkStack *get_stack() {
    return stack;
  }

  uint64_t get_nonce() const {
    return nonce;
  }

  /**
   * Increment the global sequence for this AsyncMessenger and return it.
   * This is for the connect protocol, although it doesn't hurt if somebody
   * else calls it.
   *
   * @return a global sequence ID that nobody else has seen.
   */
  __u32 get_global_seq(__u32 old=0) {
    std::lock_guard<ceph::spinlock> lg(global_seq_lock);

    if (old > global_seq)
      global_seq = old;
    __u32 ret = ++global_seq;

    return ret;
  }
  /**
   * Get the protocol version we support for the given peer type: either
   * a peer protocol (if it matches our own), the protocol version for the
   * peer (if we're connecting), or our protocol version (if we're accepting).
   */
  int get_proto_version(int peer_type, bool connect) const;

  /**
   * Fill in the address and peer type for the local connection, which
   * is used for delivering messages back to ourself.
   */
  void init_local_connection() {
    std::lock_guard l{lock};
    local_connection->is_loopback = true;
    _init_local_connection();
  }

  /**
   * Unregister connection from `conns`
   *
   * See "deleted_conns"
   */
  void unregister_conn(const AsyncConnectionRef& conn) {
    std::lock_guard l{deleted_lock};
    // NOTE(review): conn is a const reference here, so this std::move is
    // effectively a copy; the set still takes its own reference.
    deleted_conns.emplace(std::move(conn));
    conn->unregister();

    if (deleted_conns.size() >= cct->_conf->ms_async_reap_threshold) {
      local_worker->center.dispatch_event_external(reap_handler);
    }
  }

  /**
   * Reap dead connection from `deleted_conns`
   *
   * @return the number of dead connections
   *
   * See "deleted_conns"
   */
  void reap_dead();

  /**
   * @} // AsyncMessenger Internals
   */
} ;
#endif /* CEPH_ASYNCMESSENGER_H */
| 11,699 | 25.958525 | 90 |
h
|
null |
ceph-main/src/msg/async/Event.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 UnitedStack <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/compat.h"
#include "common/errno.h"
#include "Event.h"
#ifdef HAVE_DPDK
#include "dpdk/EventDPDK.h"
#endif
#ifdef HAVE_EPOLL
#include "EventEpoll.h"
#else
#ifdef HAVE_KQUEUE
#include "EventKqueue.h"
#else
#ifdef HAVE_POLL
#include "EventPoll.h"
#else
#include "EventSelect.h"
#endif
#endif
#endif
#define dout_subsys ceph_subsys_ms
#undef dout_prefix
#define dout_prefix *_dout << "EventCallback "
// Callback bound to an EventCenter's notify (wakeup) fd: drains any bytes
// accumulated on the pipe/socket so the fd stops reporting readable.  The
// byte contents carry no information — only the wakeup itself matters.
class C_handle_notify : public EventCallback {
  EventCenter *center;
  CephContext *cct;
 public:
  C_handle_notify(EventCenter *c, CephContext *cc): center(c), cct(cc) {}
  void do_request(uint64_t fd_or_id) override {
    char c[256];
    int r = 0;
    do {
#ifdef _WIN32
      // on Windows the notify channel is a socketpair, not a pipe
      r = recv(fd_or_id, c, sizeof(c), 0);
#else
      r = read(fd_or_id, c, sizeof(c));
#endif
      if (r < 0) {
        // EAGAIN just means the non-blocking fd is fully drained
        if (ceph_sock_errno() != EAGAIN)
          ldout(cct, 1) << __func__ << " read notify pipe failed: " << cpp_strerror(ceph_sock_errno()) << dendl;
      }
    } while (r > 0);
  }
};
#undef dout_prefix
#define dout_prefix _event_prefix(_dout)
/**
* Construct a Poller.
*
* \param center
* EventCenter object through which the poller will be invoked (defaults
* to the global #RAMCloud::center object).
* \param pollerName
* Human readable name that can be printed out in debugging messages
* about the poller. The name of the superclass is probably sufficient
* for most cases.
*/
EventCenter::Poller::Poller(EventCenter* center, const std::string& name)
  : owner(center), poller_name(name), slot(owner->pollers.size())
{
  // `slot` was initialized to the index we are about to occupy.
  owner->pollers.push_back(this);
}
/**
* Destroy a Poller.
*/
EventCenter::Poller::~Poller()
{
  // Erase this Poller from the vector by overwriting it with the
  // poller that used to be the last one in the vector.
  //
  // Note: this approach is reentrant (it is safe to delete a
  // poller from a poller callback, which means that the poll
  // method is in the middle of scanning the list of all pollers;
  // the worst that will happen is that the poller that got moved
  // may not be invoked in the current scan).
  owner->pollers[slot] = owner->pollers.back();
  owner->pollers[slot]->slot = slot;
  owner->pollers.pop_back();
  // -1 marks "not registered in EventCenter::pollers".
  slot = -1;
}
// Logging prefix for the dout machinery: identifies this center instance,
// its file-event table capacity, and the next timer id to be handed out.
std::ostream& EventCenter::_event_prefix(std::ostream *_dout)
{
  return *_dout << "Event(" << this << " nevent=" << nevent
                << " time_id=" << time_event_next_id << ").";
}
// Initialize the center: pick and initialize an event driver (dpdk when
// requested, otherwise the best mechanism the platform supports), size the
// file-event table, and — if the driver can block — create the notify
// pipe/socketpair used by wakeup().  Returns 0 or a negative error.
int EventCenter::init(int nevent, unsigned center_id, const std::string &type)
{
  // can't init multi times
  ceph_assert(this->nevent == 0);
  this->type = type;
  this->center_id = center_id;
  if (type == "dpdk") {
#ifdef HAVE_DPDK
    driver = new DPDKDriver(cct);
#endif
  } else {
    // Preference order: epoll > kqueue > poll > select.
#ifdef HAVE_EPOLL
    driver = new EpollDriver(cct);
#else
#ifdef HAVE_KQUEUE
    driver = new KqueueDriver(cct);
#else
#ifdef HAVE_POLL
    driver = new PollDriver(cct);
#else
    driver = new SelectDriver(cct);
#endif
#endif
#endif
  }
  if (!driver) {
    // e.g. type == "dpdk" but built without HAVE_DPDK
    lderr(cct) << __func__ << " failed to create event driver " << dendl;
    return -1;
  }
  int r = driver->init(this, nevent);
  if (r < 0) {
    lderr(cct) << __func__ << " failed to init event driver." << dendl;
    return r;
  }
  file_events.resize(nevent);
  this->nevent = nevent;
  // Drivers that never block (e.g. dpdk polling) need no notify channel.
  if (!driver->need_wakeup())
    return 0;
  int fds[2];
#ifdef _WIN32
  if (win_socketpair(fds) < 0) {
#else
  if (pipe_cloexec(fds, 0) < 0) {
#endif
    int e = ceph_sock_errno();
    lderr(cct) << __func__ << " can't create notify pipe: " << cpp_strerror(e) << dendl;
    return -e;
  }
  notify_receive_fd = fds[0];
  notify_send_fd = fds[1];
  // Both ends non-blocking: the reader drains opportunistically and the
  // writer must never stall an event loop.
  r = net.set_nonblock(notify_receive_fd);
  if (r < 0) {
    return r;
  }
  r = net.set_nonblock(notify_send_fd);
  if (r < 0) {
    return r;
  }
  return r;
}
// Tear down the center: flush pending external events, drop timers, close
// the notify channel, and destroy the driver and notify handler.
EventCenter::~EventCenter()
{
  {
    // Run externally submitted callbacks so they are not silently
    // dropped on shutdown.
    std::lock_guard<std::mutex> l(external_lock);
    while (!external_events.empty()) {
      EventCallbackRef e = external_events.front();
      if (e)
        e->do_request(0);
      external_events.pop_front();
    }
  }
  time_events.clear();
  if (notify_receive_fd >= 0)
    compat_closesocket(notify_receive_fd);
  if (notify_send_fd >= 0)
    compat_closesocket(notify_send_fd);
  // `delete` on a null pointer is a no-op, so no null checks are needed.
  delete driver;
  delete notify_handler;
}
// Bind this center to the calling thread (in_thread() compares against
// `owner`), publish it in the per-type global registry used by
// submit_to(), and start listening on the notify fd.
void EventCenter::set_owner()
{
  owner = pthread_self();
  ldout(cct, 2) << __func__ << " center_id=" << center_id << " owner=" << owner << dendl;
  if (!global_centers) {
    // First call for this center: look up (or create) the shared
    // registry singleton and register ourselves under center_id.
    global_centers = &cct->lookup_or_create_singleton_object<
      EventCenter::AssociatedCenters>(
	"AsyncMessenger::EventCenter::global_center::" + type, true);
    ceph_assert(global_centers);
    global_centers->centers[center_id] = this;
    if (driver->need_wakeup()) {
      // Watch the notify fd so wakeup() can interrupt a blocking wait.
      notify_handler = new C_handle_notify(this, cct);
      int r = create_file_event(notify_receive_fd, EVENT_READABLE, notify_handler);
      ceph_assert(r == 0);
    }
  }
}
// Register interest in `mask` (EVENT_READABLE/EVENT_WRITABLE) for `fd`,
// dispatching to `ctxt`.  Must run on the owner thread.  New mask bits are
// merged with any existing registration.  Returns 0 or a negative error.
int EventCenter::create_file_event(int fd, int mask, EventCallbackRef ctxt)
{
  ceph_assert(in_thread());
  int r = 0;
  if (fd >= nevent) {
    // Grow the fd-indexed table (and the driver's state) by powers of
    // four until fd fits.
    int new_size = nevent << 2;
    while (fd >= new_size)
      new_size <<= 2;
    ldout(cct, 20) << __func__ << " event count exceed " << nevent << ", expand to " << new_size << dendl;
    r = driver->resize_events(new_size);
    if (r < 0) {
      lderr(cct) << __func__ << " event count is exceed." << dendl;
      return -ERANGE;
    }
    file_events.resize(new_size);
    nevent = new_size;
  }
  EventCenter::FileEvent *event = _get_file_event(fd);
  ldout(cct, 20) << __func__ << " create event started fd=" << fd << " mask=" << mask
                 << " original mask is " << event->mask << dendl;
  if (event->mask == mask)
    return 0;
  r = driver->add_event(fd, event->mask, mask);
  if (r < 0) {
    // Actually we don't allow any failed error code, caller doesn't prepare to
    // handle error status. So now we need to assert failure here. In practice,
    // add_event shouldn't report error, otherwise it must be a innermost bug!
    lderr(cct) << __func__ << " add event failed, ret=" << r << " fd=" << fd
               << " mask=" << mask << " original mask is " << event->mask << dendl;
    ceph_abort_msg("BUG!");
    return r;
  }
  event->mask |= mask;
  // A single callback may serve one or both directions.
  if (mask & EVENT_READABLE) {
    event->read_cb = ctxt;
  }
  if (mask & EVENT_WRITABLE) {
    event->write_cb = ctxt;
  }
  ldout(cct, 20) << __func__ << " create event end fd=" << fd << " mask=" << mask
                 << " current mask is " << event->mask << dendl;
  return 0;
}
// Remove interest in the given `mask` bits for `fd` and clear the
// corresponding callbacks.  Must run on the owner thread.
void EventCenter::delete_file_event(int fd, int mask)
{
  ceph_assert(in_thread() && fd >= 0);
  if (fd >= nevent) {
    // fd was never registered with this center; nothing to remove.
    // (fixed: missing separator space before "mask=" in the log line)
    ldout(cct, 1) << __func__ << " delete event fd=" << fd << " is equal or greater than nevent=" << nevent
                  << " mask=" << mask << dendl;
    return ;
  }
  EventCenter::FileEvent *event = _get_file_event(fd);
  ldout(cct, 30) << __func__ << " delete event started fd=" << fd << " mask=" << mask
                 << " original mask is " << event->mask << dendl;
  if (!event->mask)
    return ;
  int r = driver->del_event(fd, event->mask, mask);
  if (r < 0) {
    // see create_file_event
    ceph_abort_msg("BUG!");
  }
  if (mask & EVENT_READABLE && event->read_cb) {
    event->read_cb = nullptr;
  }
  if (mask & EVENT_WRITABLE && event->write_cb) {
    event->write_cb = nullptr;
  }
  event->mask = event->mask & (~mask);
  ldout(cct, 30) << __func__ << " delete event end fd=" << fd << " mask=" << mask
                 << " current mask is " << event->mask << dendl;
}
uint64_t EventCenter::create_time_event(uint64_t microseconds, EventCallbackRef ctxt)
{
ceph_assert(in_thread());
uint64_t id = time_event_next_id++;
ldout(cct, 30) << __func__ << " id=" << id << " trigger after " << microseconds << "us"<< dendl;
EventCenter::TimeEvent event;
clock_type::time_point expire = clock_type::now() + std::chrono::microseconds(microseconds);
event.id = id;
event.time_cb = ctxt;
std::multimap<clock_type::time_point, TimeEvent>::value_type s_val(expire, event);
auto it = time_events.insert(std::move(s_val));
event_map[id] = it;
return id;
}
// Cancel a pending time event by id; silently ignores ids that were never
// issued or have already fired.  Must run on the owner thread.
void EventCenter::delete_time_event(uint64_t id)
{
  ceph_assert(in_thread());
  ldout(cct, 30) << __func__ << " id=" << id << dendl;
  // id 0 is never handed out, and ids >= next_id were never created.
  if (id == 0 || id >= time_event_next_id)
    return;
  auto pos = event_map.find(id);
  if (pos == event_map.end()) {
    ldout(cct, 10) << __func__ << " id=" << id << " not found" << dendl;
    return;
  }
  time_events.erase(pos->second);
  event_map.erase(pos);
}
// Interrupt a (possibly) blocked event_wait() by writing one byte to the
// notify channel.  Safe to call from any thread.
void EventCenter::wakeup()
{
  // No need to wake up since we never sleep
  if (!pollers.empty() || !driver->need_wakeup())
    return ;
  ldout(cct, 20) << __func__ << dendl;
  char buf = 'c';
  // wake up "event_wait"
#ifdef _WIN32
  int n = send(notify_send_fd, &buf, sizeof(buf), 0);
#else
  int n = write(notify_send_fd, &buf, sizeof(buf));
#endif
  if (n < 0) {
    // EAGAIN (pipe full) is tolerated — presumably unread bytes already
    // guarantee the loop will wake; anything else is fatal.
    if (ceph_sock_errno() != EAGAIN) {
      ldout(cct, 1) << __func__ << " write notify pipe failed: "
                    << cpp_strerror(ceph_sock_errno()) << dendl;
      ceph_abort();
    }
  }
}
// Fire every time event whose expiry is at or before "now", removing it
// from both indexes before invoking its callback (callbacks may schedule
// or cancel further timers).  Returns the number of events fired.
int EventCenter::process_time_events()
{
  int fired = 0;
  const auto now = clock_type::now();
  using ceph::operator <<;
  ldout(cct, 30) << __func__ << " cur time is " << now << dendl;
  // Re-fetch begin() every pass: the callback may mutate time_events.
  for (auto it = time_events.begin(); it != time_events.end();
       it = time_events.begin()) {
    if (it->first > now)
      break;
    TimeEvent &e = it->second;
    EventCallbackRef cb = e.time_cb;
    uint64_t id = e.id;
    time_events.erase(it);
    event_map.erase(id);
    ldout(cct, 30) << __func__ << " process time event: id=" << id << dendl;
    ++fired;
    cb->do_request(id);
  }
  return fired;
}
// One pass of the event loop: wait (up to timeout_microseconds) for file
// events, then fire due timers, externally submitted events and — when
// otherwise idle — registered pollers.  Returns the number of events
// handled; optionally reports the time spent doing actual work.
int EventCenter::process_events(unsigned timeout_microseconds, ceph::timespan *working_dur)
{
  struct timeval tv;
  int numevents;
  bool trigger_time = false;
  auto now = clock_type::now();
  clock_type::time_point end_time = now + std::chrono::microseconds(timeout_microseconds);
  auto it = time_events.begin();
  // If the earliest timer expires before the requested timeout, shorten
  // the wait so we wake up in time to fire it.
  if (it != time_events.end() && end_time >= it->first) {
    trigger_time = true;
    end_time = it->first;
    if (end_time > now) {
      timeout_microseconds = std::chrono::duration_cast<std::chrono::microseconds>(end_time - now).count();
    } else {
      timeout_microseconds = 0;
    }
  }
  // With pollers registered or external events pending we must not block.
  bool blocking = pollers.empty() && !external_num_events.load();
  if (!blocking)
    timeout_microseconds = 0;
  tv.tv_sec = timeout_microseconds / 1000000;
  tv.tv_usec = timeout_microseconds % 1000000;
  ldout(cct, 30) << __func__ << " wait second " << tv.tv_sec << " usec " << tv.tv_usec << dendl;
  std::vector<FiredFileEvent> fired_events;
  numevents = driver->event_wait(fired_events, &tv);
  auto working_start = ceph::mono_clock::now();
  for (int event_id = 0; event_id < numevents; event_id++) {
    int rfired = 0;
    FileEvent *event;
    EventCallbackRef cb;
    event = _get_file_event(fired_events[event_id].fd);
    /* note the event->mask & mask & ... code: maybe an already processed
     * event removed an element that fired and we still didn't
     * processed, so we check if the event is still valid. */
    if (event->mask & fired_events[event_id].mask & EVENT_READABLE) {
      rfired = 1;
      cb = event->read_cb;
      cb->do_request(fired_events[event_id].fd);
    }
    if (event->mask & fired_events[event_id].mask & EVENT_WRITABLE) {
      // don't invoke the same callback twice when it serves both directions
      if (!rfired || event->read_cb != event->write_cb) {
        cb = event->write_cb;
        cb->do_request(fired_events[event_id].fd);
      }
    }
    ldout(cct, 30) << __func__ << " event_wq process is " << fired_events[event_id].fd
                   << " mask is " << fired_events[event_id].mask << dendl;
  }
  if (trigger_time)
    numevents += process_time_events();
  if (external_num_events.load()) {
    // Swap the queue out under the lock, then run the callbacks unlocked
    // so they may safely enqueue further external events.
    external_lock.lock();
    std::deque<EventCallbackRef> cur_process;
    cur_process.swap(external_events);
    external_num_events.store(0);
    external_lock.unlock();
    numevents += cur_process.size();
    while (!cur_process.empty()) {
      EventCallbackRef e = cur_process.front();
      ldout(cct, 30) << __func__ << " do " << e << dendl;
      e->do_request(0);
      cur_process.pop_front();
    }
  }
  if (!numevents && !blocking) {
    // Nothing else fired: give registered pollers a chance to find work.
    for (uint32_t i = 0; i < pollers.size(); i++)
      numevents += pollers[i]->poll();
  }
  if (working_dur)
    *working_dur = ceph::mono_clock::now() - working_start;
  return numevents;
}
// Queue a callback from any thread; it will run on this center's thread
// during the next process_events() pass.
void EventCenter::dispatch_event_external(EventCallbackRef e)
{
  uint64_t num = 0;
  {
    std::lock_guard lock{external_lock};
    // Cheap dedup: skip if this exact callback is already queued last.
    if (external_num_events > 0 && *external_events.rbegin() == e) {
      return;
    }
    external_events.push_back(e);
    num = ++external_num_events;
  }
  // Only the first pending event needs to interrupt a blocked wait, and a
  // thread never needs to wake itself.
  if (num == 1 && !in_thread())
    wakeup();
  ldout(cct, 30) << __func__ << " " << e << " pending " << num << dendl;
}
| 13,448 | 26.559426 | 112 |
cc
|
null |
ceph-main/src/msg/async/Event.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 UnitedStack <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MSG_EVENT_H
#define CEPH_MSG_EVENT_H
#ifdef __APPLE__
#include <AvailabilityMacros.h>
#endif
// We use epoll, kqueue, evport, select in descending order by performance.
#if defined(__linux__)
#define HAVE_EPOLL 1
#endif
#if (defined(__APPLE__) && defined(MAC_OS_X_VERSION_10_6)) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined (__NetBSD__)
#define HAVE_KQUEUE 1
#endif
#ifdef _WIN32
#define HAVE_POLL 1
#endif
#ifdef __sun
#include <sys/feature_tests.h>
#ifdef _DTRACE_VERSION
#define HAVE_EVPORT 1
#endif
#endif
#include <atomic>
#include <mutex>
#include <condition_variable>
#include "common/ceph_time.h"
#include "common/dout.h"
#include "net_handler.h"
#define EVENT_NONE 0
#define EVENT_READABLE 1
#define EVENT_WRITABLE 2
class EventCenter;
// Interface for all event handlers.  do_request() receives the fired fd
// for file events, the timer id for time events, or 0 for externally
// dispatched events.
class EventCallback {
 public:
  virtual void do_request(uint64_t fd_or_id) = 0;
  virtual ~EventCallback() {} // we want a virtual destructor!!!
};
typedef EventCallback* EventCallbackRef;
// One ready event reported by an EventDriver: the fd that fired and the
// applicable EVENT_READABLE/EVENT_WRITABLE bits.
struct FiredFileEvent {
  int fd;
  int mask;
};
/*
* EventDriver is a wrap of event mechanisms depends on different OS.
* For example, Linux will use epoll(2), BSD will use kqueue(2) and select will
* be used for worst condition.
*/
class EventDriver {
 public:
  virtual ~EventDriver() {}       // we want a virtual destructor!!!
  virtual int init(EventCenter *center, int nevent) = 0;
  // add_event/del_event receive the currently registered mask plus the
  // bits to add/remove; all return 0 or a negative error.
  virtual int add_event(int fd, int cur_mask, int mask) = 0;
  virtual int del_event(int fd, int cur_mask, int del_mask) = 0;
  // Wait up to *tp (or indefinitely when allowed) and fill fired_events;
  // returns the number of ready events.
  virtual int event_wait(std::vector<FiredFileEvent> &fired_events, struct timeval *tp) = 0;
  virtual int resize_events(int newsize) = 0;
  // Drivers that never block (e.g. dpdk) return false and need no notify fd.
  virtual bool need_wakeup() { return true; }
};
/*
* EventCenter maintain a set of file descriptor and handle registered events.
*/
class EventCenter {
 public:
  // should be enough;
  static const int MAX_EVENTCENTER = 24;

 private:
  using clock_type = ceph::coarse_mono_clock;

  // Registry of all centers of one type, shared via a CephContext
  // singleton so submit_to() can reach any center by id.
  struct AssociatedCenters {
    EventCenter *centers[MAX_EVENTCENTER];
    AssociatedCenters() {
      // FIPS zeroization audit 20191115: this memset is not security related.
      memset(centers, 0, MAX_EVENTCENTER * sizeof(EventCenter*));
    }
  };

  // Per-fd registration: mask of interest plus the callback(s) serving it.
  struct FileEvent {
    int mask;
    EventCallbackRef read_cb;
    EventCallbackRef write_cb;
    FileEvent(): mask(0), read_cb(NULL), write_cb(NULL) {}
  };

  // A one-shot timer entry.
  struct TimeEvent {
    uint64_t id;
    EventCallbackRef time_cb;
    TimeEvent(): id(0), time_cb(NULL) {}
  };

 public:
  /**
   * A Poller object is invoked once each time through the dispatcher's
   * inner polling loop.
   */
  class Poller {
   public:
    explicit Poller(EventCenter* center, const std::string& pollerName);
    virtual ~Poller();
    /**
     * This method is defined by a subclass and invoked once by the
     * center during each pass through its inner polling loop.
     *
     * \return
     *      1 means that this poller did useful work during this call.
     *      0 means that the poller found no work to do.
     */
    virtual int poll() = 0;
   private:
    /// The EventCenter object that owns this Poller.  NULL means the
    /// EventCenter has been deleted.
    EventCenter* owner;
    /// Human-readable string name given to the poller to make it
    /// easy to identify for debugging. For most pollers just passing
    /// in the subclass name probably makes sense.
    std::string poller_name;
    /// Index of this Poller in EventCenter::pollers.  Allows deletion
    /// without having to scan all the entries in pollers. -1 means
    /// this poller isn't currently in EventCenter::pollers (happens
    /// after EventCenter::reset).
    int slot;
  };

 private:
  CephContext *cct;
  std::string type;
  int nevent;
  // Used only to external event
  pthread_t owner = 0;
  std::mutex external_lock;
  std::atomic_ulong external_num_events;
  std::deque<EventCallbackRef> external_events;
  std::vector<FileEvent> file_events;
  EventDriver *driver;
  std::multimap<clock_type::time_point, TimeEvent> time_events;
  // Keeps track of all of the pollers currently defined.  We don't
  // use an intrusive list here because it isn't reentrant: we need
  // to add/remove elements while the center is traversing the list.
  std::vector<Poller*> pollers;
  std::map<uint64_t, std::multimap<clock_type::time_point, TimeEvent>::iterator> event_map;
  uint64_t time_event_next_id;
  int notify_receive_fd;
  int notify_send_fd;
  ceph::NetHandler net;
  EventCallbackRef notify_handler;
  unsigned center_id;
  AssociatedCenters *global_centers = nullptr;

  int process_time_events();
  FileEvent *_get_file_event(int fd) {
    ceph_assert(fd < nevent);
    return &file_events[fd];
  }

 public:
  explicit EventCenter(CephContext *c):
    cct(c), nevent(0),
    external_num_events(0),
    driver(NULL), time_event_next_id(1),
    notify_receive_fd(-1), notify_send_fd(-1), net(c),
    notify_handler(NULL), center_id(0) { }
  ~EventCenter();
  std::ostream& _event_prefix(std::ostream *_dout);

  int init(int nevent, unsigned center_id, const std::string &type);
  void set_owner();
  pthread_t get_owner() const { return owner; }
  unsigned get_id() const { return center_id; }

  EventDriver *get_driver() { return driver; }

  // Used by internal thread
  int create_file_event(int fd, int mask, EventCallbackRef ctxt);
  // The delay is in *microseconds*: the implementation adds
  // std::chrono::microseconds(...) to the current time.  The parameter
  // was previously (misleadingly) named "milliseconds".
  uint64_t create_time_event(uint64_t microseconds, EventCallbackRef ctxt);
  void delete_file_event(int fd, int mask);
  void delete_time_event(uint64_t id);
  int process_events(unsigned timeout_microseconds, ceph::timespan *working_dur = nullptr);
  void wakeup();

  // Used by external thread
  void dispatch_event_external(EventCallbackRef e);
  inline bool in_thread() const {
    return pthread_equal(pthread_self(), owner);
  }

 private:
  // Wraps a functor for cross-center submission; optionally blocks the
  // submitter until the functor has run on the target center's thread.
  template <typename func>
  class C_submit_event : public EventCallback {
    std::mutex lock;
    std::condition_variable cond;
    bool done = false;
    func f;
    bool nonwait;
   public:
    C_submit_event(func &&_f, bool nowait)
      : f(std::move(_f)), nonwait(nowait) {}
    void do_request(uint64_t id) override {
      f();
      lock.lock();
      // notify while holding the lock: the waiter cannot proceed until
      // we unlock, by which time `done` is set.
      cond.notify_all();
      done = true;
      bool del = nonwait;
      lock.unlock();
      // fire-and-forget instances own themselves
      if (del)
        delete this;
    }
    void wait() {
      ceph_assert(!nonwait);
      std::unique_lock<std::mutex> l(lock);
      while (!done)
        cond.wait(l);
    }
  };

 public:
  // Run `f` on center `i`: inline when already on that thread, otherwise
  // queued (and awaited unless always_async).
  template <typename func>
  void submit_to(int i, func &&f, bool always_async = false) {
    ceph_assert(i < MAX_EVENTCENTER && global_centers);
    EventCenter *c = global_centers->centers[i];
    ceph_assert(c);
    if (always_async) {
      C_submit_event<func> *event = new C_submit_event<func>(std::move(f), true);
      c->dispatch_event_external(event);
    } else if (c->in_thread()) {
      f();
      return;
    } else {
      C_submit_event<func> event(std::move(f), false);
      c->dispatch_event_external(&event);
      event.wait();
    }
  };
};
#endif
| 7,469 | 26.666667 | 130 |
h
|
null |
ceph-main/src/msg/async/EventEpoll.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 UnitedStack <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/errno.h"
#include <fcntl.h>
#include "EventEpoll.h"
#define dout_subsys ceph_subsys_ms
#undef dout_prefix
#define dout_prefix *_dout << "EpollDriver."
// Allocate the result array and create the (cloexec) epoll instance.
// Returns 0 on success or a negative errno.
int EpollDriver::init(EventCenter *c, int nevent)
{
  events = (struct epoll_event*)calloc(nevent, sizeof(struct epoll_event));
  if (!events) {
    lderr(cct) << __func__ << " unable to malloc memory. " << dendl;
    return -ENOMEM;
  }
  epfd = epoll_create(1024); /* 1024 is just an hint for the kernel */
  if (epfd == -1) {
    // capture errno before logging, which may clobber it (the fcntl
    // branch below already did this; this branch now matches)
    int e = errno;
    lderr(cct) << __func__ << " unable to do epoll_create: "
	       << cpp_strerror(e) << dendl;
    return -e;
  }
  if (::fcntl(epfd, F_SETFD, FD_CLOEXEC) == -1) {
    int e = errno;
    ::close(epfd);
    lderr(cct) << __func__ << " unable to set cloexec: "
	       << cpp_strerror(e) << dendl;
    return -e;
  }
  this->nevent = nevent;
  return 0;
}
// Register (or extend) edge-triggered interest in `add_mask` for `fd`.
// `cur_mask` is what is already registered; the union is installed.
int EpollDriver::add_event(int fd, int cur_mask, int add_mask)
{
  ldout(cct, 20) << __func__ << " add event fd=" << fd << " cur_mask=" << cur_mask
                 << " add_mask=" << add_mask << " to " << epfd << dendl;
  struct epoll_event ee;
  /* If the fd was already monitored for some event, we need a MOD
   * operation. Otherwise we need an ADD operation. */
  int op;
  op = cur_mask == EVENT_NONE ? EPOLL_CTL_ADD: EPOLL_CTL_MOD;
  // EPOLLET: edge-triggered — callers must drain fds until EAGAIN.
  ee.events = EPOLLET;
  add_mask |= cur_mask; /* Merge old events */
  if (add_mask & EVENT_READABLE)
    ee.events |= EPOLLIN;
  if (add_mask & EVENT_WRITABLE)
    ee.events |= EPOLLOUT;
  ee.data.u64 = 0; /* avoid valgrind warning */
  ee.data.fd = fd;
  if (epoll_ctl(epfd, op, fd, &ee) == -1) {
    lderr(cct) << __func__ << " epoll_ctl: add fd=" << fd << " failed. "
               << cpp_strerror(errno) << dendl;
    return -errno;
  }
  return 0;
}
// Drop `delmask` bits of interest for `fd`: either narrow the
// registration (MOD) or remove the fd entirely (DEL) when no bits remain.
int EpollDriver::del_event(int fd, int cur_mask, int delmask)
{
  ldout(cct, 20) << __func__ << " del event fd=" << fd << " cur_mask=" << cur_mask
                 << " delmask=" << delmask << " to " << epfd << dendl;
  struct epoll_event ee = {0};
  int mask = cur_mask & (~delmask);
  int r = 0;
  if (mask != EVENT_NONE) {
    ee.events = EPOLLET;
    ee.data.fd = fd;
    if (mask & EVENT_READABLE)
      ee.events |= EPOLLIN;
    if (mask & EVENT_WRITABLE)
      ee.events |= EPOLLOUT;
    if ((r = epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &ee)) < 0) {
      lderr(cct) << __func__ << " epoll_ctl: modify fd=" << fd << " mask=" << mask
                 << " failed." << cpp_strerror(errno) << dendl;
      return -errno;
    }
  } else {
    /* Note, Kernel < 2.6.9 requires a non null event pointer even for
     * EPOLL_CTL_DEL. */
    if ((r = epoll_ctl(epfd, EPOLL_CTL_DEL, fd, &ee)) < 0) {
      lderr(cct) << __func__ << " epoll_ctl: delete fd=" << fd
                 << " failed." << cpp_strerror(errno) << dendl;
      return -errno;
    }
  }
  return 0;
}
// epoll keeps no per-capacity state of ours beyond the fixed result
// array, so there is nothing to grow here.
int EpollDriver::resize_events(int newsize)
{
  return 0;
}
// Wait up to *tvp (or forever when tvp == NULL) and translate the ready
// epoll events into FiredFileEvent entries.  Errors from epoll_wait
// (including EINTR) are reported as zero events.
int EpollDriver::event_wait(std::vector<FiredFileEvent> &fired_events, struct timeval *tvp)
{
  int retval, numevents = 0;
  retval = epoll_wait(epfd, events, nevent,
                      tvp ? (tvp->tv_sec*1000 + tvp->tv_usec/1000) : -1);
  if (retval > 0) {
    numevents = retval;
    fired_events.resize(numevents);
    for (int event_id = 0; event_id < numevents; event_id++) {
      int mask = 0;
      struct epoll_event *e = &events[event_id];
      if (e->events & EPOLLIN) mask |= EVENT_READABLE;
      if (e->events & EPOLLOUT) mask |= EVENT_WRITABLE;
      // error/hangup conditions are surfaced as both readable and
      // writable so the owning callbacks notice and fail the fd
      if (e->events & EPOLLERR) mask |= EVENT_READABLE|EVENT_WRITABLE;
      if (e->events & EPOLLHUP) mask |= EVENT_READABLE|EVENT_WRITABLE;
      fired_events[event_id].fd = e->data.fd;
      fired_events[event_id].mask = mask;
    }
  }
  return numevents;
}
| 4,238 | 28.643357 | 91 |
cc
|
null |
ceph-main/src/msg/async/EventEpoll.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 UnitedStack <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MSG_EVENTEPOLL_H
#define CEPH_MSG_EVENTEPOLL_H
#include <unistd.h>
#include <sys/epoll.h>
#include "Event.h"
// epoll(7)-backed EventDriver implementation (Linux).
class EpollDriver : public EventDriver {
  int epfd;
  struct epoll_event *events;   // result buffer for epoll_wait, nevent entries
  CephContext *cct;
  int nevent;
 public:
  explicit EpollDriver(CephContext *c): epfd(-1), events(NULL), cct(c), nevent(0) {}
  ~EpollDriver() override {
    if (epfd != -1)
      close(epfd);
    if (events)
      free(events);
  }
  int init(EventCenter *c, int nevent) override;
  int add_event(int fd, int cur_mask, int add_mask) override;
  int del_event(int fd, int cur_mask, int del_mask) override;
  int resize_events(int newsize) override;
  int event_wait(std::vector<FiredFileEvent> &fired_events,
                 struct timeval *tp) override;
};
#endif
| 1,244 | 23.9 | 84 |
h
|
null |
ceph-main/src/msg/async/EventKqueue.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 UnitedStack <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/errno.h"
#include "EventKqueue.h"
#define dout_subsys ceph_subsys_ms
#undef dout_prefix
#define dout_prefix *_dout << "KqueueDriver."
#define KEVENT_NOWAIT 0
// Probe whether the cached kqueue descriptor is still usable from the
// current thread; returns kqfd on success, -errno when the probe fails.
int KqueueDriver::test_kqfd() {
  struct kevent ke[1];
  if (kevent(kqfd, ke, 0, NULL, 0, KEVENT_NOWAIT) == -1) {
    ldout(cct,0) << __func__ << " invalid kqfd = " << kqfd
		 << cpp_strerror(errno) << dendl;
    return -errno;
  }
  return kqfd;
}
// Re-register every saved event on the (freshly created) kqueue
// descriptor.  Used after a thread change invalidates the old kqfd.
// Returns 0 or -errno.
int KqueueDriver::restore_events() {
  ldout(cct,30) << __func__ << " on kqfd = " << kqfd << dendl;
  for (int fd = 0; fd < size; fd++) {
    struct kevent ke[2];
    int num = 0;
    if (sav_events[fd].mask == 0)
      continue;
    ldout(cct,30) << __func__ << " restore kqfd = " << kqfd
		  << " fd = " << fd << " mask " << sav_events[fd].mask << dendl;
    if (sav_events[fd].mask & EVENT_READABLE)
      EV_SET(&ke[num++], fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
    if (sav_events[fd].mask & EVENT_WRITABLE)
      EV_SET(&ke[num++], fd, EVFILT_WRITE, EV_ADD, 0, 0, NULL);
    if (num == 0)
      continue;
    if (kevent(kqfd, ke, num, NULL, 0, KEVENT_NOWAIT) == -1) {
      ldout(cct,0) << __func__ << " unable to add event: "
		   << cpp_strerror(errno) << dendl;
      return -errno;
    }
  }
  return 0;
}
// A kqueue descriptor is only valid in the thread that created it.  Detect
// a thread change (or a dead kqfd), recreate the kqueue if needed, and
// replay all saved registrations onto it.  Returns 0 or -errno.
int KqueueDriver::test_thread_change(const char* funcname) {
  // check to see if we changed thread, because that invalidates
  // the kqfd and we need to restore that
  int oldkqfd = kqfd;
  if (!pthread_equal(mythread, pthread_self())) {
    ldout(cct,20) << funcname << " We changed thread from " << mythread
		  << " to " << pthread_self() << dendl;
    mythread = pthread_self();
    kqfd = -1;
  } else if ((kqfd != -1) && (test_kqfd() < 0)) {
    // should this ever happen?
    // It would be strange to change kqfd with thread change.
    // Might nee to change this into an ceph_assert() in the future.
    ldout(cct,0) << funcname << " Warning: Recreating old kqfd. "
		 << "This should not happen!!!" << dendl;
    kqfd = -1;
  }
  if (kqfd == -1) {
    kqfd = kqueue();
    ldout(cct,30) << funcname << " kqueue: new kqfd = " << kqfd
		  << " (was: " << oldkqfd << ")"
		  << dendl;
    if (kqfd < 0) {
      lderr(cct) << funcname << " unable to do kqueue: "
		 << cpp_strerror(errno) << dendl;
      return -errno;
    }
    // replay every saved registration onto the new descriptor
    if (restore_events()< 0) {
      lderr(cct) << funcname << " unable restore all events "
		 << cpp_strerror(errno) << dendl;
      return -errno;
    }
  }
  return 0;
}
// Allocate the result buffer and the shadow registration table.  The
// kqueue descriptor itself is created lazily (see test_thread_change),
// because it is only valid in the thread that will use it.
int KqueueDriver::init(EventCenter *c, int nevent)
{
  // keep track of possible changes of our thread
  // because change of thread kills the kqfd
  mythread = pthread_self();
  // Reserve the space to accept the kevent return events.
  res_events = (struct kevent*)malloc(sizeof(struct kevent)*nevent);
  if (!res_events) {
    lderr(cct) << __func__ << " unable to malloc memory: "
	       << cpp_strerror(errno) << dendl;
    return -ENOMEM;
  }
  memset(res_events, 0, sizeof(struct kevent)*nevent);
  size = nevent;
  // Reserve the space to keep all of the events set, so it can be redone
  // when we change trhread ID.
  sav_events = (struct SaveEvent*)malloc(sizeof(struct SaveEvent)*nevent);
  if (!sav_events) {
    lderr(cct) << __func__ << " unable to malloc memory: "
	       << cpp_strerror(errno) << dendl;
    return -ENOMEM;
  }
  memset(sav_events, 0, sizeof(struct SaveEvent)*nevent);
  sav_max = nevent;
  // Delay assigning a descriptor until it is really needed.
  // kqfd = kqueue();
  kqfd = -1;
  return 0;
}
// Register `add_mask` (clear/edge-style via EV_CLEAR) for `fd` and record
// the combined mask in the shadow table so it can be replayed after a
// thread change.  Returns 0 or -errno.
int KqueueDriver::add_event(int fd, int cur_mask, int add_mask)
{
  struct kevent ke[2];
  int num = 0;
  ldout(cct,30) << __func__ << " add event kqfd = " << kqfd << " fd = " << fd
	        << " cur_mask = " << cur_mask << " add_mask = " << add_mask
	        << dendl;
  int r = test_thread_change(__func__);
  if ( r < 0 )
    return r;
  if (add_mask & EVENT_READABLE)
    EV_SET(&ke[num++], fd, EVFILT_READ, EV_ADD|EV_CLEAR, 0, 0, NULL);
  if (add_mask & EVENT_WRITABLE)
    EV_SET(&ke[num++], fd, EVFILT_WRITE, EV_ADD|EV_CLEAR, 0, 0, NULL);
  if (num) {
    if (kevent(kqfd, ke, num, NULL, 0, KEVENT_NOWAIT) == -1) {
      lderr(cct) << __func__ << " unable to add event: "
                 << cpp_strerror(errno) << dendl;
      return -errno;
    }
  }
  // keep what we set
  // (resize_events() asserts internally on allocation failure, so the
  // subsequent store cannot index a stale buffer)
  if (fd >= sav_max)
    resize_events(sav_max+5000);
  sav_events[fd].mask = cur_mask | add_mask;
  return 0;
}
// Remove the registered filters in (cur_mask & del_mask) for `fd` and
// update the shadow table accordingly.  Returns 0 or -errno.
int KqueueDriver::del_event(int fd, int cur_mask, int del_mask)
{
  struct kevent ke[2];
  int num = 0;
  // only filters that are actually registered can be deleted
  int mask = cur_mask & del_mask;
  ldout(cct,30) << __func__ << " delete event kqfd = " << kqfd
                << " fd = " << fd << " cur_mask = " << cur_mask
                << " del_mask = " << del_mask << dendl;
  int r = test_thread_change(__func__);
  if ( r < 0 )
    return r;
  if (mask & EVENT_READABLE)
    EV_SET(&ke[num++], fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
  if (mask & EVENT_WRITABLE)
    EV_SET(&ke[num++], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);
  if (num) {
    int r = 0;
    if ((r = kevent(kqfd, ke, num, NULL, 0, KEVENT_NOWAIT)) < 0) {
      lderr(cct) << __func__ << " kevent: delete fd=" << fd << " mask=" << mask
                 << " failed." << cpp_strerror(errno) << dendl;
      return -errno;
    }
  }
  // keep the administration
  sav_events[fd].mask = cur_mask & ~del_mask;
  return 0;
}
// Grow the shadow registration table to at least `newsize` entries,
// zeroing only the newly appended tail.  Returns 0 or -ENOMEM.
int KqueueDriver::resize_events(int newsize)
{
  ldout(cct,30) << __func__ << " kqfd = " << kqfd << "newsize = " << newsize
	        << dendl;
  if (newsize > sav_max) {
    sav_events = (struct SaveEvent*)realloc(sav_events, sizeof(struct SaveEvent)*newsize);
    if (!sav_events) {
      lderr(cct) << __func__ << " unable to realloc memory: "
	         << cpp_strerror(errno) << dendl;
      ceph_assert(sav_events);
      return -ENOMEM;
    }
    // Zero the tail [sav_max, newsize).  The previous code started the
    // memset at `size` (the kevent result-array length), which after a
    // prior grow both wiped valid saved masks in [size, sav_max) and
    // cleared the wrong number of entries.
    memset(&sav_events[sav_max], 0, sizeof(struct SaveEvent)*(newsize-sav_max));
    sav_max = newsize;
  }
  return 0;
}
// Wait up to *tvp (or block indefinitely when tvp == NULL) and translate
// ready kevents into FiredFileEvent entries.  Returns the event count or
// -errno.
int KqueueDriver::event_wait(std::vector<FiredFileEvent> &fired_events,
			     struct timeval *tvp)
{
  int retval, numevents = 0;
  // Zero-initialize so the timeout values logged below are well defined
  // even on the tvp == NULL path, where `timeout` is never filled in.
  struct timespec timeout = {0, 0};
  ldout(cct,10) << __func__ << " kqfd = " << kqfd << dendl;
  int r = test_thread_change(__func__);
  if ( r < 0 )
    return r;
  if (tvp != NULL) {
    timeout.tv_sec = tvp->tv_sec;
    timeout.tv_nsec = tvp->tv_usec * 1000;
    ldout(cct,20) << __func__ << " "
		  << timeout.tv_sec << " sec "
		  << timeout.tv_nsec << " nsec"
		  << dendl;
    retval = kevent(kqfd, NULL, 0, res_events, size, &timeout);
  } else {
    ldout(cct,30) << __func__ << " event_wait: " << " NULL" << dendl;
    retval = kevent(kqfd, NULL, 0, res_events, size, KEVENT_NOWAIT);
  }
  ldout(cct,25) << __func__ << " kevent retval: " << retval << dendl;
  if (retval < 0) {
    lderr(cct) << __func__ << " kqueue error: "
	       << cpp_strerror(errno) << dendl;
    return -errno;
  } else if (retval == 0) {
    ldout(cct,5) << __func__ << " Hit timeout("
		 << timeout.tv_sec << " sec "
		 << timeout.tv_nsec << " nsec"
		 << ")." << dendl;
  } else {
    int j;
    numevents = retval;
    fired_events.resize(numevents);
    // Translate kevent filters back into EVENT_* mask bits; EV_ERROR is
    // surfaced as both so the owning callbacks notice and fail the fd.
    for (j = 0; j < numevents; j++) {
      int mask = 0;
      struct kevent *e = res_events + j;
      if (e->filter == EVFILT_READ) mask |= EVENT_READABLE;
      if (e->filter == EVFILT_WRITE) mask |= EVENT_WRITABLE;
      if (e->flags & EV_ERROR) mask |= EVENT_READABLE|EVENT_WRITABLE;
      fired_events[j].fd = (int)e->ident;
      fired_events[j].mask = mask;
    }
  }
  return numevents;
}
| 8,181 | 29.416357 | 90 |
cc
|
null |
ceph-main/src/msg/async/EventKqueue.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 UnitedStack <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MSG_EVENTKQUEUE_H
#define CEPH_MSG_EVENTKQUEUE_H
#include <sys/types.h>
#include <sys/event.h>
#include <unistd.h>
#include "Event.h"
/*
 * kqueue(2)-backed EventDriver (BSD/macOS).  A kqueue descriptor is only
 * valid in the thread that created it, so the driver keeps a shadow copy
 * of every registration (sav_events) and replays it whenever it detects
 * use from a new thread.
 */
class KqueueDriver : public EventDriver {
  int kqfd;
  pthread_t mythread;
  struct kevent *res_events;    // buffer for kevent() results, `size` entries
  CephContext *cct;
  int size;
  // Keep what we set on the kqfd
  struct SaveEvent{
    int fd;
    int mask;
  };
  struct SaveEvent *sav_events;
  int sav_max;
  int restore_events();
  int test_kqfd();
  int test_thread_change(const char* funcname);
 public:
  // sav_events must be initialized here: leaving it indeterminate meant
  // destroying a never-init()ed driver handed a wild pointer to free().
  explicit KqueueDriver(CephContext *c): kqfd(-1), res_events(NULL), cct(c),
					 size(0), sav_events(NULL), sav_max(0) {}
  virtual ~KqueueDriver() {
    if (kqfd != -1)
      close(kqfd);
    if (res_events)
      free(res_events);
    size = 0;
    if (sav_events)
      free(sav_events);
    sav_max = 0;
  }
  int init(EventCenter *c, int nevent) override;
  int add_event(int fd, int cur_mask, int add_mask) override;
  int del_event(int fd, int cur_mask, int del_mask) override;
  int resize_events(int newsize) override;
  int event_wait(std::vector<FiredFileEvent> &fired_events,
                 struct timeval *tp) override;
};
#endif
| 1,614 | 22.75 | 77 |
h
|
null |
ceph-main/src/msg/async/EventPoll.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2022 Rafael Lopez <[email protected]>
*
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/errno.h"
#include "EventPoll.h"
#include <unistd.h>
#define dout_subsys ceph_subsys_ms
#undef dout_prefix
#define dout_prefix *_dout << "PollDriver."
// Guard each operation code independently: the previous nested #ifndef
// structure skipped POLL_MOD/POLL_DEL whenever POLL_ADD was already
// defined elsewhere.
#ifndef POLL_ADD
#define POLL_ADD 1
#endif
#ifndef POLL_MOD
#define POLL_MOD 2
#endif
#ifndef POLL_DEL
#define POLL_DEL 3
#endif
// Allocate the initial pollfd table.  The table grows on demand inside
// poll_ctl(), bounded by hard_max_pfds.
int PollDriver::init(EventCenter *c, int nevent) {
  // pfds array will auto scale up to hard_max_pfds, which should be
  // greater than total daemons/op_threads (todo: cfg option?)
  hard_max_pfds = 8192;
  // 128 seems a good starting point, cover clusters up to ~350 OSDs
  // with default ms_async_op_threads
  max_pfds = 128;

  pfds = (POLLFD*)calloc(max_pfds, sizeof(POLLFD));
  if (pfds == nullptr) {
    lderr(cct) << __func__ << " unable to allocate memory " << dendl;
    return -ENOMEM;
  }

  // Mark every slot unused; poll(2) ignores entries whose fd is -1.
  for (int slot = 0; slot < max_pfds; ++slot) {
    pfds[slot].fd = -1;
    pfds[slot].events = 0;
    pfds[slot].revents = 0;
  }
  return 0;
}
// Helper func to register/unregister interest in a FD's events by
// manipulating it's entry in pfds array
int PollDriver::poll_ctl(int fd, int op, int events) {
int pos = 0;
if (op == POLL_ADD) {
// Find an empty pollfd slot
for(pos = 0; pos < max_pfds ; pos++){
if(pfds[pos].fd == -1){
pfds[pos].fd = fd;
pfds[pos].events = events;
pfds[pos].revents = 0;
return 0;
}
}
// We ran out of slots, try to increase
if (max_pfds < hard_max_pfds) {
ldout(cct, 10) << __func__ << " exhausted pollfd slots"
<< ", doubling to " << max_pfds*2 << dendl;
pfds = (POLLFD*)realloc(pfds, max_pfds*2*sizeof(POLLFD));
if (!pfds) {
lderr(cct) << __func__ << " unable to realloc for more pollfd slots"
<< dendl;
return -ENOMEM;
}
// Initialise new slots
for (int i = max_pfds ; i < max_pfds*2 ; i++){
pfds[i].fd = -1;
pfds[i].events = 0;
pfds[i].revents = 0;
}
max_pfds = max_pfds*2;
pfds[pos].fd = fd;
pfds[pos].events = events;
pfds[pos].revents = 0;
return 0;
} else {
// Hit hard limit
lderr(cct) << __func__ << " hard limit for file descriptors per op"
<< " thread reached (" << hard_max_pfds << ")" << dendl;
return -EMFILE;
}
} else if (op == POLL_MOD) {
for (pos = 0; pos < max_pfds; pos++ ){
if (pfds[pos].fd == fd) {
pfds[pos].events = events;
return 0;
}
}
} else if (op == POLL_DEL) {
for (pos = 0; pos < max_pfds; pos++ ){
if (pfds[pos].fd == fd) {
pfds[pos].fd = -1;
pfds[pos].events = 0;
return 0;
}
}
}
return 0;
}
// Register (or extend) interest in events for fd.
int PollDriver::add_event(int fd, int cur_mask, int add_mask) {
  ldout(cct, 10) << __func__ << " add event to fd=" << fd << " mask="
                 << add_mask << dendl;
  // First registration for this fd uses POLL_ADD; afterwards we modify
  // the existing table entry.
  const int op = (cur_mask == EVENT_NONE) ? POLL_ADD : POLL_MOD;
  const int wanted = add_mask | cur_mask;  // merge previously wanted events
  int events = 0;
  if (wanted & EVENT_READABLE)
    events |= POLLIN;
  if (wanted & EVENT_WRITABLE)
    events |= POLLOUT;
  return poll_ctl(fd, op, events);
}
// Drop interest in delmask's events; removes the fd entirely when no
// events remain registered.
int PollDriver::del_event(int fd, int cur_mask, int delmask) {
  ldout(cct, 10) << __func__ << " del event fd=" << fd << " cur mask="
                 << cur_mask << dendl;
  // Events that should stay registered after the deletion.
  const int remaining = cur_mask & ~delmask;
  int op;
  int events = 0;
  if (remaining == EVENT_NONE) {
    op = POLL_DEL;
  } else {
    op = POLL_MOD;
    if (remaining & EVENT_READABLE)
      events |= POLLIN;
    if (remaining & EVENT_WRITABLE)
      events |= POLLOUT;
  }
  poll_ctl(fd, op, events);
  return 0;
}
// No-op: the pfds table auto-scales inside poll_ctl(), so explicit
// resize requests are ignored.
int PollDriver::resize_events(int newsize) {
  return 0;
}
// Block in poll(2)/WSAPoll for up to *tvp (forever when tvp is NULL) and
// translate ready pollfds into FiredFileEvents.  Returns the number of
// events appended to fired_events.
int PollDriver::event_wait(std::vector<FiredFileEvent> &fired_events,
			   struct timeval *tvp) {
  // poll takes a millisecond timeout; -1 blocks indefinitely.
  const int timeout_ms = tvp ? (tvp->tv_sec*1000 + tvp->tv_usec/1000) : -1;
  int retval;
#ifdef _WIN32
  retval = WSAPoll(pfds, max_pfds, timeout_ms);
#else
  retval = poll(pfds, max_pfds, timeout_ms);
#endif

  int numevents = 0;
  if (retval > 0) {
    for (int idx = 0; idx < max_pfds; idx++) {
      if (pfds[idx].fd == -1)
	continue;
      int mask = 0;
      if (pfds[idx].revents & POLLIN)
	mask |= EVENT_READABLE;
      if (pfds[idx].revents & POLLOUT)
	mask |= EVENT_WRITABLE;
      // Hangup and error conditions are surfaced as both readable and
      // writable so the owner notices and tears the fd down.
      if (pfds[idx].revents & (POLLHUP | POLLERR))
	mask |= EVENT_READABLE | EVENT_WRITABLE;
      if (mask) {
	struct FiredFileEvent fe;
	fe.fd = pfds[idx].fd;
	fe.mask = mask;
	fired_events.push_back(fe);
	numevents++;
      }
    }
  }
  return numevents;
}
| 4,904 | 23.772727 | 72 |
cc
|
null |
ceph-main/src/msg/async/EventPoll.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2022 Rafael Lopez <[email protected]>
*
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MSG_EVENTPOLL_H
#define CEPH_MSG_EVENTPOLL_H
#ifdef _WIN32
#include <winsock2.h>
#else
#include <poll.h>
#endif
#include "Event.h"
typedef struct pollfd POLLFD;
class PollDriver : public EventDriver {
int max_pfds;
int hard_max_pfds;
POLLFD *pfds;
CephContext *cct;
private:
int poll_ctl(int, int, int);
public:
explicit PollDriver(CephContext *c): cct(c) {}
~PollDriver() override {}
int init(EventCenter *c, int nevent) override;
int add_event(int fd, int cur_mask, int add_mask) override;
int del_event(int fd, int cur_mask, int del_mask) override;
int resize_events(int newsize) override;
int event_wait(std::vector<FiredFileEvent> &fired_events,
struct timeval *tp) override;
};
#endif
| 1,177 | 22.098039 | 71 |
h
|
null |
ceph-main/src/msg/async/EventSelect.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 UnitedStack <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/errno.h"
#include "EventSelect.h"
#include <unistd.h>
#include <sys/select.h>
#define dout_subsys ceph_subsys_ms
#undef dout_prefix
#define dout_prefix *_dout << "SelectDriver."
// Initialize empty interest sets.  select(2) needs no setup beyond that.
int SelectDriver::init(EventCenter *c, int nevent)
{
#ifndef _WIN32
  ldout(cct, 0) << "Select isn't suitable for production env, just avoid "
                << "compiling error or special purpose" << dendl;
#endif
  FD_ZERO(&rfds);
  FD_ZERO(&wfds);
  max_fd = 0;
  return 0;
}
// Add fd to the read/write interest sets according to the merged mask.
// NOTE(review): fds >= FD_SETSIZE are not rejected here; FD_SET on such a
// descriptor is undefined — acceptable for this non-production driver,
// but worth confirming callers never exceed it.
int SelectDriver::add_event(int fd, int cur_mask, int add_mask)
{
  ldout(cct, 10) << __func__ << " add event to fd=" << fd << " mask=" << add_mask
                 << dendl;

  // Register the union of already-wanted and newly-requested events.
  int mask = cur_mask | add_mask;
  if (mask & EVENT_READABLE)
    FD_SET(fd, &rfds);
  if (mask & EVENT_WRITABLE)
    FD_SET(fd, &wfds);

  // select() must be told the highest descriptor number in use.
  if (fd > max_fd)
    max_fd = fd;

  return 0;
}
// Remove only the events named in delmask; other registrations on the fd
// stay intact.  max_fd is deliberately left alone (harmless overestimate).
int SelectDriver::del_event(int fd, int cur_mask, int delmask)
{
  ldout(cct, 10) << __func__ << " del event fd=" << fd << " cur mask=" << cur_mask
                 << dendl;

  if (delmask & EVENT_READABLE)
    FD_CLR(fd, &rfds);
  if (delmask & EVENT_WRITABLE)
    FD_CLR(fd, &wfds);
  return 0;
}
// No-op: fd_set capacity is fixed at FD_SETSIZE, nothing to resize.
int SelectDriver::resize_events(int newsize)
{
  return 0;
}
// Wait in select(2) and convert ready descriptors into FiredFileEvents.
// Returns the number of events appended (0 on timeout or select error).
int SelectDriver::event_wait(std::vector<FiredFileEvent> &fired_events, struct timeval *tvp)
{
  int numevents = 0;

  // Operate on copies: select() destructively updates the sets it gets.
  memcpy(&_rfds, &rfds, sizeof(fd_set));
  memcpy(&_wfds, &wfds, sizeof(fd_set));

  const int retval = select(max_fd+1, &_rfds, &_wfds, NULL, tvp);
  if (retval > 0) {
    for (int fd = 0; fd <= max_fd; fd++) {
      int mask = 0;
      if (FD_ISSET(fd, &_rfds))
        mask |= EVENT_READABLE;
      if (FD_ISSET(fd, &_wfds))
        mask |= EVENT_WRITABLE;
      if (mask == 0)
        continue;

      struct FiredFileEvent fe;
      fe.fd = fd;
      fe.mask = mask;
      fired_events.push_back(fe);
      numevents++;
    }
  }
  return numevents;
}
| 2,356 | 23.05102 | 92 |
cc
|
null |
ceph-main/src/msg/async/EventSelect.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 UnitedStack <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MSG_EVENTSELECT_H
#define CEPH_MSG_EVENTSELECT_H
#include "Event.h"
// EventDriver built on select(2).  init() logs that it is not meant for
// production use; it exists as a portable fallback.
class SelectDriver : public EventDriver {
  fd_set rfds, wfds;   // registered read/write interest
  /* We need to have a copy of the fd sets as it's not safe to reuse
   * FD sets after select(). */
  fd_set _rfds, _wfds;
  int max_fd;          // highest registered fd; select() needs max_fd+1
  CephContext *cct;

 public:
  explicit SelectDriver(CephContext *c): max_fd(0), cct(c) {}
  ~SelectDriver() override {}

  int init(EventCenter *c, int nevent) override;
  int add_event(int fd, int cur_mask, int add_mask) override;
  int del_event(int fd, int cur_mask, int del_mask) override;
  int resize_events(int newsize) override;
  int event_wait(std::vector<FiredFileEvent> &fired_events,
                 struct timeval *tp) override;
};
#endif
| 1,205 | 27.046512 | 71 |
h
|
null |
ceph-main/src/msg/async/PosixStack.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 XSKY <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <sys/socket.h>
#include <netinet/tcp.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <errno.h>
#include <algorithm>
#include "PosixStack.h"
#include "include/buffer.h"
#include "include/str_list.h"
#include "common/errno.h"
#include "common/strtol.h"
#include "common/dout.h"
#include "msg/Messenger.h"
#include "include/compat.h"
#include "include/sock_compat.h"
#define dout_subsys ceph_subsys_ms
#undef dout_prefix
#define dout_prefix *_dout << "PosixStack "
/**
 * ConnectedSocketImpl backed by a plain kernel socket fd.
 *
 * The fd lifecycle is managed explicitly by the owner via close() /
 * shutdown(); this class does not close the fd in a destructor.
 */
class PosixConnectedSocketImpl final : public ConnectedSocketImpl {
  ceph::NetHandler &handler;  // shared socket-option / reconnect helper
  int _fd;                    // underlying socket descriptor
  entity_addr_t sa;           // peer address (used to finish nonblocking connect)
  bool connected;             // true once the connect has completed

 public:
  explicit PosixConnectedSocketImpl(ceph::NetHandler &h, const entity_addr_t &sa,
                                    int f, bool connected)
      : handler(h), _fd(f), sa(sa), connected(connected) {}

  // Returns 1 when connected, 0 while a nonblocking connect is still in
  // progress, or a negative error code from reconnect().
  int is_connected() override {
    if (connected)
      return 1;

    int r = handler.reconnect(sa, _fd);
    if (r == 0) {
      connected = true;
      return 1;
    } else if (r < 0) {
      return r;
    } else {
      return 0;
    }
  }

  // Read up to len bytes; returns bytes read or -errno.
  ssize_t read(char *buf, size_t len) override {
#ifdef _WIN32
    ssize_t r = ::recv(_fd, buf, len, 0);
#else
    ssize_t r = ::read(_fd, buf, len);
#endif
    if (r < 0)
      r = -ceph_sock_errno();
    return r;
  }

  // return the sent length
  // < 0 means error occurred
  #ifndef _WIN32
  // Drain 'msg' through sendmsg(2), advancing the iovec past whatever was
  // accepted, until all 'len' bytes are sent or the socket would block.
  static ssize_t do_sendmsg(int fd, struct msghdr &msg, unsigned len, bool more)
  {
    size_t sent = 0;
    while (1) {
      MSGR_SIGPIPE_STOPPER;
      ssize_t r;
      r = ::sendmsg(fd, &msg, MSG_NOSIGNAL | (more ? MSG_MORE : 0));
      if (r < 0) {
        int err = ceph_sock_errno();
        if (err == EINTR) {
          continue;
        } else if (err == EAGAIN) {
          // socket full: report the partial count to the caller
          break;
        }
        return -err;
      }

      sent += r;
      if (len == sent) break;

      // Advance msg_iov past the bytes the kernel accepted.
      while (r > 0) {
        if (msg.msg_iov[0].iov_len <= (size_t)r) {
          // drain this whole item
          r -= msg.msg_iov[0].iov_len;
          msg.msg_iov++;
          msg.msg_iovlen--;
        } else {
          msg.msg_iov[0].iov_base = (char *)msg.msg_iov[0].iov_base + r;
          msg.msg_iov[0].iov_len -= r;
          break;
        }
      }
    }
    return (ssize_t)sent;
  }

  // Send as much of bl as the socket accepts; sent data is spliced off the
  // front of bl so the caller can retry with the remainder.  Returns bytes
  // sent or a negative error.
  ssize_t send(ceph::buffer::list &bl, bool more) override {
    size_t sent_bytes = 0;
    auto pb = std::cbegin(bl.buffers());
    uint64_t left_pbrs = bl.get_num_buffers();
    while (left_pbrs) {
      struct msghdr msg;
      struct iovec msgvec[IOV_MAX];
      // at most IOV_MAX buffer fragments per sendmsg() call
      uint64_t size = std::min<uint64_t>(left_pbrs, IOV_MAX);
      left_pbrs -= size;
      // FIPS zeroization audit 20191115: this memset is not security related.
      memset(&msg, 0, sizeof(msg));
      msg.msg_iovlen = size;
      msg.msg_iov = msgvec;
      unsigned msglen = 0;
      for (auto iov = msgvec; iov != msgvec + size; iov++) {
	iov->iov_base = (void*)(pb->c_str());
	iov->iov_len = pb->length();
	msglen += pb->length();
	++pb;
      }
      ssize_t r = do_sendmsg(_fd, msg, msglen, left_pbrs || more);
      if (r < 0)
        return r;

      // "r" is the remaining length
      sent_bytes += r;
      if (static_cast<unsigned>(r) < msglen)
        break;
      // only "r" == 0 continue
    }

    if (sent_bytes) {
      ceph::buffer::list swapped;
      if (sent_bytes < bl.length()) {
        bl.splice(sent_bytes, bl.length()-sent_bytes, &swapped);
        bl.swap(swapped);
      } else {
        bl.clear();
      }
    }

    return static_cast<ssize_t>(sent_bytes);
  }
  #else
  // Windows variant of send() using WSASend with scatter/gather buffers.
  ssize_t send(bufferlist &bl, bool more) override
  {
    size_t total_sent_bytes = 0;
    auto pb = std::cbegin(bl.buffers());
    uint64_t left_pbrs = bl.get_num_buffers();

    while (left_pbrs) {
      WSABUF msgvec[IOV_MAX];
      uint64_t size = std::min<uint64_t>(left_pbrs, IOV_MAX);
      left_pbrs -= size;
      unsigned msglen = 0;

      for (auto iov = msgvec; iov != msgvec + size; iov++) {
        iov->buf = const_cast<char*>(pb->c_str());
        iov->len = pb->length();
        msglen += pb->length();
        ++pb;
      }

      DWORD sent_bytes = 0;
      DWORD flags = 0;
      if (more)
        flags |= MSG_PARTIAL;
      int ret_val = WSASend(_fd, msgvec, size, &sent_bytes, flags, NULL, NULL);
      if (ret_val)
        return -ret_val;

      total_sent_bytes += sent_bytes;
      if (static_cast<unsigned>(sent_bytes) < msglen)
        break;
    }

    if (total_sent_bytes) {
      bufferlist swapped;
      if (total_sent_bytes < bl.length()) {
        bl.splice(total_sent_bytes, bl.length()-total_sent_bytes, &swapped);
        bl.swap(swapped);
      } else {
        bl.clear();
      }
    }

    return static_cast<ssize_t>(total_sent_bytes);
  }
  #endif
  void shutdown() override {
    ::shutdown(_fd, SHUT_RDWR);
  }
  void close() override {
    compat_closesocket(_fd);
  }
  void set_priority(int sd, int prio, int domain) override {
    handler.set_priority(sd, prio, domain);
  }
  int fd() const override {
    return _fd;
  }
  friend class PosixServerSocketImpl;
  friend class PosixNetworkStack;
};
// Listening-socket wrapper for the POSIX stack; accept() produces a
// PosixConnectedSocketImpl per incoming connection.
class PosixServerSocketImpl : public ServerSocketImpl {
  ceph::NetHandler &handler;
  int _fd;  // listening descriptor; set to -1 by abort_accept()

 public:
  explicit PosixServerSocketImpl(ceph::NetHandler &h, int f,
				 const entity_addr_t& listen_addr, unsigned slot)
    : ServerSocketImpl(listen_addr.get_type(), slot),
      handler(h), _fd(f) {}
  int accept(ConnectedSocket *sock, const SocketOptions &opts, entity_addr_t *out, Worker *w) override;
  void abort_accept() override {
    ::close(_fd);
    _fd = -1;
  }
  int fd() const override {
    return _fd;
  }
};
/**
 * Accept one pending connection from the listening socket.
 *
 * On success *sock is replaced with a connected socket and *out holds the
 * peer address.  Returns 0 or a negative errno (e.g. -EAGAIN when nothing
 * is pending).
 */
int PosixServerSocketImpl::accept(ConnectedSocket *sock, const SocketOptions &opt, entity_addr_t *out, Worker *w) {
  ceph_assert(sock);
  sockaddr_storage ss;
  socklen_t slen = sizeof(ss);
  int sd = accept_cloexec(_fd, (sockaddr*)&ss, &slen);
  if (sd < 0) {
    return -ceph_sock_errno();
  }

  int r = handler.set_nonblock(sd);
  if (r < 0) {
    // Capture the error before ::close(), which may clobber errno.
    int err = ceph_sock_errno();
    ::close(sd);
    return -err;
  }

  r = handler.set_socket_options(sd, opt.nodelay, opt.rcbuf_size);
  if (r < 0) {
    int err = ceph_sock_errno();
    ::close(sd);
    return -err;
  }

  ceph_assert(NULL != out); //out should not be NULL in accept connection

  out->set_type(addr_type);
  out->set_sockaddr((sockaddr*)&ss);
  handler.set_priority(sd, opt.priority, out->get_family());

  std::unique_ptr<PosixConnectedSocketImpl> csi(new PosixConnectedSocketImpl(handler, *out, sd, true));
  *sock = ConnectedSocket(std::move(csi));
  return 0;
}
// No per-worker setup is required for the POSIX stack.
void PosixWorker::initialize()
{
}
/**
 * Create, configure, bind and listen on a socket for address 'sa'.
 * On success fills *sock; returns 0 or a negative errno.
 */
int PosixWorker::listen(entity_addr_t &sa,
                        unsigned addr_slot,
			const SocketOptions &opt,
			ServerSocket *sock)
{
  int listen_sd = net.create_socket(sa.get_family(), true);
  if (listen_sd < 0) {
    return -ceph_sock_errno();
  }

  int r = net.set_nonblock(listen_sd);
  if (r < 0) {
    // Capture the error before ::close(), which may clobber errno
    // (the bind/listen paths below already follow this pattern).
    int err = ceph_sock_errno();
    ::close(listen_sd);
    return -err;
  }

  r = net.set_socket_options(listen_sd, opt.nodelay, opt.rcbuf_size);
  if (r < 0) {
    int err = ceph_sock_errno();
    ::close(listen_sd);
    return -err;
  }

  r = ::bind(listen_sd, sa.get_sockaddr(), sa.get_sockaddr_len());
  if (r < 0) {
    r = -ceph_sock_errno();
    ldout(cct, 10) << __func__ << " unable to bind to " << sa.get_sockaddr()
                   << ": " << cpp_strerror(r) << dendl;
    ::close(listen_sd);
    return r;
  }

  r = ::listen(listen_sd, cct->_conf->ms_tcp_listen_backlog);
  if (r < 0) {
    r = -ceph_sock_errno();
    lderr(cct) << __func__ << " unable to listen on " << sa << ": " << cpp_strerror(r) << dendl;
    ::close(listen_sd);
    return r;
  }

  *sock = ServerSocket(
          std::unique_ptr<PosixServerSocketImpl>(
	    new PosixServerSocketImpl(net, listen_sd, sa, addr_slot)));
  return 0;
}
// Open a (possibly nonblocking) connection to 'addr' and wrap the fd in a
// ConnectedSocket.  With opts.nonblock the socket is handed back before the
// handshake finishes; PosixConnectedSocketImpl::is_connected() completes it.
int PosixWorker::connect(const entity_addr_t &addr, const SocketOptions &opts, ConnectedSocket *socket) {
  int sd;

  if (opts.nonblock) {
    sd = net.nonblock_connect(addr, opts.connect_bind_addr);
  } else {
    sd = net.connect(addr, opts.connect_bind_addr);
  }

  if (sd < 0) {
    // NOTE(review): assumes errno still reflects the connect failure here —
    // verify net.connect/nonblock_connect don't already return -errno.
    return -ceph_sock_errno();
  }

  net.set_priority(sd, opts.priority, addr.get_family());
  *socket = ConnectedSocket(
      std::unique_ptr<PosixConnectedSocketImpl>(new PosixConnectedSocketImpl(net, addr, sd, !opts.nonblock)));
  return 0;
}
// Nothing beyond the generic NetworkStack state is needed for POSIX.
PosixNetworkStack::PosixNetworkStack(CephContext *c)
    : NetworkStack(c)
{
}
| 8,822 | 24.798246 | 115 |
cc
|
null |
ceph-main/src/msg/async/PosixStack.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 XSKY <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MSG_ASYNC_POSIXSTACK_H
#define CEPH_MSG_ASYNC_POSIXSTACK_H
#include <thread>
#include "msg/msg_types.h"
#include "msg/async/net_handler.h"
#include "Stack.h"
// Worker for the plain POSIX (kernel TCP) network stack.
class PosixWorker : public Worker {
  ceph::NetHandler net;   // socket creation / option helper
  void initialize() override;
 public:
  PosixWorker(CephContext *c, unsigned i)
      : Worker(c, i), net(c) {}
  // Bind and listen on 'sa'; fills *socks on success.
  int listen(entity_addr_t &sa,
	     unsigned addr_slot,
	     const SocketOptions &opt,
	     ServerSocket *socks) override;
  // Connect to 'addr'; fills *socket on success.
  int connect(const entity_addr_t &addr, const SocketOptions &opts, ConnectedSocket *socket) override;
};
// NetworkStack whose workers run as plain std::threads over kernel sockets.
class PosixNetworkStack : public NetworkStack {
  std::vector<std::thread> threads;   // one thread per spawned worker

  virtual Worker* create_worker(CephContext *c, unsigned worker_id) override {
    return new PosixWorker(c, worker_id);
  }

 public:
  explicit PosixNetworkStack(CephContext *c);

  void spawn_worker(std::function<void ()> &&func) override {
    threads.emplace_back(std::move(func));
  }
  void join_worker(unsigned i) override {
    ceph_assert(threads.size() > i && threads[i].joinable());
    threads[i].join();
  }
};
#endif //CEPH_MSG_ASYNC_POSIXSTACK_H
| 1,594 | 25.583333 | 102 |
h
|
null |
ceph-main/src/msg/async/Protocol.cc
|
#include "Protocol.h"
#include "AsyncConnection.h"
#include "AsyncMessenger.h"
// Base protocol state attached to an AsyncConnection; caches the owning
// messenger and CephContext for subclasses.
Protocol::Protocol(int type, AsyncConnection *connection)
    : proto_type(type),
      connection(connection),
      messenger(connection->async_msgr),
      cct(connection->async_msgr->cct) {
  // make_shared allocates the control block and the AuthConnectionMeta in
  // a single allocation (vs. reset(new ...) which needs two).
  auth_meta = std::make_shared<AuthConnectionMeta>();
}

Protocol::~Protocol() {}
| 340 | 21.733333 | 57 |
cc
|
null |
ceph-main/src/msg/async/Protocol.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef _MSG_ASYNC_PROTOCOL_
#define _MSG_ASYNC_PROTOCOL_
#include <list>
#include <map>
#include "AsyncConnection.h"
#include "include/buffer.h"
#include "include/msgr.h"
/*
* Continuation Helper Classes
*/
#include <memory>
#include <tuple>
// Base of the continuation machinery: one stored step of a state machine
// over class C.  call() runs the step on 'foo' and returns the next
// continuation to execute, or nullptr to stop (see CONTINUATION_RUN).
template <class C>
class Ct {
 public:
  virtual ~Ct() {}
  virtual Ct<C> *call(C *foo) const = 0;
};
// Continuation bound to a member function of C taking Args...; the
// arguments are stashed via setParams() and replayed on call().
template <class C, typename... Args>
class CtFun : public Ct<C> {
 private:
  using fn_t = Ct<C> *(C::*)(Args...);
  fn_t _f;
  std::tuple<Args...> _params;

 public:
  CtFun(fn_t f) : _f(f) {}

  inline void setParams(Args... args) { _params = std::make_tuple(args...); }
  inline Ct<C> *call(C *foo) const override {
    // Unpack the stored tuple straight into the member-function call.
    return std::apply(
	[this, foo](auto &&... unpacked) {
	  return (foo->*_f)(std::forward<decltype(unpacked)>(unpacked)...);
	},
	_params);
  }
};
// Buffer node handed to zero-copy read continuations.
using rx_buffer_t =
  std::unique_ptr<ceph::buffer::ptr_node, ceph::buffer::ptr_node::disposer>;

// Continuation that carries a received buffer node plus the read result
// code into its member-function step.
template <class C>
class CtRxNode : public Ct<C> {
  using fn_t = Ct<C> *(C::*)(rx_buffer_t&&, int r);
  fn_t _f;

 public:
  mutable rx_buffer_t node;  // mutable: call() is const but moves the node out
  int r;

  CtRxNode(fn_t f) : _f(f) {}
  void setParams(rx_buffer_t &&node, int r) {
    this->node = std::move(node);
    this->r = r;
  }
  inline Ct<C> *call(C *foo) const override {
    return (foo->*_f)(std::move(node), r);
  }
};
// Convenience aliases for the common continuation signatures.
template <class C> using CONTINUATION_TYPE = CtFun<C>;
template <class C> using CONTINUATION_TX_TYPE = CtFun<C, int>;
template <class C> using CONTINUATION_RX_TYPE = CtFun<C, char*, int>;
template <class C> using CONTINUATION_RXBPTR_TYPE = CtRxNode<C>;

// Declare a continuation member F_cont bound to method &C::F.
#define CONTINUATION_DECL(C, F, ...)                    \
  CtFun<C, ##__VA_ARGS__> F##_cont { (&C::F) };

// The continuation object declared for F.
#define CONTINUATION(F) F##_cont

// Store call arguments into F's continuation and yield a pointer to it.
#define CONTINUE(F, ...) (F##_cont.setParams(__VA_ARGS__), &F##_cont)

// Drive a continuation chain until a step returns nullptr.
#define CONTINUATION_RUN(CT)                                      \
  {                                                               \
    Ct<std::remove_reference<decltype(*this)>::type> *_cont = &CT;\
    do {                                                          \
      _cont = _cont->call(this);                                  \
    } while (_cont);                                              \
  }

#define READ_HANDLER_CONTINUATION_DECL(C, F) \
  CONTINUATION_DECL(C, F, char *, int)

#define READ_BPTR_HANDLER_CONTINUATION_DECL(C, F) \
  CtRxNode<C> F##_cont { (&C::F) };

#define WRITE_HANDLER_CONTINUATION_DECL(C, F) CONTINUATION_DECL(C, F, int)
//////////////////////////////////////////////////////////////////////
class AsyncMessenger;
/**
 * Abstract wire-protocol state machine attached to an AsyncConnection.
 * Concrete implementations (e.g. ProtocolV1) drive the connect/accept
 * handshake and message transfer through the read/write event callbacks.
 */
class Protocol {
 public:
  const int proto_type;  // protocol version tag (1 for ProtocolV1)

 protected:
  AsyncConnection *connection;
  AsyncMessenger *messenger;
  CephContext *cct;

 public:
  std::shared_ptr<AuthConnectionMeta> auth_meta;

 public:
  Protocol(int type, AsyncConnection *connection);
  virtual ~Protocol();

  // prepare protocol for connecting to peer
  virtual void connect() = 0;
  // prepare protocol for accepting peer connections
  virtual void accept() = 0;
  // true -> protocol is ready for sending messages
  virtual bool is_connected() = 0;
  // stop connection
  virtual void stop() = 0;
  // signal and handle connection failure
  virtual void fault() = 0;
  // send message
  virtual void send_message(Message *m) = 0;
  // send keepalive
  virtual void send_keepalive() = 0;

  virtual void read_event() = 0;
  virtual void write_event() = 0;
  virtual bool is_queued() = 0;

  int get_con_mode() const {
    return auth_meta->con_mode;
  }
};
#endif /* _MSG_ASYNC_PROTOCOL_ */
| 3,665 | 25 | 77 |
h
|
null |
ceph-main/src/msg/async/ProtocolV1.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "ProtocolV1.h"
#include "common/errno.h"
#include "AsyncConnection.h"
#include "AsyncMessenger.h"
#include "common/EventTrace.h"
#include "include/random.h"
#include "auth/AuthClient.h"
#include "auth/AuthServer.h"
#define dout_subsys ceph_subsys_ms
#undef dout_prefix
#define dout_prefix _conn_prefix(_dout)
// Debug-log prefix: "--1- <my addrs> >> <peer addrs> conn(...)" carrying
// the connection pointer, port, state name and sequence counters.
std::ostream &ProtocolV1::_conn_prefix(std::ostream *_dout) {
  return *_dout << "--1- " << messenger->get_myaddrs() << " >> "
                << *connection->peer_addrs
		<< " conn("
                << connection << " " << this
                << " :" << connection->port << " s=" << get_state_name(state)
                << " pgs=" << peer_global_seq << " cs=" << connect_seq
                << " l=" << connection->policy.lossy << ").";
}
// Shorthand wrappers around the continuation-based read/write helpers.
#define WRITE(B, C) write(CONTINUATION(C), B)
#define READ(L, C) read(CONTINUATION(C), L)
#define READB(L, B, C) read(CONTINUATION(C), L, B)

// Constant to limit starting sequence number to 2^31. Nothing special about
// it, just a big number. PLR
#define SEQ_MASK 0x7fffffff

// NOTE(review): threshold for coalescing many small data segments before
// sending — inferred from the name; confirm at the use site.
const int ASYNC_COALESCE_THRESHOLD = 256;

using namespace std;
// Append to 'data' a buffer of 'len' bytes whose memory alignment mirrors
// the on-wire data alignment 'off', so later chunks land page-aligned.
static void alloc_aligned_buffer(ceph::buffer::list &data, unsigned len, unsigned off) {
  unsigned head = 0;
  unsigned alloc_len = 0;
  if (off & ~CEPH_PAGE_MASK) {
    // 'off' is not page aligned: reserve one full leading page and use
    // only its tail, so the remainder starts on a page boundary.
    alloc_len += CEPH_PAGE_SIZE;
    head = std::min<uint64_t>(CEPH_PAGE_SIZE - (off & ~CEPH_PAGE_MASK), len);
  }
  alloc_len += len - head;
  ceph::bufferptr ptr(ceph::buffer::create_small_page_aligned(alloc_len));
  if (head)
    ptr.set_offset(CEPH_PAGE_SIZE - head);
  data.push_back(std::move(ptr));
}
/**
 * Protocol V1
 **/
ProtocolV1::ProtocolV1(AsyncConnection *connection)
    : Protocol(1, connection),
      temp_buffer(nullptr),
      can_write(WriteStatus::NOWRITE),
      keepalive(false),
      connect_seq(0),
      peer_global_seq(0),
      msg_left(0),
      cur_msg_size(0),
      replacing(false),
      is_reset_from_peer(false),
      once_ready(false),
      state(NONE),
      global_seq(0),
      wait_for_seq(false) {
  // scratch buffer used by read() when the caller supplies no buffer
  temp_buffer = new char[4096];
}
ProtocolV1::~ProtocolV1() {
  // the output queues must already have been drained (see stop())
  ceph_assert(out_q.empty());
  ceph_assert(sent.empty());

  delete[] temp_buffer;
}
// Put the state machine into the client (connecting) role and reset the
// per-attempt handshake state.
void ProtocolV1::connect() {
  this->state = START_CONNECT;

  // reset connect state variables
  authorizer_buf.clear();
  // FIPS zeroization audit 20191115: these memsets are not security related.
  memset(&connect_msg, 0, sizeof(connect_msg));
  memset(&connect_reply, 0, sizeof(connect_reply));

  global_seq = messenger->get_global_seq();
}
// Put the state machine into the server (accepting) role.
void ProtocolV1::accept() { this->state = START_ACCEPT; }
// Connected == the write side is fully open for message traffic.
bool ProtocolV1::is_connected() {
  return can_write.load() == WriteStatus::CANWRITE;
}
// Tear the connection down: flush any delayed delivery, drop queued
// output, stop the AsyncConnection and mark the protocol CLOSED.
// Idempotent: returns immediately if already CLOSED.
void ProtocolV1::stop() {
  ldout(cct, 20) << __func__ << dendl;
  if (state == CLOSED) {
    return;
  }

  if (connection->delay_state) connection->delay_state->flush();

  ldout(cct, 2) << __func__ << dendl;
  std::lock_guard<std::mutex> l(connection->write_lock);

  reset_recv_state();
  discard_out_queue();

  connection->_stop();
  can_write = WriteStatus::CLOSED;
  state = CLOSED;
}
// Handle a connection failure: lossy channels are torn down; lossless ones
// requeue sent-but-unacked messages and either go STANDBY or schedule a
// reconnect (with exponential backoff while mid-handshake).
void ProtocolV1::fault() {
  ldout(cct, 20) << __func__ << dendl;

  if (state == CLOSED || state == NONE) {
    ldout(cct, 10) << __func__ << " connection is already closed" << dendl;
    return;
  }

  if (connection->policy.lossy && state != START_CONNECT &&
      state != CONNECTING) {
    // lossy channel after handshake: give up and reset
    ldout(cct, 1) << __func__ << " on lossy channel, failing" << dendl;
    stop();
    connection->dispatch_queue->queue_reset(connection);
    return;
  }

  connection->write_lock.lock();
  can_write = WriteStatus::NOWRITE;
  is_reset_from_peer = false;

  // requeue sent items
  requeue_sent();

  if (!once_ready && out_q.empty() && state >= START_ACCEPT &&
      state <= ACCEPTING_WAIT_CONNECT_MSG_AUTH && !replacing) {
    // half-open accept with nothing pending: just close it
    ldout(cct, 10) << __func__ << " with nothing to send and in the half "
                   << " accept state just closed" << dendl;
    connection->write_lock.unlock();
    stop();
    connection->dispatch_queue->queue_reset(connection);
    return;
  }
  replacing = false;

  connection->fault();

  reset_recv_state();

  if (connection->policy.standby && out_q.empty() && !keepalive &&
      state != WAIT) {
    ldout(cct, 10) << __func__ << " with nothing to send, going to standby"
                   << dendl;
    state = STANDBY;
    connection->write_lock.unlock();
    return;
  }

  connection->write_lock.unlock();

  if ((state >= START_CONNECT && state <= CONNECTING_SEND_CONNECT_MSG) ||
      state == WAIT) {
    // backoff!
    if (state == WAIT) {
      backoff.set_from_double(cct->_conf->ms_max_backoff);
    } else if (backoff == utime_t()) {
      backoff.set_from_double(cct->_conf->ms_initial_backoff);
    } else {
      // double the delay, capped at ms_max_backoff
      backoff += backoff;
      if (backoff > cct->_conf->ms_max_backoff)
        backoff.set_from_double(cct->_conf->ms_max_backoff);
    }

    global_seq = messenger->get_global_seq();
    state = START_CONNECT;
    connection->state = AsyncConnection::STATE_CONNECTING;
    ldout(cct, 10) << __func__ << " waiting " << backoff << dendl;
    // woke up again;
    connection->register_time_events.insert(
	connection->center->create_time_event(backoff.to_nsec() / 1000,
					      connection->wakeup_handler));
  } else {
    // policy maybe empty when state is in accept
    if (connection->policy.server) {
      ldout(cct, 0) << __func__ << " server, going to standby" << dendl;
      state = STANDBY;
    } else {
      ldout(cct, 0) << __func__ << " initiating reconnect" << dendl;
      connect_seq++;
      global_seq = messenger->get_global_seq();
      state = START_CONNECT;
      connection->state = AsyncConnection::STATE_CONNECTING;
    }
    backoff = utime_t();
    connection->center->dispatch_event_external(connection->read_handler);
  }
}
// Queue 'm' for transmission (consumes the caller's ref).  Fast-dispatch
// messages may be pre-encoded outside the lock; the encoding is thrown
// away if the negotiated features changed in the meantime.
void ProtocolV1::send_message(Message *m) {
  ceph::buffer::list bl;
  uint64_t f = connection->get_features();

  // TODO: Currently not all messages supports reencode like MOSDMap, so here
  // only let fast dispatch support messages prepare message
  bool can_fast_prepare = messenger->ms_can_fast_dispatch(m);
  if (can_fast_prepare) {
    prepare_send_message(f, m, bl);
  }

  std::lock_guard<std::mutex> l(connection->write_lock);
  // "features" changes will change the payload encoding
  if (can_fast_prepare &&
      (can_write == WriteStatus::NOWRITE || connection->get_features() != f)) {
    // ensure the correctness of message encoding
    bl.clear();
    m->clear_payload();
    ldout(cct, 5) << __func__ << " clear encoded buffer previous " << f
                  << " != " << connection->get_features() << dendl;
  }
  if (can_write == WriteStatus::CLOSED) {
    ldout(cct, 10) << __func__ << " connection closed."
                   << " Drop message " << m << dendl;
    m->put();
  } else {
    // enqueue by priority and kick the write handler if necessary
    m->queue_start = ceph::mono_clock::now();
    m->trace.event("async enqueueing message");
    out_q[m->get_priority()].emplace_back(std::move(bl), m);
    ldout(cct, 15) << __func__ << " inline write is denied, reschedule m=" << m
                   << dendl;
    if (can_write != WriteStatus::REPLACING && !write_in_progress) {
      write_in_progress = true;
      connection->center->dispatch_event_external(connection->write_handler);
    }
  }
}
// Encode m's payload/middle/data sections into 'bl' for the given feature
// set.  The header is finalized later (write_message updates header.seq
// and recalculates CRCs), so header checksumming is skipped here.
void ProtocolV1::prepare_send_message(uint64_t features, Message *m,
                                      ceph::buffer::list &bl) {
  ldout(cct, 20) << __func__ << " m " << *m << dendl;

  // associate message with Connection (for benefit of encode_payload)
  ldout(cct, 20) << __func__ << (m->empty_payload() ? " encoding features " : " half-reencoding features ")
		 << features << " " << m << " " << *m << dendl;

  // encode and copy out of *m
  // in write_message we update header.seq and need recalc crc
  // so skip calc header in encode function.
  m->encode(features, messenger->crcflags, true);

  bl.append(m->get_payload());
  bl.append(m->get_middle());
  bl.append(m->get_data());
}
// Flag a keepalive for the write path; write_event() appends the actual
// keepalive frame (append_keepalive_or_ack) before the next send batch.
void ProtocolV1::send_keepalive() {
  ldout(cct, 10) << __func__ << dendl;
  std::lock_guard<std::mutex> l(connection->write_lock);
  if (can_write != WriteStatus::CLOSED) {
    keepalive = true;
    connection->center->dispatch_event_external(connection->write_handler);
  }
}
// Readable-event entry point: resume the continuation chain matching the
// current protocol state.  Events arriving in other states are ignored.
void ProtocolV1::read_event() {
  ldout(cct, 20) << __func__ << dendl;
  switch (state) {
    case START_CONNECT:
      CONTINUATION_RUN(CONTINUATION(send_client_banner));
      break;
    case START_ACCEPT:
      CONTINUATION_RUN(CONTINUATION(send_server_banner));
      break;
    case OPENED:
      CONTINUATION_RUN(CONTINUATION(wait_message));
      break;
    case THROTTLE_MESSAGE:
      CONTINUATION_RUN(CONTINUATION(throttle_message));
      break;
    case THROTTLE_BYTES:
      CONTINUATION_RUN(CONTINUATION(throttle_bytes));
      break;
    case THROTTLE_DISPATCH_QUEUE:
      CONTINUATION_RUN(CONTINUATION(throttle_dispatch_queue));
      break;
    default:
      break;
  }
}
// Writable-event entry point.  When writing is allowed, drains the
// priority queues (encoding lazily, outside write_lock), then flushes any
// pending seq acks.  Otherwise handles standby reconnect / residual
// buffer flushing.  On send failure the connection is faulted.
void ProtocolV1::write_event() {
  ldout(cct, 10) << __func__ << dendl;
  ssize_t r = 0;

  connection->write_lock.lock();
  if (can_write == WriteStatus::CANWRITE) {
    if (keepalive) {
      append_keepalive_or_ack();
      keepalive = false;
    }

    auto start = ceph::mono_clock::now();
    bool more;
    do {
      if (connection->is_queued()) {
	if (r = connection->_try_send(); r!= 0) {
	  // either fails to send or not all queued buffer is sent
	  break;
	}
      }

      ceph::buffer::list data;
      Message *m = _get_next_outgoing(&data);
      if (!m) {
	break;
      }

      if (!connection->policy.lossy) {
	// put on sent list
	sent.push_back(m);
	m->get();
      }
      more = !out_q.empty();
      connection->write_lock.unlock();

      // send_message or requeue messages may not encode message
      if (!data.length()) {
	prepare_send_message(connection->get_features(), m, data);
      }

      if (m->queue_start != ceph::mono_time()) {
	connection->logger->tinc(l_msgr_send_messages_queue_lat,
				 ceph::mono_clock::now() - m->queue_start);
      }

      r = write_message(m, data, more);

      connection->write_lock.lock();
      if (r == 0) {
	;
      } else if (r < 0) {
	ldout(cct, 1) << __func__ << " send msg failed" << dendl;
	break;
      } else if (r > 0) {
	// Outbound message in-progress, thread will be re-awoken
	// when the outbound socket is writeable again
	break;
      }
    } while (can_write == WriteStatus::CANWRITE);
    write_in_progress = false;
    connection->write_lock.unlock();

    // if r > 0 mean data still lefted, so no need _try_send.
    if (r == 0) {
      uint64_t left = ack_left;
      if (left) {
	// append an ACK tag + in_seq for all pending acks
	ceph_le64 s;
	s = in_seq;
	connection->outgoing_bl.append(CEPH_MSGR_TAG_ACK);
	connection->outgoing_bl.append((char *)&s, sizeof(s));
	ldout(cct, 10) << __func__ << " try send msg ack, acked " << left
		       << " messages" << dendl;
	ack_left -= left;
	left = ack_left;
	// NOTE(review): 'left' (remaining acks) is passed where _try_send
	// appears to take a bool "more" hint — confirm this coercion is
	// intentional.
	r = connection->_try_send(left);
      } else if (is_queued()) {
	r = connection->_try_send();
      }
    }

    connection->logger->tinc(l_msgr_running_send_time,
			     ceph::mono_clock::now() - start);
    if (r < 0) {
      ldout(cct, 1) << __func__ << " send msg failed" << dendl;
      connection->lock.lock();
      fault();
      connection->lock.unlock();
      return;
    }
  } else {
    // not writable: lock ordering requires taking connection->lock first
    write_in_progress = false;
    connection->write_lock.unlock();
    connection->lock.lock();
    connection->write_lock.lock();
    if (state == STANDBY && !connection->policy.server && is_queued()) {
      ldout(cct, 10) << __func__ << " policy.server is false" << dendl;
      connection->_connect();
    } else if (connection->cs && state != NONE && state != CLOSED &&
               state != START_CONNECT) {
      r = connection->_try_send();
      if (r < 0) {
	ldout(cct, 1) << __func__ << " send outcoming bl failed" << dendl;
	connection->write_lock.unlock();
	fault();
	connection->lock.unlock();
	return;
      }
    }
    connection->write_lock.unlock();
    connection->lock.unlock();
  }
}
bool ProtocolV1::is_queued() {
  // Anything waiting?  Either messages still in the protocol's priority
  // queues, or raw bytes still queued on the connection itself.
  if (!out_q.empty()) {
    return true;
  }
  return connection->is_queued();
}
void ProtocolV1::run_continuation(CtPtr pcontinuation) {
  // A null continuation means there is nothing to resume.
  if (pcontinuation == nullptr) {
    return;
  }
  CONTINUATION_RUN(*pcontinuation);
}
// Issue a read of `len` bytes into `buffer` (or the shared temp_buffer when
// buffer is null).  Returns &next when the result is available immediately
// (r <= 0 — either data filled synchronously or an error; the caller runs
// the continuation in-line), or nullptr when the read is in flight, in which
// case the lambda re-runs `next` from the event thread on completion.
CtPtr ProtocolV1::read(CONTINUATION_RX_TYPE<ProtocolV1> &next,
                       int len, char *buffer) {
  if (!buffer) {
    buffer = temp_buffer;
  }
  // NOTE: the lambda captures `next` by reference; `next` is a member-owned
  // continuation so it outlives the async read.
  ssize_t r = connection->read(len, buffer,
                               [&next, this](char *buffer, int r) {
                                 next.setParams(buffer, r);
                                 CONTINUATION_RUN(next);
                               });
  if (r <= 0) {
    // synchronous completion (or error): hand the result to the caller
    next.setParams(buffer, r);
    return &next;
  }
  return nullptr;
}
// Issue a write of `buffer`.  Mirrors read(): returns &next on synchronous
// completion or error (r <= 0) so the caller runs the continuation in-line;
// returns nullptr while the write is pending, in which case the callback
// resumes `next` from the event thread.
CtPtr ProtocolV1::write(CONTINUATION_TX_TYPE<ProtocolV1> &next,
                        ceph::buffer::list &buffer) {
  ssize_t r = connection->write(buffer, [&next, this](int r) {
    next.setParams(r);
    CONTINUATION_RUN(next);
  });
  if (r <= 0) {
    next.setParams(r);
    return &next;
  }
  return nullptr;
}
// Handshake finished: arm the inactivity tick timer, enable writes (and kick
// the write handler if messages queued up during the handshake), then enter
// the OPENED state and start the read loop.
CtPtr ProtocolV1::ready() {
  ldout(cct, 25) << __func__ << dendl;
  // make sure no pending tick timer
  if (connection->last_tick_id) {
    connection->center->delete_time_event(connection->last_tick_id);
  }
  connection->last_tick_id = connection->center->create_time_event(
      connection->inactive_timeout_us, connection->tick_handler);
  connection->write_lock.lock();
  can_write = WriteStatus::CANWRITE;
  if (is_queued()) {
    // messages accumulated while we were handshaking — wake the writer
    connection->center->dispatch_event_external(connection->write_handler);
  }
  connection->write_lock.unlock();
  connection->maybe_start_delay_thread();
  state = OPENED;
  return wait_message();
}
CtPtr ProtocolV1::wait_message() {
  // State may have changed under us (e.g. connection replaced); if we are no
  // longer OPENED, stop the read loop here.
  if (state != OPENED) {
    return nullptr;
  }
  ldout(cct, 20) << __func__ << dendl;
  // Wait for the next one-byte tag from the peer.
  return READ(sizeof(char), handle_message);
}
// Dispatch on the one-byte tag read by wait_message().  Each tag either
// completes immediately (KEEPALIVE, CLOSE) or schedules a follow-up read of
// the tag-specific payload.  Unknown tags are a protocol error.
CtPtr ProtocolV1::handle_message(char *buffer, int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r < 0) {
    ldout(cct, 1) << __func__ << " read tag failed" << dendl;
    return _fault();
  }
  char tag = buffer[0];
  ldout(cct, 20) << __func__ << " process tag " << (int)tag << dendl;
  if (tag == CEPH_MSGR_TAG_KEEPALIVE) {
    // legacy keepalive: no payload, just refresh the timestamp
    ldout(cct, 20) << __func__ << " got KEEPALIVE" << dendl;
    connection->set_last_keepalive(ceph_clock_now());
  } else if (tag == CEPH_MSGR_TAG_KEEPALIVE2) {
    // keepalive2 carries a timestamp we must echo back
    return READ(sizeof(ceph_timespec), handle_keepalive2);
  } else if (tag == CEPH_MSGR_TAG_KEEPALIVE2_ACK) {
    return READ(sizeof(ceph_timespec), handle_keepalive2_ack);
  } else if (tag == CEPH_MSGR_TAG_ACK) {
    // peer acknowledges messages up to a sequence number
    return READ(sizeof(ceph_le64), handle_tag_ack);
  } else if (tag == CEPH_MSGR_TAG_MSG) {
    // start of a full message: header, throttling, front/middle/data, footer
    recv_stamp = ceph_clock_now();
    ldout(cct, 20) << __func__ << " begin MSG" << dendl;
    return READ(sizeof(ceph_msg_header), handle_message_header);
  } else if (tag == CEPH_MSGR_TAG_CLOSE) {
    ldout(cct, 20) << __func__ << " got CLOSE" << dendl;
    stop();
  } else {
    ldout(cct, 0) << __func__ << " bad tag " << (int)tag << dendl;
    return _fault();
  }
  return nullptr;
}
// Handle the timestamp payload of a KEEPALIVE2 tag: queue an ack echoing the
// peer's timestamp, refresh our last-keepalive stamp, and wake the writer so
// the ack goes out promptly.
// Fix: corrected the "keeplive" typo in the error log message.
CtPtr ProtocolV1::handle_keepalive2(char *buffer, int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r < 0) {
    ldout(cct, 1) << __func__ << " read keepalive timespec failed" << dendl;
    return _fault();
  }
  ldout(cct, 30) << __func__ << " got KEEPALIVE2 tag ..." << dendl;
  // echo the peer's timestamp back in a KEEPALIVE2_ACK
  ceph_timespec *t = (ceph_timespec *)buffer;
  utime_t kp_t = utime_t(*t);
  connection->write_lock.lock();
  append_keepalive_or_ack(true, &kp_t);
  connection->write_lock.unlock();
  ldout(cct, 20) << __func__ << " got KEEPALIVE2 " << kp_t << dendl;
  connection->set_last_keepalive(ceph_clock_now());
  if (is_connected()) {
    // nudge the write path so the ack is flushed
    connection->center->dispatch_event_external(connection->write_handler);
  }
  return CONTINUE(wait_message);
}
// Append a keepalive frame to outgoing_bl.  With ack=true this is a
// KEEPALIVE2_ACK echoing the timestamp *tp (tp must be non-null); otherwise
// it is a KEEPALIVE2 carrying the current time, falling back to the legacy
// timestamp-less KEEPALIVE tag for peers without the MSGR_KEEPALIVE2
// feature.  Caller must hold write_lock.
void ProtocolV1::append_keepalive_or_ack(bool ack, utime_t *tp) {
  ldout(cct, 10) << __func__ << dendl;
  if (ack) {
    ceph_assert(tp);
    struct ceph_timespec ts;
    tp->encode_timeval(&ts);
    connection->outgoing_bl.append(CEPH_MSGR_TAG_KEEPALIVE2_ACK);
    connection->outgoing_bl.append((char *)&ts, sizeof(ts));
  } else if (connection->has_feature(CEPH_FEATURE_MSGR_KEEPALIVE2)) {
    struct ceph_timespec ts;
    utime_t t = ceph_clock_now();
    t.encode_timeval(&ts);
    connection->outgoing_bl.append(CEPH_MSGR_TAG_KEEPALIVE2);
    connection->outgoing_bl.append((char *)&ts, sizeof(ts));
  } else {
    // legacy peer: bare keepalive tag, no timestamp
    connection->outgoing_bl.append(CEPH_MSGR_TAG_KEEPALIVE);
  }
}
// Handle the timestamp payload of a KEEPALIVE2_ACK: record the peer-echoed
// timestamp so the connection's liveness can be judged.
// Fix: corrected the "keeplive" typo in the error log message.
CtPtr ProtocolV1::handle_keepalive2_ack(char *buffer, int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r < 0) {
    ldout(cct, 1) << __func__ << " read keepalive timespec failed" << dendl;
    return _fault();
  }
  ceph_timespec *t = (ceph_timespec *)buffer;
  connection->set_last_keepalive_ack(utime_t(*t));
  ldout(cct, 20) << __func__ << " got KEEPALIVE_ACK" << dendl;
  return CONTINUE(wait_message);
}
// Handle the sequence-number payload of an ACK tag: drop our references to
// sent messages the peer has now acknowledged.  The trim is bounded to
// max_pending entries per ACK to limit work done under write_lock, and the
// actual put() calls happen after the lock is released.
CtPtr ProtocolV1::handle_tag_ack(char *buffer, int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r < 0) {
    ldout(cct, 1) << __func__ << " read ack seq failed" << dendl;
    return _fault();
  }
  ceph_le64 seq;
  seq = *(ceph_le64 *)buffer;
  ldout(cct, 20) << __func__ << " got ACK" << dendl;
  ldout(cct, 15) << __func__ << " got ack seq " << seq << dendl;
  // trim sent list
  static const int max_pending = 128;
  int i = 0;
  auto now = ceph::mono_clock::now();
  Message *pending[max_pending];
  connection->write_lock.lock();
  while (!sent.empty() && sent.front()->get_seq() <= seq && i < max_pending) {
    Message *m = sent.front();
    sent.pop_front();
    pending[i++] = m;
    ldout(cct, 10) << __func__ << " got ack seq " << seq
                   << " >= " << m->get_seq() << " on " << m << " " << *m
                   << dendl;
  }
  connection->write_lock.unlock();
  connection->logger->tinc(l_msgr_handle_ack_lat, ceph::mono_clock::now() - now);
  // release references outside the lock; Message::put may be expensive
  for (int k = 0; k < i; k++) {
    pending[k]->put();
  }
  return CONTINUE(wait_message);
}
// Handle the fixed-size message header: verify its CRC (which covers the
// header minus the trailing crc field itself), reset the per-message receive
// buffers, and advance to the throttling stages.
CtPtr ProtocolV1::handle_message_header(char *buffer, int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r < 0) {
    ldout(cct, 1) << __func__ << " read message header failed" << dendl;
    return _fault();
  }
  ldout(cct, 20) << __func__ << " got MSG header" << dendl;
  current_header = *((ceph_msg_header *)buffer);
  ldout(cct, 20) << __func__ << " got envelope type=" << current_header.type << " src "
                 << entity_name_t(current_header.src) << " front=" << current_header.front_len
                 << " data=" << current_header.data_len << " off " << current_header.data_off
                 << dendl;
  if (messenger->crcflags & MSG_CRC_HEADER) {
    __u32 header_crc = 0;
    // crc is the last field of the header, so checksum everything before it
    header_crc = ceph_crc32c(0, (unsigned char *)&current_header,
                             sizeof(current_header) - sizeof(current_header.crc));
    // verify header crc
    if (header_crc != current_header.crc) {
      ldout(cct, 0) << __func__ << " got bad header crc " << header_crc
                    << " != " << current_header.crc << dendl;
      return _fault();
    }
  }
  // Reset state
  data_buf.clear();
  front.clear();
  middle.clear();
  data.clear();
  state = THROTTLE_MESSAGE;
  return CONTINUE(throttle_message);
}
// First throttling stage: acquire one slot from the per-policy message-count
// throttler.  On failure we return nullptr (parking the state machine in
// THROTTLE_MESSAGE) and schedule a 1ms wakeup to retry via read_event().
CtPtr ProtocolV1::throttle_message() {
  ldout(cct, 20) << __func__ << dendl;
  if (connection->policy.throttler_messages) {
    ldout(cct, 10) << __func__ << " wants " << 1
                   << " message from policy throttler "
                   << connection->policy.throttler_messages->get_current()
                   << "/" << connection->policy.throttler_messages->get_max()
                   << dendl;
    if (!connection->policy.throttler_messages->get_or_fail()) {
      ldout(cct, 1) << __func__ << " wants 1 message from policy throttle "
                    << connection->policy.throttler_messages->get_current()
                    << "/" << connection->policy.throttler_messages->get_max()
                    << " failed, just wait." << dendl;
      // draining a full message queue can take the dispatch threads a
      // while, so retry after a 1ms delay rather than spinning
      if (connection->register_time_events.empty()) {
        connection->register_time_events.insert(
            connection->center->create_time_event(1000,
                                                  connection->wakeup_handler));
      }
      return nullptr;
    }
  }
  state = THROTTLE_BYTES;
  return CONTINUE(throttle_bytes);
}
// Second throttling stage: reserve the full message size (front + middle +
// data) from the per-policy byte throttler.  Same retry-via-time-event
// pattern as throttle_message().
CtPtr ProtocolV1::throttle_bytes() {
  ldout(cct, 20) << __func__ << dendl;
  cur_msg_size = current_header.front_len + current_header.middle_len +
                 current_header.data_len;
  if (cur_msg_size) {
    if (connection->policy.throttler_bytes) {
      ldout(cct, 10) << __func__ << " wants " << cur_msg_size
                     << " bytes from policy throttler "
                     << connection->policy.throttler_bytes->get_current() << "/"
                     << connection->policy.throttler_bytes->get_max() << dendl;
      if (!connection->policy.throttler_bytes->get_or_fail(cur_msg_size)) {
        ldout(cct, 1) << __func__ << " wants " << cur_msg_size
                      << " bytes from policy throttler "
                      << connection->policy.throttler_bytes->get_current()
                      << "/" << connection->policy.throttler_bytes->get_max()
                      << " failed, just wait." << dendl;
        // draining a full message queue can take the dispatch threads a
        // while, so retry after a 1ms delay rather than spinning
        if (connection->register_time_events.empty()) {
          connection->register_time_events.insert(
              connection->center->create_time_event(
                  1000, connection->wakeup_handler));
        }
        return nullptr;
      }
    }
  }
  state = THROTTLE_DISPATCH_QUEUE;
  return CONTINUE(throttle_dispatch_queue);
}
// Final throttling stage: reserve cur_msg_size bytes from the dispatch-queue
// throttler (released later by dispatch_throttle_release).  On success,
// stamp the message and start reading its front section.
CtPtr ProtocolV1::throttle_dispatch_queue() {
  ldout(cct, 20) << __func__ << dendl;
  if (cur_msg_size) {
    if (!connection->dispatch_queue->dispatch_throttler.get_or_fail(
            cur_msg_size)) {
      ldout(cct, 1)
          << __func__ << " wants " << cur_msg_size
          << " bytes from dispatch throttle "
          << connection->dispatch_queue->dispatch_throttler.get_current() << "/"
          << connection->dispatch_queue->dispatch_throttler.get_max()
          << " failed, just wait." << dendl;
      // draining a full message queue can take the dispatch threads a
      // while, so retry after a 1ms delay rather than spinning
      if (connection->register_time_events.empty()) {
        connection->register_time_events.insert(
            connection->center->create_time_event(1000,
                                                  connection->wakeup_handler));
      }
      return nullptr;
    }
  }
  throttle_stamp = ceph_clock_now();
  state = READ_MESSAGE_FRONT;
  return read_message_front();
}
CtPtr ProtocolV1::read_message_front() {
  ldout(cct, 20) << __func__ << dendl;
  const unsigned front_len = current_header.front_len;
  if (!front_len) {
    // no front section advertised; continue with the middle section
    return read_message_middle();
  }
  // allocate the receive buffer lazily — it may already exist if a prior
  // read of this message was interrupted
  if (!front.length()) {
    front.push_back(ceph::buffer::create(front_len));
  }
  return READB(front_len, front.c_str(), handle_message_front);
}
CtPtr ProtocolV1::handle_message_front(char *buffer, int r) {
  // Completion callback for the front-section read.
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r >= 0) {
    ldout(cct, 20) << __func__ << " got front " << front.length() << dendl;
    return read_message_middle();
  }
  ldout(cct, 1) << __func__ << " read message front failed" << dendl;
  return _fault();
}
CtPtr ProtocolV1::read_message_middle() {
  ldout(cct, 20) << __func__ << dendl;
  if (!current_header.middle_len) {
    // no middle section; go straight to the data stage
    return read_message_data_prepare();
  }
  // allocate lazily, as with the front section
  if (!middle.length()) {
    middle.push_back(ceph::buffer::create(current_header.middle_len));
  }
  return READB(current_header.middle_len, middle.c_str(),
               handle_message_middle);
}
// Completion callback for the middle-section read.
// Fix: log " r=" like every other handler in this file (the '=' was
// missing, producing "handle_message_middle r0" style output).
CtPtr ProtocolV1::handle_message_middle(char *buffer, int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r < 0) {
    ldout(cct, 1) << __func__ << " read message middle failed" << dendl;
    return _fault();
  }
  ldout(cct, 20) << __func__ << " got middle " << middle.length() << dendl;
  return read_message_data_prepare();
}
// Prepare to receive the data section: allocate an aligned receive buffer
// (honoring the header's data_off alignment hint) and initialize the
// iterator used by the chunked read loop.  The #if 0 block is the retired
// rx_buffers path, kept for reference.
CtPtr ProtocolV1::read_message_data_prepare() {
  ldout(cct, 20) << __func__ << dendl;
  unsigned data_len = current_header.data_len;
  unsigned data_off = current_header.data_off;
  if (data_len) {
    // get a buffer
#if 0
    // rx_buffers is broken by design... see
    //  http://tracker.ceph.com/issues/22480
    map<ceph_tid_t, pair<ceph::buffer::list, int> >::iterator p =
        connection->rx_buffers.find(current_header.tid);
    if (p != connection->rx_buffers.end()) {
      ldout(cct, 10) << __func__ << " seleting rx buffer v " << p->second.second
                     << " at offset " << data_off << " len "
                     << p->second.first.length() << dendl;
      data_buf = p->second.first;
      // make sure it's big enough
      if (data_buf.length() < data_len)
        data_buf.push_back(buffer::create(data_len - data_buf.length()));
      data_blp = data_buf.begin();
    } else {
      ldout(cct, 20) << __func__ << " allocating new rx buffer at offset "
                     << data_off << dendl;
      alloc_aligned_buffer(data_buf, data_len, data_off);
      data_blp = data_buf.begin();
    }
#else
    ldout(cct, 20) << __func__ << " allocating new rx buffer at offset "
                   << data_off << dendl;
    alloc_aligned_buffer(data_buf, data_len, data_off);
    data_blp = data_buf.begin();
#endif
  }
  // msg_left counts the data bytes still to be read by read_message_data()
  msg_left = data_len;
  return CONTINUE(read_message_data);
}
CtPtr ProtocolV1::read_message_data() {
  ldout(cct, 20) << __func__ << " msg_left=" << msg_left << dendl;
  // All payload consumed: continue with the footer.
  if (msg_left == 0) {
    return read_message_footer();
  }
  // Read into the iterator's current bufferptr, capped at what is still
  // outstanding; handle_message_data() advances and loops back here.
  auto cur = data_blp.get_current_ptr();
  const unsigned to_read = std::min(cur.length(), msg_left);
  return READB(to_read, cur.c_str(), handle_message_data);
}
// Completion callback for one data chunk: account the bytes just received
// into `data`, advance the buffer iterator, and loop back to
// read_message_data() for the next chunk (or the footer when done).
CtPtr ProtocolV1::handle_message_data(char *buffer, int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r < 0) {
    ldout(cct, 1) << __func__ << " read data error " << dendl;
    return _fault();
  }
  // recompute the chunk size exactly as read_message_data() did; the read
  // filled bp's memory in place
  auto bp = data_blp.get_current_ptr();
  unsigned read_len = std::min(bp.length(), msg_left);
  ceph_assert(read_len <
	      static_cast<unsigned>(std::numeric_limits<int>::max()));
  data_blp += read_len;
  // append a zero-copy reference to the received bytes
  data.append(bp, 0, read_len);
  msg_left -= read_len;
  return CONTINUE(read_message_data);
}
CtPtr ProtocolV1::read_message_footer() {
  ldout(cct, 20) << __func__ << dendl;
  state = READ_FOOTER_AND_DISPATCH;
  // Peers without the MSG_AUTH feature send the shorter legacy footer
  // without the signature field.
  const unsigned len = connection->has_feature(CEPH_FEATURE_MSG_AUTH)
                           ? sizeof(ceph_msg_footer)
                           : sizeof(ceph_msg_footer_old);
  return READ(len, handle_message_footer);
}
// Final stage of message receive: decode the footer (upgrading the legacy
// format if needed), decode and verify the message, enforce sequence-number
// ordering, then hand the message to fast dispatch, the delay-injection
// queue, or the ordinary dispatch queue.  Runs with connection->lock held
// except around fast_dispatch.
CtPtr ProtocolV1::handle_message_footer(char *buffer, int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r < 0) {
    ldout(cct, 1) << __func__ << " read footer data error " << dendl;
    return _fault();
  }
  ceph_msg_footer footer;
  ceph_msg_footer_old old_footer;
  if (connection->has_feature(CEPH_FEATURE_MSG_AUTH)) {
    footer = *((ceph_msg_footer *)buffer);
  } else {
    // legacy footer has no signature field; synthesize sig = 0
    old_footer = *((ceph_msg_footer_old *)buffer);
    footer.front_crc = old_footer.front_crc;
    footer.middle_crc = old_footer.middle_crc;
    footer.data_crc = old_footer.data_crc;
    footer.sig = 0;
    footer.flags = old_footer.flags;
  }
  // a sender that aborted mid-message clears the COMPLETE flag
  int aborted = (footer.flags & CEPH_MSG_FOOTER_COMPLETE) == 0;
  ldout(cct, 10) << __func__ << " aborted = " << aborted << dendl;
  if (aborted) {
    ldout(cct, 0) << __func__ << " got " << front.length() << " + "
                  << middle.length() << " + " << data.length()
                  << " byte message.. ABORTED" << dendl;
    return _fault();
  }
  ldout(cct, 20) << __func__ << " got " << front.length() << " + "
                 << middle.length() << " + " << data.length() << " byte message"
                 << dendl;
  Message *message = decode_message(cct, messenger->crcflags, current_header,
                                    footer, front, middle, data, connection);
  if (!message) {
    ldout(cct, 1) << __func__ << " decode message failed " << dendl;
    return _fault();
  }
  //
  // Check the signature if one should be present.  A zero return indicates
  // success. PLR
  //
  if (session_security.get() == NULL) {
    ldout(cct, 10) << __func__ << " no session security set" << dendl;
  } else {
    if (session_security->check_message_signature(message)) {
      ldout(cct, 0) << __func__ << " Signature check failed" << dendl;
      message->put();
      return _fault();
    }
  }
  message->set_byte_throttler(connection->policy.throttler_bytes);
  message->set_message_throttler(connection->policy.throttler_messages);
  // store reservation size in message, so we don't get confused
  // by messages entering the dispatch queue through other paths.
  message->set_dispatch_throttle_size(cur_msg_size);
  message->set_recv_stamp(recv_stamp);
  message->set_throttle_stamp(throttle_stamp);
  message->set_recv_complete_stamp(ceph_clock_now());
  // check received seq#.  if it is old, drop the message.
  // note that incoming messages may skip ahead.  this is convenient for the
  // client side queueing because messages can't be renumbered, but the (kernel)
  // client will occasionally pull a message out of the sent queue to send
  // elsewhere.  in that case it doesn't matter if we "got" it or not.
  uint64_t cur_seq = in_seq;
  if (message->get_seq() <= cur_seq) {
    ldout(cct, 0) << __func__ << " got old message " << message->get_seq()
                  << " <= " << cur_seq << " " << message << " " << *message
                  << ", discarding" << dendl;
    message->put();
    if (connection->has_feature(CEPH_FEATURE_RECONNECT_SEQ) &&
        cct->_conf->ms_die_on_old_message) {
      ceph_assert(0 == "old msgs despite reconnect_seq feature");
    }
    return nullptr;
  }
  if (message->get_seq() > cur_seq + 1) {
    // gap in the sequence; tolerated unless configured to die (see above)
    ldout(cct, 0) << __func__ << " missed message?  skipped from seq "
                  << cur_seq << " to " << message->get_seq() << dendl;
    if (cct->_conf->ms_die_on_skipped_message) {
      ceph_assert(0 == "skipped incoming seq");
    }
  }
#if defined(WITH_EVENTTRACE)
  if (message->get_type() == CEPH_MSG_OSD_OP ||
      message->get_type() == CEPH_MSG_OSD_OPREPLY) {
    utime_t ltt_processed_stamp = ceph_clock_now();
    double usecs_elapsed =
      ((double)(ltt_processed_stamp.to_nsec() - recv_stamp.to_nsec())) / 1000;
    ostringstream buf;
    if (message->get_type() == CEPH_MSG_OSD_OP)
      OID_ELAPSED_WITH_MSG(message, usecs_elapsed, "TIME_TO_DECODE_OSD_OP",
                           false);
    else
      OID_ELAPSED_WITH_MSG(message, usecs_elapsed, "TIME_TO_DECODE_OSD_OPREPLY",
                           false);
  }
#endif
  // note last received message.
  in_seq = message->get_seq();
  ldout(cct, 5) << " rx " << message->get_source() << " seq "
                << message->get_seq() << " " << message << " " << *message
                << dendl;
  // on lossless connections the peer expects an ACK for this message
  bool need_dispatch_writer = false;
  if (!connection->policy.lossy) {
    ack_left++;
    need_dispatch_writer = true;
  }
  state = OPENED;
  ceph::mono_time fast_dispatch_time;
  if (connection->is_blackhole()) {
    // testing hook: silently drop the message
    ldout(cct, 10) << __func__ << " blackhole " << *message << dendl;
    message->put();
    goto out;
  }
  connection->logger->inc(l_msgr_recv_messages);
  connection->logger->inc(
      l_msgr_recv_bytes,
      cur_msg_size + sizeof(ceph_msg_header) + sizeof(ceph_msg_footer));
  messenger->ms_fast_preprocess(message);
  fast_dispatch_time = ceph::mono_clock::now();
  connection->logger->tinc(l_msgr_running_recv_time,
                           fast_dispatch_time - connection->recv_start_time);
  if (connection->delay_state) {
    // failure-injection path: queue through the delay thread
    double delay_period = 0;
    if (rand() % 10000 < cct->_conf->ms_inject_delay_probability * 10000.0) {
      delay_period =
          cct->_conf->ms_inject_delay_max * (double)(rand() % 10000) / 10000.0;
      ldout(cct, 1) << "queue_received will delay after "
                    << (ceph_clock_now() + delay_period) << " on " << message
                    << " " << *message << dendl;
    }
    connection->delay_state->queue(delay_period, message);
  } else if (messenger->ms_can_fast_dispatch(message)) {
    // fast dispatch runs the handler inline; drop connection->lock around it
    connection->lock.unlock();
    connection->dispatch_queue->fast_dispatch(message);
    connection->recv_start_time = ceph::mono_clock::now();
    connection->logger->tinc(l_msgr_running_fast_dispatch_time,
                             connection->recv_start_time - fast_dispatch_time);
    connection->lock.lock();
  } else {
    connection->dispatch_queue->enqueue(message, message->get_priority(),
                                        connection->conn_id);
  }
 out:
  // clean up local buffer references
  data_buf.clear();
  front.clear();
  middle.clear();
  data.clear();
  if (need_dispatch_writer && connection->is_connected()) {
    // wake the writer so the pending ACK goes out
    connection->center->dispatch_event_external(connection->write_handler);
  }
  return CONTINUE(wait_message);
}
// Reset all per-session state after the peer indicated a session reset:
// discard queued and in-flight messages, notify dispatchers via
// queue_remote_reset, and restart sequence numbers.  Takes write_lock.
void ProtocolV1::session_reset() {
  ldout(cct, 10) << __func__ << " started" << dendl;
  std::lock_guard<std::mutex> l(connection->write_lock);
  if (connection->delay_state) {
    connection->delay_state->discard();
  }
  connection->dispatch_queue->discard_queue(connection->conn_id);
  discard_out_queue();
  // note: we need to clear outgoing_bl here, but session_reset may be
  // called by other thread, so let caller clear this itself!
  // outgoing_bl.clear();
  connection->dispatch_queue->queue_remote_reset(connection);
  randomize_out_seq();
  in_seq = 0;
  connect_seq = 0;
  // it's safe to directly set 0, double locked
  ack_left = 0;
  once_ready = false;
  can_write = WriteStatus::NOWRITE;
}
void ProtocolV1::randomize_out_seq() {
  if (!(connection->get_features() & CEPH_FEATURE_MSG_AUTH)) {
    // previously, seq #'s always started at 0.
    out_seq = 0;
    return;
  }
  // Set out_seq to a random value, so CRC won't be predictable.
  auto rand_seq = ceph::util::generate_random_number<uint64_t>(0, SEQ_MASK);
  ldout(cct, 10) << __func__ << " randomize_out_seq " << rand_seq << dendl;
  out_seq = rand_seq;
}
// Serialize one message (tag + header + payload + footer) onto outgoing_bl
// and attempt to send it.  Must run on the connection's event thread with
// write_lock NOT held (the caller drops it).  Returns the _try_send result:
// < 0 on error, 0 when fully sent, > 0 when bytes remain queued.  Consumes
// the caller's reference on m.
ssize_t ProtocolV1::write_message(Message *m, ceph::buffer::list &bl, bool more) {
  FUNCTRACE(cct);
  ceph_assert(connection->center->in_thread());
  m->set_seq(++out_seq);
  if (messenger->crcflags & MSG_CRC_HEADER) {
    m->calc_header_crc();
  }
  ceph_msg_header &header = m->get_header();
  ceph_msg_footer &footer = m->get_footer();
  // TODO: let sign_message could be reentry?
  // Now that we have all the crcs calculated, handle the
  // digital signature for the message, if the AsyncConnection has session
  // security set up.  Some session security options do not
  // actually calculate and check the signature, but they should
  // handle the calls to sign_message and check_signature.  PLR
  if (session_security.get() == NULL) {
    ldout(cct, 20) << __func__ << " no session security" << dendl;
  } else {
    if (session_security->sign_message(m)) {
      ldout(cct, 20) << __func__ << " failed to sign m=" << m
                     << "): sig = " << footer.sig << dendl;
    } else {
      ldout(cct, 20) << __func__ << " signed m=" << m
                     << "): sig = " << footer.sig << dendl;
    }
  }
  connection->outgoing_bl.append(CEPH_MSGR_TAG_MSG);
  connection->outgoing_bl.append((char *)&header, sizeof(header));
  ldout(cct, 20) << __func__ << " sending message type=" << header.type
                 << " src " << entity_name_t(header.src)
                 << " front=" << header.front_len << " data=" << header.data_len
                 << " off " << header.data_off << dendl;
  // small, fragmented payloads are flattened by copy to avoid a long iovec;
  // larger ones are claimed (zero-copy) into outgoing_bl
  if ((bl.length() <= ASYNC_COALESCE_THRESHOLD) && (bl.get_num_buffers() > 1)) {
    for (const auto &pb : bl.buffers()) {
      connection->outgoing_bl.append((char *)pb.c_str(), pb.length());
    }
  } else {
    connection->outgoing_bl.claim_append(bl);
  }
  // send footer; if receiver doesn't support signatures, use the old footer
  // format
  ceph_msg_footer_old old_footer;
  if (connection->has_feature(CEPH_FEATURE_MSG_AUTH)) {
    connection->outgoing_bl.append((char *)&footer, sizeof(footer));
  } else {
    if (messenger->crcflags & MSG_CRC_HEADER) {
      old_footer.front_crc = footer.front_crc;
      old_footer.middle_crc = footer.middle_crc;
    } else {
      old_footer.front_crc = old_footer.middle_crc = 0;
    }
    old_footer.data_crc =
        messenger->crcflags & MSG_CRC_DATA ? footer.data_crc : 0;
    old_footer.flags = footer.flags;
    connection->outgoing_bl.append((char *)&old_footer, sizeof(old_footer));
  }
  m->trace.event("async writing message");
  ldout(cct, 20) << __func__ << " sending " << m->get_seq() << " " << m
                 << dendl;
  ssize_t total_send_size = connection->outgoing_bl.length();
  ssize_t rc = connection->_try_send(more);
  if (rc < 0) {
    ldout(cct, 1) << __func__ << " error sending " << m << ", "
                  << cpp_strerror(rc) << dendl;
  } else {
    // account only the bytes that actually left outgoing_bl
    connection->logger->inc(
        l_msgr_send_bytes, total_send_size - connection->outgoing_bl.length());
    ldout(cct, 10) << __func__ << " sending " << m
                   << (rc ? " continuely." : " done.") << dendl;
  }
#if defined(WITH_EVENTTRACE)
  if (m->get_type() == CEPH_MSG_OSD_OP)
    OID_EVENT_TRACE_WITH_MSG(m, "SEND_MSG_OSD_OP_END", false);
  else if (m->get_type() == CEPH_MSG_OSD_OPREPLY)
    OID_EVENT_TRACE_WITH_MSG(m, "SEND_MSG_OSD_OPREPLY_END", false);
#endif
  m->put();
  return rc;
}
// Move sent-but-unacknowledged messages back onto the highest-priority out
// queue for retransmission after a fault.  Payloads are cleared so each
// message is re-encoded, and out_seq is rewound so they get the same
// sequence numbers.  Caller must hold write_lock.
void ProtocolV1::requeue_sent() {
  write_in_progress = false;
  if (sent.empty()) {
    return;
  }
  list<pair<ceph::buffer::list, Message *> > &rq = out_q[CEPH_MSG_PRIO_HIGHEST];
  out_seq -= sent.size();
  // walk from newest to oldest, pushing to the front so queue order is kept
  while (!sent.empty()) {
    Message *m = sent.back();
    sent.pop_back();
    ldout(cct, 10) << __func__ << " " << *m << " for resend "
                   << " (" << m->get_seq() << ")" << dendl;
    m->clear_payload();
    rq.push_front(make_pair(ceph::buffer::list(), m));
  }
}
// Drop requeued messages the peer already received (seq number <= seq) from
// the highest-priority queue and return the updated out_seq.  Note the
// `out_seq` parameter intentionally shadows the member: the caller passes
// the current value and applies the returned one.  Stops at the first
// message with seq 0 (never sent) or beyond `seq`.
uint64_t ProtocolV1::discard_requeued_up_to(uint64_t out_seq, uint64_t seq) {
  ldout(cct, 10) << __func__ << " " << seq << dendl;
  std::lock_guard<std::mutex> l(connection->write_lock);
  if (out_q.count(CEPH_MSG_PRIO_HIGHEST) == 0) {
    return seq;
  }
  list<pair<ceph::buffer::list, Message *> > &rq = out_q[CEPH_MSG_PRIO_HIGHEST];
  uint64_t count = out_seq;
  while (!rq.empty()) {
    pair<ceph::buffer::list, Message *> p = rq.front();
    if (p.second->get_seq() == 0 || p.second->get_seq() > seq) break;
    ldout(cct, 10) << __func__ << " " << *(p.second) << " for resend seq "
                   << p.second->get_seq() << " <= " << seq << ", discarding"
                   << dendl;
    p.second->put();
    rq.pop_front();
    count++;
  }
  // remove the (now possibly empty) resend queue entirely
  if (rq.empty()) out_q.erase(CEPH_MSG_PRIO_HIGHEST);
  return count;
}
/*
 * Tears down the message queues and removes them from the
 * DispatchQueue. write_lock must be held prior to calling.
 */
void ProtocolV1::discard_out_queue() {
ldout(cct, 10) << __func__ << " started" << dendl;
for (list<Message *>::iterator p = sent.begin(); p != sent.end(); ++p) {
ldout(cct, 20) << __func__ << " discard " << *p << dendl;
(*p)->put();
}
sent.clear();
for (map<int, list<pair<ceph::buffer::list, Message *> > >::iterator p =
out_q.begin();
p != out_q.end(); ++p) {
for (list<pair<ceph::buffer::list, Message *> >::iterator r = p->second.begin();
r != p->second.end(); ++r) {
ldout(cct, 20) << __func__ << " discard " << r->second << dendl;
r->second->put();
}
}
out_q.clear();
write_in_progress = false;
}
void ProtocolV1::reset_security()
{
  ldout(cct, 5) << __func__ << dendl;
  // Drop the current session handler and any partially exchanged authorizer
  // state, then start over with a fresh negotiation context.
  session_security.reset();
  authorizer_more.clear();
  auth_meta.reset(new AuthConnectionMeta);
}
// Undo in-progress receive state after a fault/replace: reset the security
// handlers (on the owning event thread), clear pending read/write callbacks,
// and return any throttle reservations held for the partially received
// message (which stages ran is derived from `state`).
void ProtocolV1::reset_recv_state()
{
  ldout(cct, 5) << __func__ << dendl;
  // execute in the same thread that uses the `session_security`.
  // We need to do the warp because holding `write_lock` is not
  // enough as `write_event()` releases it just before calling
  // `write_message()`. `submit_to()` here is NOT blocking.
  if (!connection->center->in_thread()) {
    connection->center->submit_to(connection->center->get_id(), [this] {
      ldout(cct, 5) << "reset_recv_state (warped) reseting security handlers"
                    << dendl;
      // Possibly unnecessary. See the comment in `deactivate_existing`.
      std::lock_guard<std::mutex> l(connection->lock);
      std::lock_guard<std::mutex> wl(connection->write_lock);
      reset_security();
    }, /* always_async = */true);
  } else {
    reset_security();
  }
  // clean read and write callbacks
  connection->pendingReadLen.reset();
  connection->writeCallback.reset();
  // release throttle grabbed during the THROTTLE_* stages but not yet
  // released by dispatch (we were between THROTTLE_MESSAGE and footer)
  if (state > THROTTLE_MESSAGE && state <= READ_FOOTER_AND_DISPATCH &&
      connection->policy.throttler_messages) {
    ldout(cct, 10) << __func__ << " releasing " << 1
                   << " message to policy throttler "
                   << connection->policy.throttler_messages->get_current()
                   << "/" << connection->policy.throttler_messages->get_max()
                   << dendl;
    connection->policy.throttler_messages->put();
  }
  if (state > THROTTLE_BYTES && state <= READ_FOOTER_AND_DISPATCH) {
    if (connection->policy.throttler_bytes) {
      ldout(cct, 10) << __func__ << " releasing " << cur_msg_size
                     << " bytes to policy throttler "
                     << connection->policy.throttler_bytes->get_current() << "/"
                     << connection->policy.throttler_bytes->get_max() << dendl;
      connection->policy.throttler_bytes->put(cur_msg_size);
    }
  }
  if (state > THROTTLE_DISPATCH_QUEUE && state <= READ_FOOTER_AND_DISPATCH) {
    ldout(cct, 10)
        << __func__ << " releasing " << cur_msg_size
        << " bytes to dispatch_queue throttler "
        << connection->dispatch_queue->dispatch_throttler.get_current() << "/"
        << connection->dispatch_queue->dispatch_throttler.get_max() << dendl;
    connection->dispatch_queue->dispatch_throttle_release(cur_msg_size);
  }
}
// Pop the next message (and, when available, its pre-encoded payload) from
// the highest-priority non-empty queue.  Returns nullptr when nothing is
// queued.  Caller must hold write_lock.
// Fix: use ceph_assert like the rest of this file (a bare assert compiles
// out under NDEBUG, silently dropping the non-empty-bl check).
Message *ProtocolV1::_get_next_outgoing(ceph::buffer::list *bl) {
  Message *m = 0;
  if (!out_q.empty()) {
    // rbegin() == highest priority key
    map<int, list<pair<ceph::buffer::list, Message *> > >::reverse_iterator it =
        out_q.rbegin();
    ceph_assert(!it->second.empty());
    list<pair<ceph::buffer::list, Message *> >::iterator p = it->second.begin();
    m = p->second;
    if (p->first.length() && bl) {
      ceph_assert(bl->length() == 0);
      bl->swap(p->first);
    }
    it->second.erase(p);
    if (it->second.empty()) out_q.erase(it->first);
  }
  return m;
}
/**
* Client Protocol V1
**/
CtPtr ProtocolV1::send_client_banner() {
  ldout(cct, 20) << __func__ << dendl;
  state = CONNECTING;
  // Client side of the handshake starts by sending our banner string.
  ceph::buffer::list banner_bl;
  banner_bl.append(CEPH_BANNER, strlen(CEPH_BANNER));
  return WRITE(banner_bl, handle_client_banner_write);
}
CtPtr ProtocolV1::handle_client_banner_write(int r) {
  // Completion callback for the client banner write.
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r >= 0) {
    ldout(cct, 10) << __func__ << " connect write banner done: "
                   << connection->get_peer_addr() << dendl;
    return wait_server_banner();
  }
  ldout(cct, 1) << __func__ << " write client banner failed" << dendl;
  return _fault();
}
// Wait for the server's handshake reply: its banner followed by two legacy
// addresses (the server's own address and the address it sees us at).
// Fix: removed the unused local `ceph::buffer::list myaddrbl` (declared but
// never referenced in this function).
CtPtr ProtocolV1::wait_server_banner() {
  state = CONNECTING_WAIT_BANNER_AND_IDENTIFY;
  ldout(cct, 20) << __func__ << dendl;
  unsigned banner_len = strlen(CEPH_BANNER);
  unsigned need_len = banner_len + sizeof(ceph_entity_addr) * 2;
  return READ(need_len, handle_server_banner_and_identify);
}
// Handle the server's banner + address pair: verify the banner, sanity-check
// the server's claimed address against the one we dialed, optionally learn
// our own externally visible address, then send our address back.  Note that
// connection->lock is dropped around learned_addr(), so `state` must be
// re-validated afterwards.
CtPtr ProtocolV1::handle_server_banner_and_identify(char *buffer, int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r < 0) {
    ldout(cct, 1) << __func__ << " read banner and identify addresses failed"
                  << dendl;
    return _fault();
  }
  unsigned banner_len = strlen(CEPH_BANNER);
  if (memcmp(buffer, CEPH_BANNER, banner_len)) {
    ldout(cct, 0) << __func__ << " connect protocol error (bad banner) on peer "
                  << connection->get_peer_addr() << dendl;
    return _fault();
  }
  ceph::buffer::list bl;
  entity_addr_t paddr, peer_addr_for_me;
  // decode the two legacy-encoded addresses that follow the banner
  bl.append(buffer + banner_len, sizeof(ceph_entity_addr) * 2);
  auto p = bl.cbegin();
  try {
    decode(paddr, p);
    decode(peer_addr_for_me, p);
  } catch (const ceph::buffer::error &e) {
    lderr(cct) << __func__ << " decode peer addr failed " << dendl;
    return _fault();
  }
  ldout(cct, 20) << __func__ << " connect read peer addr " << paddr
                 << " on socket " << connection->cs.fd() << dendl;
  entity_addr_t peer_addr = connection->peer_addrs->legacy_addr();
  if (peer_addr != paddr) {
    // a blank IP with matching port/nonce means we connected to ourselves
    if (paddr.is_blank_ip() && peer_addr.get_port() == paddr.get_port() &&
        peer_addr.get_nonce() == paddr.get_nonce()) {
      ldout(cct, 0) << __func__ << " connect claims to be " << paddr << " not "
                    << peer_addr << " - presumably this is the same node!"
                    << dendl;
    } else {
      ldout(cct, 10) << __func__ << " connect claims to be " << paddr << " not "
                     << peer_addr << dendl;
      return _fault();
    }
  }
  ldout(cct, 20) << __func__ << " connect peer addr for me is "
                 << peer_addr_for_me << dendl;
  // learn our own address if we don't know it yet, either from the peer's
  // view of us or from the socket's local name
  if (messenger->get_myaddrs().empty() ||
      messenger->get_myaddrs().front().is_blank_ip()) {
    sockaddr_storage ss;
    socklen_t len = sizeof(ss);
    getsockname(connection->cs.fd(), (sockaddr *)&ss, &len);
    entity_addr_t a;
    if (cct->_conf->ms_learn_addr_from_peer) {
      ldout(cct, 1) << __func__ << " peer " << connection->target_addr
                    << " says I am " << peer_addr_for_me << " (socket says "
                    << (sockaddr*)&ss << ")" << dendl;
      a = peer_addr_for_me;
    } else {
      ldout(cct, 1) << __func__ << " socket to " << connection->target_addr
                    << " says I am " << (sockaddr*)&ss
                    << " (peer says " << peer_addr_for_me << ")" << dendl;
      a.set_sockaddr((sockaddr *)&ss);
    }
    a.set_type(entity_addr_t::TYPE_LEGACY); // anything but NONE; learned_addr ignores this
    a.set_port(0);
    // learned_addr may block on other connections; must not hold our lock
    connection->lock.unlock();
    messenger->learned_addr(a);
    if (cct->_conf->ms_inject_internal_delays &&
        cct->_conf->ms_inject_socket_failures) {
      if (rand() % cct->_conf->ms_inject_socket_failures == 0) {
        ldout(cct, 10) << __func__ << " sleep for "
                       << cct->_conf->ms_inject_internal_delays << dendl;
        utime_t t;
        t.set_from_double(cct->_conf->ms_inject_internal_delays);
        t.sleep();
      }
    }
    connection->lock.lock();
    // the connection may have been reset/replaced while unlocked
    if (state != CONNECTING_WAIT_BANNER_AND_IDENTIFY) {
      ldout(cct, 1) << __func__
                    << " state changed while learned_addr, mark_down or "
                    << " replacing must be happened just now" << dendl;
      return nullptr;
    }
  }
  ceph::buffer::list myaddrbl;
  encode(messenger->get_myaddr_legacy(), myaddrbl, 0);  // legacy
  return WRITE(myaddrbl, handle_my_addr_write);
}
CtPtr ProtocolV1::handle_my_addr_write(int r) {
  // Completion callback for writing our own address to the server.
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r >= 0) {
    ldout(cct, 10) << __func__ << " connect sent my addr "
                   << messenger->get_myaddr_legacy() << dendl;
    return CONTINUE(send_connect_message);
  }
  ldout(cct, 2) << __func__ << " connect couldn't write my addr, "
                << cpp_strerror(r) << dendl;
  return _fault();
}
// Build and send the ceph_msg_connect frame: gather an auth payload (either
// the saved challenge response in authorizer_more or a fresh request from
// the auth client), fill in feature/sequence/protocol fields, and write the
// whole thing.  Drops connection->lock around get_auth_request(), so state
// is re-checked afterwards.
CtPtr ProtocolV1::send_connect_message()
{
  state = CONNECTING_SEND_CONNECT_MSG;
  ldout(cct, 20) << __func__ << dendl;
  ceph_assert(messenger->auth_client);
  ceph::buffer::list auth_bl;
  vector<uint32_t> preferred_modes;
  // mon-to-non-mon connections skip the authorizer payload entirely
  if (connection->peer_type != CEPH_ENTITY_TYPE_MON ||
      messenger->get_myname().type() == CEPH_ENTITY_TYPE_MON) {
    if (authorizer_more.length()) {
      // resending after a server challenge: reuse the augmented payload
      ldout(cct,10) << __func__ << " using augmented (challenge) auth payload"
                    << dendl;
      auth_bl = authorizer_more;
    } else {
      auto am = auth_meta;
      authorizer_more.clear();
      connection->lock.unlock();
      int r = messenger->auth_client->get_auth_request(
        connection, am.get(),
        &am->auth_method, &preferred_modes, &auth_bl);
      connection->lock.lock();
      if (r < 0) {
        return _fault();
      }
      // the connection may have been reset/replaced while unlocked
      if (state != CONNECTING_SEND_CONNECT_MSG) {
        ldout(cct, 1) << __func__ << " state changed!" << dendl;
        return _fault();
      }
    }
  }
  ceph_msg_connect connect;
  connect.features = connection->policy.features_supported;
  connect.host_type = messenger->get_myname().type();
  connect.global_seq = global_seq;
  connect.connect_seq = connect_seq;
  connect.protocol_version =
      messenger->get_proto_version(connection->peer_type, true);
  if (auth_bl.length()) {
    ldout(cct, 10) << __func__
                   << " connect_msg.authorizer_len=" << auth_bl.length()
                   << " protocol=" << auth_meta->auth_method << dendl;
    connect.authorizer_protocol = auth_meta->auth_method;
    connect.authorizer_len = auth_bl.length();
  } else {
    connect.authorizer_protocol = 0;
    connect.authorizer_len = 0;
  }
  connect.flags = 0;
  if (connection->policy.lossy) {
    connect.flags |=
        CEPH_MSG_CONNECT_LOSSY;  // this is fyi, actually, server decides!
  }
  ceph::buffer::list bl;
  bl.append((char *)&connect, sizeof(connect));
  if (auth_bl.length()) {
    bl.append(auth_bl.c_str(), auth_bl.length());
  }
  ldout(cct, 10) << __func__ << " connect sending gseq=" << global_seq
                 << " cseq=" << connect_seq
                 << " proto=" << connect.protocol_version << dendl;
  return WRITE(bl, handle_connect_message_write);
}
// Completion callback for the connect-message write (client side).
// On success, start waiting for the server's connect reply.
CtPtr ProtocolV1::handle_connect_message_write(int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r >= 0) {
    ldout(cct, 20) << __func__
                   << " connect wrote (self +) cseq, waiting for reply" << dendl;
    return wait_connect_reply();
  }
  ldout(cct, 2) << __func__ << " connect couldn't send reply "
                << cpp_strerror(r) << dendl;
  return _fault();
}
// Client side: arm a read for the fixed-size ceph_msg_connect_reply that
// answers the connect message we just sent.
CtPtr ProtocolV1::wait_connect_reply() {
  ldout(cct, 20) << __func__ << dendl;
  // FIPS zeroization audit 20191115: this memset is not security related.
  memset(&connect_reply, 0, sizeof(connect_reply));
  return READ(sizeof(connect_reply), handle_connect_reply_1);
}
// First stage of connect-reply handling: copy the wire struct out of the
// read buffer and, if the server attached an authorizer reply, read that
// before interpreting the reply tag.
CtPtr ProtocolV1::handle_connect_reply_1(char *buffer, int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r < 0) {
    ldout(cct, 1) << __func__ << " read connect reply failed" << dendl;
    return _fault();
  }
  connect_reply = *(reinterpret_cast<ceph_msg_connect_reply *>(buffer));
  ldout(cct, 20) << __func__ << " connect got reply tag "
                 << (int)connect_reply.tag << " connect_seq "
                 << connect_reply.connect_seq << " global_seq "
                 << connect_reply.global_seq << " proto "
                 << connect_reply.protocol_version << " flags "
                 << (int)connect_reply.flags << " features "
                 << connect_reply.features << dendl;
  // an attached authorizer payload must be consumed before the tag
  return connect_reply.authorizer_len ? wait_connect_reply_auth()
                                      : handle_connect_reply_2();
}
// Client side: read the authorizer reply payload attached to the
// connect reply.  The 4096 cap bounds a length taken off the wire.
CtPtr ProtocolV1::wait_connect_reply_auth() {
  ldout(cct, 20) << __func__ << dendl;
  ldout(cct, 10) << __func__
                 << " reply.authorizer_len=" << connect_reply.authorizer_len
                 << dendl;
  ceph_assert(connect_reply.authorizer_len < 4096);
  return READ(connect_reply.authorizer_len, handle_connect_reply_auth);
}
// Client side: feed the server's authorizer reply to auth_client.  On a
// CHALLENGE_AUTHORIZER tag we stash the augmented payload and resend the
// connect message; otherwise we finish auth and fall through to normal
// reply handling.  auth callbacks run with connection->lock dropped.
CtPtr ProtocolV1::handle_connect_reply_auth(char *buffer, int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r < 0) {
    ldout(cct, 1) << __func__ << " read connect reply authorizer failed"
                  << dendl;
    return _fault();
  }
  ceph::buffer::list authorizer_reply;
  authorizer_reply.append(buffer, connect_reply.authorizer_len);
  // same mon/non-mon gating as send_connect_message()
  if (connection->peer_type != CEPH_ENTITY_TYPE_MON ||
      messenger->get_myname().type() == CEPH_ENTITY_TYPE_MON) {
    auto am = auth_meta;
    bool more = (connect_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER);
    ceph::buffer::list auth_retry_bl;
    int r;
    connection->lock.unlock();
    if (more) {
      r = messenger->auth_client->handle_auth_reply_more(
        connection, am.get(), authorizer_reply, &auth_retry_bl);
    } else {
      // these aren't used for v1
      CryptoKey skey;
      string con_secret;
      r = messenger->auth_client->handle_auth_done(
        connection, am.get(),
        0 /* global id */, 0 /* con mode */,
        authorizer_reply,
        &skey, &con_secret);
    }
    connection->lock.lock();
    // the connection may have been reset/replaced while unlocked
    if (state != CONNECTING_SEND_CONNECT_MSG) {
      ldout(cct, 1) << __func__ << " state changed" << dendl;
      return _fault();
    }
    if (r < 0) {
      return _fault();
    }
    if (more && r == 0) {
      // challenge round trip: resend connect with augmented payload
      authorizer_more = auth_retry_bl;
      return CONTINUE(send_connect_message);
    }
  }
  return handle_connect_reply_2();
}
// Client side: interpret the connect reply tag.  Error tags fault the
// connection; retry tags adjust our sequence numbers and resend the
// connect message; SEQ/READY complete the handshake.
CtPtr ProtocolV1::handle_connect_reply_2() {
  ldout(cct, 20) << __func__ << dendl;
  if (connect_reply.tag == CEPH_MSGR_TAG_FEATURES) {
    ldout(cct, 0) << __func__ << " connect protocol feature mismatch, my "
                  << std::hex << connection->policy.features_supported
                  << " < peer " << connect_reply.features << " missing "
                  << (connect_reply.features &
                      ~connection->policy.features_supported)
                  << std::dec << dendl;
    return _fault();
  }
  if (connect_reply.tag == CEPH_MSGR_TAG_BADPROTOVER) {
    ldout(cct, 0) << __func__ << " connect protocol version mismatch, my "
                  << messenger->get_proto_version(connection->peer_type, true)
                  << " != " << connect_reply.protocol_version << dendl;
    return _fault();
  }
  if (connect_reply.tag == CEPH_MSGR_TAG_BADAUTHORIZER) {
    ldout(cct, 0) << __func__ << " connect got BADAUTHORIZER" << dendl;
    // discard any challenge state so the next attempt starts fresh
    authorizer_more.clear();
    return _fault();
  }
  if (connect_reply.tag == CEPH_MSGR_TAG_RESETSESSION) {
    ldout(cct, 0) << __func__ << " connect got RESETSESSION" << dendl;
    session_reset();
    connect_seq = 0;
    // see session_reset
    connection->outgoing_bl.clear();
    return CONTINUE(send_connect_message);
  }
  if (connect_reply.tag == CEPH_MSGR_TAG_RETRY_GLOBAL) {
    // bump our global_seq past the peer's and retry
    global_seq = messenger->get_global_seq(connect_reply.global_seq);
    ldout(cct, 5) << __func__ << " connect got RETRY_GLOBAL "
                  << connect_reply.global_seq << " chose new " << global_seq
                  << dendl;
    return CONTINUE(send_connect_message);
  }
  if (connect_reply.tag == CEPH_MSGR_TAG_RETRY_SESSION) {
    // adopt the peer's (higher) connect_seq and retry
    ceph_assert(connect_reply.connect_seq > connect_seq);
    ldout(cct, 5) << __func__ << " connect got RETRY_SESSION " << connect_seq
                  << " -> " << connect_reply.connect_seq << dendl;
    connect_seq = connect_reply.connect_seq;
    return CONTINUE(send_connect_message);
  }
  if (connect_reply.tag == CEPH_MSGR_TAG_WAIT) {
    // we lost a connection race; wait for the peer's incoming attempt
    ldout(cct, 1) << __func__ << " connect got WAIT (connection race)" << dendl;
    state = WAIT;
    return _fault();
  }
  uint64_t feat_missing;
  feat_missing =
      connection->policy.features_required & ~(uint64_t)connect_reply.features;
  if (feat_missing) {
    ldout(cct, 1) << __func__ << " missing required features " << std::hex
                  << feat_missing << std::dec << dendl;
    return _fault();
  }
  if (connect_reply.tag == CEPH_MSGR_TAG_SEQ) {
    // peer wants to exchange ack'ed message sequence numbers first
    ldout(cct, 10)
        << __func__
        << " got CEPH_MSGR_TAG_SEQ, reading acked_seq and writing in_seq"
        << dendl;
    return wait_ack_seq();
  }
  if (connect_reply.tag == CEPH_MSGR_TAG_READY) {
    ldout(cct, 10) << __func__ << " got CEPH_MSGR_TAG_READY " << dendl;
  }
  return client_ready();
}
// Client side: read the peer's acked sequence number (TAG_SEQ path).
CtPtr ProtocolV1::wait_ack_seq() {
  ldout(cct, 20) << __func__ << dendl;
  return READ(sizeof(uint64_t), handle_ack_seq);
}
// Client side (TAG_SEQ): drop messages the peer already acked, then send
// our own in_seq back so the peer can do the same.
CtPtr ProtocolV1::handle_ack_seq(char *buffer, int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r < 0) {
    ldout(cct, 1) << __func__ << " read connect ack seq failed" << dendl;
    return _fault();
  }
  const uint64_t newly_acked_seq = *reinterpret_cast<uint64_t *>(buffer);
  ldout(cct, 2) << __func__ << " got newly_acked_seq " << newly_acked_seq
                << " vs out_seq " << out_seq << dendl;
  out_seq = discard_requeued_up_to(out_seq, newly_acked_seq);
  // reply with our incoming sequence number
  uint64_t my_in_seq = in_seq;
  ceph::buffer::list bl;
  bl.append((char *)&my_in_seq, sizeof(my_in_seq));
  return WRITE(bl, handle_in_seq_write);
}
// Completion callback for the in_seq write (TAG_SEQ path); finishing it
// completes the client-side handshake.
CtPtr ProtocolV1::handle_in_seq_write(int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r >= 0) {
    ldout(cct, 10) << __func__ << " send in_seq done " << dendl;
    return client_ready();
  }
  ldout(cct, 10) << __func__ << " failed to send in_seq " << dendl;
  return _fault();
}
// Client side: handshake complete.  Record the negotiated parameters,
// set up ongoing message security, and notify dispatchers.
CtPtr ProtocolV1::client_ready() {
  ldout(cct, 20) << __func__ << dendl;
  // hooray!
  peer_global_seq = connect_reply.global_seq;
  connection->policy.lossy = connect_reply.flags & CEPH_MSG_CONNECT_LOSSY;
  once_ready = true;
  connect_seq += 1;
  ceph_assert(connect_seq == connect_reply.connect_seq);
  backoff = utime_t();  // reset reconnect backoff on success
  connection->set_features((uint64_t)connect_reply.features &
                           (uint64_t)connection->policy.features_supported);
  ldout(cct, 10) << __func__ << " connect success " << connect_seq
                 << ", lossy = " << connection->policy.lossy << ", features "
                 << connection->get_features() << dendl;
  // If we have an authorizer, get a new AuthSessionHandler to deal with
  // ongoing security of the connection.  PLR
  if (auth_meta->authorizer) {
    ldout(cct, 10) << __func__ << " setting up session_security with auth "
                   << auth_meta->authorizer.get() << dendl;
    session_security.reset(get_auth_session_handler(
        cct, auth_meta->authorizer->protocol,
        auth_meta->session_key,
        connection->get_features()));
  } else {
    // We have no authorizer, so we shouldn't be applying security to messages
    // in this AsyncConnection.  PLR
    ldout(cct, 10) << __func__ << " no authorizer, clearing session_security"
                   << dendl;
    session_security.reset();
  }
  if (connection->delay_state) {
    ceph_assert(connection->delay_state->ready());
  }
  // notify dispatch queue + fast dispatchers that the session is up
  connection->dispatch_queue->queue_connect(connection);
  messenger->ms_deliver_handle_fast_connect(connection);
  return ready();
}
/**
* Server Protocol V1
**/
// Server side: first step of accepting a connection.  Send the banner
// string, our legacy address, and the peer's address as we see it
// (so a client behind NAT can learn its effective address).
CtPtr ProtocolV1::send_server_banner() {
  ldout(cct, 20) << __func__ << dendl;
  state = ACCEPTING;
  ceph::buffer::list bl;
  bl.append(CEPH_BANNER, strlen(CEPH_BANNER));
  // as a server, we should have a legacy addr if we accepted this connection.
  auto legacy = messenger->get_myaddrs().legacy_addr();
  encode(legacy, bl, 0);  // legacy
  connection->port = legacy.get_port();
  encode(connection->target_addr, bl, 0);  // legacy
  ldout(cct, 1) << __func__ << " sd=" << connection->cs.fd()
                << " legacy " << legacy
                << " socket_addr " << connection->socket_addr
                << " target_addr " << connection->target_addr
                << dendl;
  return WRITE(bl, handle_server_banner_write);
}
// Completion callback for the server banner write; next we expect the
// client's banner + address.
CtPtr ProtocolV1::handle_server_banner_write(int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r >= 0) {
    ldout(cct, 10) << __func__ << " write banner and addr done: "
                   << connection->get_peer_addr() << dendl;
    return wait_client_banner();
  }
  ldout(cct, 1) << " write server banner failed" << dendl;
  return _fault();
}
// Server side: read the client's banner followed by its ceph_entity_addr.
CtPtr ProtocolV1::wait_client_banner() {
  ldout(cct, 20) << __func__ << dendl;
  return READ(strlen(CEPH_BANNER) + sizeof(ceph_entity_addr),
              handle_client_banner);
}
// Server side: validate the client's banner and decode its advertised
// address, filling in a blank IP from the socket if the client doesn't
// know its own address.
CtPtr ProtocolV1::handle_client_banner(char *buffer, int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r < 0) {
    ldout(cct, 1) << __func__ << " read peer banner and addr failed" << dendl;
    return _fault();
  }
  if (memcmp(buffer, CEPH_BANNER, strlen(CEPH_BANNER))) {
    // NOTE(review): buffer is a raw read buffer with no NUL terminator;
    // streaming it as a C string here may log trailing bytes — confirm
    // whether the recv buffer guarantees termination.
    ldout(cct, 1) << __func__ << " accept peer sent bad banner '" << buffer
                  << "' (should be '" << CEPH_BANNER << "')" << dendl;
    return _fault();
  }
  ceph::buffer::list addr_bl;
  entity_addr_t peer_addr;
  addr_bl.append(buffer + strlen(CEPH_BANNER), sizeof(ceph_entity_addr));
  try {
    auto ti = addr_bl.cbegin();
    decode(peer_addr, ti);
  } catch (const ceph::buffer::error &e) {
    lderr(cct) << __func__ << " decode peer_addr failed " << dendl;
    return _fault();
  }
  ldout(cct, 10) << __func__ << " accept peer addr is " << peer_addr << dendl;
  if (peer_addr.is_blank_ip()) {
    // peer apparently doesn't know what ip they have; figure it out for them.
    int port = peer_addr.get_port();
    peer_addr.set_sockaddr(connection->target_addr.get_sockaddr());
    peer_addr.set_port(port);
    ldout(cct, 0) << __func__ << " accept peer addr is really " << peer_addr
                  << " (socket is " << connection->target_addr << ")" << dendl;
  }
  connection->set_peer_addr(peer_addr);  // so that connection_state gets set up
  connection->target_addr = peer_addr;
  return CONTINUE(wait_connect_message);
}
// Server side: arm a read for the client's fixed-size ceph_msg_connect.
CtPtr ProtocolV1::wait_connect_message() {
  ldout(cct, 20) << __func__ << dendl;
  // FIPS zeroization audit 20191115: this memset is not security related.
  memset(&connect_msg, 0, sizeof(connect_msg));
  return READ(sizeof(connect_msg), handle_connect_message_1);
}
// Server side: copy the received connect message out of the read buffer
// and, if an authorizer payload follows, read it before processing.
CtPtr ProtocolV1::handle_connect_message_1(char *buffer, int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r < 0) {
    ldout(cct, 1) << __func__ << " read connect msg failed" << dendl;
    return _fault();
  }
  connect_msg = *(reinterpret_cast<ceph_msg_connect *>(buffer));
  state = ACCEPTING_WAIT_CONNECT_MSG_AUTH;
  return connect_msg.authorizer_len ? wait_connect_message_auth()
                                    : handle_connect_message_2();
}
// Server side: read the client's authorizer payload into authorizer_buf;
// its length comes from the just-received connect message.
CtPtr ProtocolV1::wait_connect_message_auth() {
  ldout(cct, 20) << __func__ << dendl;
  authorizer_buf.clear();
  authorizer_buf.push_back(ceph::buffer::create(connect_msg.authorizer_len));
  return READB(connect_msg.authorizer_len, authorizer_buf.c_str(),
               handle_connect_message_auth);
}
// Completion callback for the authorizer read; the payload already lives
// in authorizer_buf, so on success we just continue processing.
CtPtr ProtocolV1::handle_connect_message_auth(char *buffer, int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r >= 0) {
    return handle_connect_message_2();
  }
  ldout(cct, 1) << __func__ << " read connect authorizer failed" << dendl;
  return _fault();
}
// Server side: the full connect message (and any authorizer payload) has
// arrived.  Validate protocol/feature compatibility, verify the
// authorizer via auth_server, then resolve any race against an existing
// connection to the same peer — replying with RETRY_*, RESETSESSION,
// WAIT, or replacing/opening a session as the msgr1 rules dictate.
CtPtr ProtocolV1::handle_connect_message_2() {
  ldout(cct, 20) << __func__ << dendl;
  ldout(cct, 20) << __func__ << " accept got peer connect_seq "
                 << connect_msg.connect_seq << " global_seq "
                 << connect_msg.global_seq << dendl;
  connection->set_peer_type(connect_msg.host_type);
  connection->policy = messenger->get_policy(connect_msg.host_type);
  ldout(cct, 10) << __func__ << " accept of host_type " << connect_msg.host_type
                 << ", policy.lossy=" << connection->policy.lossy
                 << " policy.server=" << connection->policy.server
                 << " policy.standby=" << connection->policy.standby
                 << " policy.resetcheck=" << connection->policy.resetcheck
                 << " features 0x" << std::hex << (uint64_t)connect_msg.features
                 << std::dec
                 << dendl;
  ceph_msg_connect_reply reply;
  ceph::buffer::list authorizer_reply;
  // FIPS zeroization audit 20191115: this memset is not security related.
  memset(&reply, 0, sizeof(reply));
  reply.protocol_version =
      messenger->get_proto_version(connection->peer_type, false);
  // mismatch?
  ldout(cct, 10) << __func__ << " accept my proto " << reply.protocol_version
                 << ", their proto " << connect_msg.protocol_version << dendl;
  if (connect_msg.protocol_version != reply.protocol_version) {
    return send_connect_message_reply(CEPH_MSGR_TAG_BADPROTOVER, reply,
                                      authorizer_reply);
  }
  // require signatures for cephx?
  if (connect_msg.authorizer_protocol == CEPH_AUTH_CEPHX) {
    // cluster-internal daemons and external services have separate
    // require-signature / require-v2 config knobs
    if (connection->peer_type == CEPH_ENTITY_TYPE_OSD ||
        connection->peer_type == CEPH_ENTITY_TYPE_MDS ||
        connection->peer_type == CEPH_ENTITY_TYPE_MGR) {
      if (cct->_conf->cephx_require_signatures ||
          cct->_conf->cephx_cluster_require_signatures) {
        ldout(cct, 10)
            << __func__
            << " using cephx, requiring MSG_AUTH feature bit for cluster"
            << dendl;
        connection->policy.features_required |= CEPH_FEATURE_MSG_AUTH;
      }
      if (cct->_conf->cephx_require_version >= 2 ||
          cct->_conf->cephx_cluster_require_version >= 2) {
        ldout(cct, 10)
            << __func__
            << " using cephx, requiring cephx v2 feature bit for cluster"
            << dendl;
        connection->policy.features_required |= CEPH_FEATUREMASK_CEPHX_V2;
      }
    } else {
      if (cct->_conf->cephx_require_signatures ||
          cct->_conf->cephx_service_require_signatures) {
        ldout(cct, 10)
            << __func__
            << " using cephx, requiring MSG_AUTH feature bit for service"
            << dendl;
        connection->policy.features_required |= CEPH_FEATURE_MSG_AUTH;
      }
      if (cct->_conf->cephx_require_version >= 2 ||
          cct->_conf->cephx_service_require_version >= 2) {
        ldout(cct, 10)
            << __func__
            << " using cephx, requiring cephx v2 feature bit for service"
            << dendl;
        connection->policy.features_required |= CEPH_FEATUREMASK_CEPHX_V2;
      }
    }
  }
  uint64_t feat_missing =
      connection->policy.features_required & ~(uint64_t)connect_msg.features;
  if (feat_missing) {
    ldout(cct, 1) << __func__ << " peer missing required features " << std::hex
                  << feat_missing << std::dec << dendl;
    return send_connect_message_reply(CEPH_MSGR_TAG_FEATURES, reply,
                                      authorizer_reply);
  }
  ceph::buffer::list auth_bl_copy = authorizer_buf;
  auto am = auth_meta;
  am->auth_method = connect_msg.authorizer_protocol;
  if (!HAVE_FEATURE((uint64_t)connect_msg.features, CEPHX_V2)) {
    // peer doesn't support it and we won't get here if we require it
    am->skip_authorizer_challenge = true;
  }
  // handle_auth_request may block; drop the connection lock around it,
  // then re-check the protocol state after every reacquisition below
  connection->lock.unlock();
  ldout(cct,10) << __func__ << " authorizor_protocol "
                << connect_msg.authorizer_protocol
                << " len " << auth_bl_copy.length()
                << dendl;
  bool more = (bool)auth_meta->authorizer_challenge;
  int r = messenger->auth_server->handle_auth_request(
    connection,
    am.get(),
    more,
    am->auth_method,
    auth_bl_copy,
    &authorizer_reply);
  if (r < 0) {
    connection->lock.lock();
    if (state != ACCEPTING_WAIT_CONNECT_MSG_AUTH) {
      ldout(cct, 1) << __func__ << " state changed" << dendl;
      return _fault();
    }
    ldout(cct, 0) << __func__ << ": got bad authorizer, auth_reply_len="
                  << authorizer_reply.length() << dendl;
    session_security.reset();
    return send_connect_message_reply(CEPH_MSGR_TAG_BADAUTHORIZER, reply,
                                      authorizer_reply);
  }
  if (r == 0) {
    // auth wants a challenge round trip with the client
    connection->lock.lock();
    if (state != ACCEPTING_WAIT_CONNECT_MSG_AUTH) {
      ldout(cct, 1) << __func__ << " state changed" << dendl;
      return _fault();
    }
    ldout(cct, 10) << __func__ << ": challenging authorizer" << dendl;
    ceph_assert(authorizer_reply.length());
    return send_connect_message_reply(CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER,
                                      reply, authorizer_reply);
  }
  // We've verified the authorizer for this AsyncConnection, so set up the
  // session security structure.  PLR
  ldout(cct, 10) << __func__ << " accept setting up session_security." << dendl;
  if (connection->policy.server &&
      connection->policy.lossy &&
      !connection->policy.register_lossy_clients) {
    // incoming lossy client, no need to register this connection
    // new session
    ldout(cct, 10) << __func__ << " accept new session" << dendl;
    connection->lock.lock();
    return open(reply, authorizer_reply);
  }
  // is there already a connection to this peer?  (race resolution below)
  AsyncConnectionRef existing = messenger->lookup_conn(*connection->peer_addrs);
  connection->inject_delay();
  connection->lock.lock();
  if (state != ACCEPTING_WAIT_CONNECT_MSG_AUTH) {
    ldout(cct, 1) << __func__ << " state changed" << dendl;
    return _fault();
  }
  if (existing == connection) {
    existing = nullptr;
  }
  if (existing && existing->protocol->proto_type != 1) {
    // existing connection speaks a different protocol version; drop it
    ldout(cct,1) << __func__ << " existing " << existing << " proto "
                 << existing->protocol.get() << " version is "
                 << existing->protocol->proto_type << ", marking down" << dendl;
    existing->mark_down();
    existing = nullptr;
  }
  if (existing) {
    // There is no possible that existing connection will acquire this
    // connection's lock
    existing->lock.lock();  // skip lockdep check (we are locking a second
                            // AsyncConnection here)
    ldout(cct,10) << __func__ << " existing=" << existing << " exproto="
                  << existing->protocol.get() << dendl;
    ProtocolV1 *exproto = dynamic_cast<ProtocolV1 *>(existing->protocol.get());
    ceph_assert(exproto);
    ceph_assert(exproto->proto_type == 1);
    if (exproto->state == CLOSED) {
      ldout(cct, 1) << __func__ << " existing " << existing
                    << " already closed." << dendl;
      existing->lock.unlock();
      existing = nullptr;
      return open(reply, authorizer_reply);
    }
    if (exproto->replacing) {
      // another replace is in flight; tell the peer to retry later
      ldout(cct, 1) << __func__
                    << " existing racing replace happened while replacing."
                    << " existing_state="
                    << connection->get_state_name(existing->state) << dendl;
      reply.global_seq = exproto->peer_global_seq;
      existing->lock.unlock();
      return send_connect_message_reply(CEPH_MSGR_TAG_RETRY_GLOBAL, reply,
                                        authorizer_reply);
    }
    if (connect_msg.global_seq < exproto->peer_global_seq) {
      // stale attempt from an older peer incarnation
      ldout(cct, 10) << __func__ << " accept existing " << existing << ".gseq "
                     << exproto->peer_global_seq << " > "
                     << connect_msg.global_seq << ", RETRY_GLOBAL" << dendl;
      reply.global_seq = exproto->peer_global_seq;  // so we can send it below..
      existing->lock.unlock();
      return send_connect_message_reply(CEPH_MSGR_TAG_RETRY_GLOBAL, reply,
                                        authorizer_reply);
    } else {
      ldout(cct, 10) << __func__ << " accept existing " << existing << ".gseq "
                     << exproto->peer_global_seq
                     << " <= " << connect_msg.global_seq << ", looks ok"
                     << dendl;
    }
    if (existing->policy.lossy) {
      // lossy channels are always replaced by a new incoming connection
      ldout(cct, 0)
          << __func__
          << " accept replacing existing (lossy) channel (new one lossy="
          << connection->policy.lossy << ")" << dendl;
      exproto->session_reset();
      return replace(existing, reply, authorizer_reply);
    }
    ldout(cct, 1) << __func__ << " accept connect_seq "
                  << connect_msg.connect_seq
                  << " vs existing csq=" << exproto->connect_seq
                  << " existing_state="
                  << connection->get_state_name(existing->state) << dendl;
    if (connect_msg.connect_seq == 0 && exproto->connect_seq > 0) {
      ldout(cct, 0)
          << __func__
          << " accept peer reset, then tried to connect to us, replacing"
          << dendl;
      // this is a hard reset from peer
      is_reset_from_peer = true;
      if (connection->policy.resetcheck) {
        exproto->session_reset();  // this resets out_queue, msg_ and
                                   // connect_seq #'s
      }
      return replace(existing, reply, authorizer_reply);
    }
    if (connect_msg.connect_seq < exproto->connect_seq) {
      // old attempt, or we sent READY but they didn't get it.
      ldout(cct, 10) << __func__ << " accept existing " << existing << ".cseq "
                     << exproto->connect_seq << " > " << connect_msg.connect_seq
                     << ", RETRY_SESSION" << dendl;
      reply.connect_seq = exproto->connect_seq + 1;
      existing->lock.unlock();
      return send_connect_message_reply(CEPH_MSGR_TAG_RETRY_SESSION, reply,
                                        authorizer_reply);
    }
    if (connect_msg.connect_seq == exproto->connect_seq) {
      // if the existing connection successfully opened, and/or
      // subsequently went to standby, then the peer should bump
      // their connect_seq and retry: this is not a connection race
      // we need to resolve here.
      if (exproto->state == OPENED || exproto->state == STANDBY) {
        ldout(cct, 10) << __func__ << " accept connection race, existing "
                       << existing << ".cseq " << exproto->connect_seq
                       << " == " << connect_msg.connect_seq
                       << ", OPEN|STANDBY, RETRY_SESSION " << dendl;
        // if connect_seq both zero, dont stuck into dead lock. it's ok to
        // replace
        if (connection->policy.resetcheck && exproto->connect_seq == 0) {
          return replace(existing, reply, authorizer_reply);
        }
        reply.connect_seq = exproto->connect_seq + 1;
        existing->lock.unlock();
        return send_connect_message_reply(CEPH_MSGR_TAG_RETRY_SESSION, reply,
                                          authorizer_reply);
      }
      // connection race?
      if (connection->peer_addrs->legacy_addr() < messenger->get_myaddr_legacy() ||
          existing->policy.server) {
        // incoming wins
        ldout(cct, 10) << __func__ << " accept connection race, existing "
                       << existing << ".cseq " << exproto->connect_seq
                       << " == " << connect_msg.connect_seq
                       << ", or we are server, replacing my attempt" << dendl;
        return replace(existing, reply, authorizer_reply);
      } else {
        // our existing outgoing wins
        ldout(messenger->cct, 10)
            << __func__ << " accept connection race, existing " << existing
            << ".cseq " << exproto->connect_seq
            << " == " << connect_msg.connect_seq << ", sending WAIT" << dendl;
        ceph_assert(connection->peer_addrs->legacy_addr() >
                    messenger->get_myaddr_legacy());
        existing->lock.unlock();
        // make sure we follow through with opening the existing
        // connection (if it isn't yet open) since we know the peer
        // has something to send to us.
        existing->send_keepalive();
        return send_connect_message_reply(CEPH_MSGR_TAG_WAIT, reply,
                                          authorizer_reply);
      }
    }
    ceph_assert(connect_msg.connect_seq > exproto->connect_seq);
    ceph_assert(connect_msg.global_seq >= exproto->peer_global_seq);
    if (connection->policy.resetcheck &&  // RESETSESSION only used by servers;
                                          // peers do not reset each other
        exproto->connect_seq == 0) {
      ldout(cct, 0) << __func__ << " accept we reset (peer sent cseq "
                    << connect_msg.connect_seq << ", " << existing
                    << ".cseq = " << exproto->connect_seq
                    << "), sending RESETSESSION " << dendl;
      existing->lock.unlock();
      return send_connect_message_reply(CEPH_MSGR_TAG_RESETSESSION, reply,
                                        authorizer_reply);
    }
    // reconnect
    ldout(cct, 10) << __func__ << " accept peer sent cseq "
                   << connect_msg.connect_seq << " > " << exproto->connect_seq
                   << dendl;
    return replace(existing, reply, authorizer_reply);
  }  // existing
  else if (!replacing && connect_msg.connect_seq > 0) {
    // we reset, and they are opening a new session
    ldout(cct, 0) << __func__ << " accept we reset (peer sent cseq "
                  << connect_msg.connect_seq << "), sending RESETSESSION"
                  << dendl;
    return send_connect_message_reply(CEPH_MSGR_TAG_RESETSESSION, reply,
                                      authorizer_reply);
  } else {
    // new session
    ldout(cct, 10) << __func__ << " accept new session" << dendl;
    existing = nullptr;
    return open(reply, authorizer_reply);
  }
}
// Server side: fill in the reply tag/features/authorizer length, append
// any authorizer reply payload, and write the whole thing back to the
// client.  After the write we loop back to wait_connect_message for the
// client's next attempt.
CtPtr ProtocolV1::send_connect_message_reply(char tag,
                                             ceph_msg_connect_reply &reply,
                                             ceph::buffer::list &authorizer_reply) {
  ldout(cct, 20) << __func__ << dendl;
  ceph::buffer::list reply_bl;
  reply.tag = tag;
  // advertise the intersection of our supported features with the peer's,
  // plus everything we require
  reply.features =
      ((uint64_t)connect_msg.features & connection->policy.features_supported) |
      connection->policy.features_required;
  reply.authorizer_len = authorizer_reply.length();
  reply_bl.append((char *)&reply, sizeof(reply));
  ldout(cct, 10) << __func__ << " reply features 0x" << std::hex
                 << reply.features << " = (policy sup 0x"
                 << connection->policy.features_supported
                 << " & connect 0x" << (uint64_t)connect_msg.features
                 << ") | policy req 0x"
                 << connection->policy.features_required
                 << dendl;
  if (reply.authorizer_len) {
    reply_bl.append(authorizer_reply.c_str(), authorizer_reply.length());
    authorizer_reply.clear();
  }
  return WRITE(reply_bl, handle_connect_message_reply_write);
}
// Completion callback for a (non-READY) connect reply write; success
// loops back to await the client's next connect message.
CtPtr ProtocolV1::handle_connect_message_reply_write(int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r >= 0) {
    return CONTINUE(wait_connect_message);
  }
  ldout(cct, 1) << " write connect message reply failed" << dendl;
  connection->inject_delay();
  return _fault();
}
// Server side: replace `existing` (an older connection to the same peer)
// with this incoming one.  Lossy channels are simply torn down; lossless
// channels transplant this connection's socket into `existing`, migrating
// it across workers/event centers via the deactivate/transfer lambdas.
// Called with both connection->lock and existing->lock held.
CtPtr ProtocolV1::replace(const AsyncConnectionRef& existing,
                          ceph_msg_connect_reply &reply,
                          ceph::buffer::list &authorizer_reply) {
  ldout(cct, 10) << __func__ << " accept replacing " << existing << dendl;
  connection->inject_delay();
  if (existing->policy.lossy) {
    // disconnect from the Connection
    ldout(cct, 1) << __func__ << " replacing on lossy channel, failing existing"
                  << dendl;
    existing->protocol->stop();
    existing->dispatch_queue->queue_reset(existing.get());
  } else {
    ceph_assert(can_write == WriteStatus::NOWRITE);
    existing->write_lock.lock();
    ProtocolV1 *exproto = dynamic_cast<ProtocolV1 *>(existing->protocol.get());
    // reset the in_seq if this is a hard reset from peer,
    // otherwise we respect our original connection's value
    if (is_reset_from_peer) {
      exproto->is_reset_from_peer = true;
    }
    // detach the socket from this connection's event loop; it will be
    // re-registered on existing's (possibly different) event center
    connection->center->delete_file_event(connection->cs.fd(),
                                          EVENT_READABLE | EVENT_WRITABLE);
    if (existing->delay_state) {
      existing->delay_state->flush();
      ceph_assert(!connection->delay_state);
    }
    exproto->reset_recv_state();
    exproto->connect_msg.features = connect_msg.features;
    auto temp_cs = std::move(connection->cs);
    EventCenter *new_center = connection->center;
    Worker *new_worker = connection->worker;
    // avoid _stop shutdown replacing socket
    // queue a reset on the new connection, which we're dumping for the old
    stop();
    connection->dispatch_queue->queue_reset(connection);
    ldout(messenger->cct, 1)
        << __func__ << " stop myself to swap existing" << dendl;
    exproto->can_write = WriteStatus::REPLACING;
    exproto->replacing = true;
    exproto->write_in_progress = false;
    existing->state_offset = 0;
    // avoid previous thread modify event
    exproto->state = NONE;
    existing->state = AsyncConnection::STATE_NONE;
    // Discard existing prefetch buffer in `recv_buf`
    existing->recv_start = existing->recv_end = 0;
    // there shouldn't exist any buffer
    ceph_assert(connection->recv_start == connection->recv_end);
    // runs on existing's ORIGINAL event center: tear down its old socket,
    // adopt ours, and move it to our worker/center
    auto deactivate_existing = std::bind(
        [existing, new_worker, new_center, exproto, reply,
         authorizer_reply](ConnectedSocket &cs) mutable {
          // we need to delete time event in original thread
          {
            std::lock_guard<std::mutex> l(existing->lock);
            existing->write_lock.lock();
            exproto->requeue_sent();
            existing->outgoing_bl.clear();
            existing->open_write = false;
            existing->write_lock.unlock();
            if (exproto->state == NONE) {
              existing->shutdown_socket();
              existing->cs = std::move(cs);
              existing->worker->references--;
              new_worker->references++;
              existing->logger = new_worker->get_perf_counter();
              existing->labeled_logger = new_worker->get_labeled_perf_counter();
              existing->worker = new_worker;
              existing->center = new_center;
              if (existing->delay_state)
                existing->delay_state->set_center(new_center);
            } else if (exproto->state == CLOSED) {
              // existing was closed in the meantime; just close our socket
              // back on the new center's thread
              auto back_to_close =
                  std::bind([](ConnectedSocket &cs) mutable { cs.close(); },
                            std::move(cs));
              new_center->submit_to(new_center->get_id(),
                                    std::move(back_to_close), true);
              return;
            } else {
              ceph_abort();
            }
          }
          // Before changing existing->center, it may already exists some
          // events in existing->center's queue. Then if we mark down
          // `existing`, it will execute in another thread and clean up
          // connection. Previous event will result in segment fault
          auto transfer_existing = [existing, exproto, reply,
                                    authorizer_reply]() mutable {
            std::lock_guard<std::mutex> l(existing->lock);
            if (exproto->state == CLOSED) return;
            ceph_assert(exproto->state == NONE);
            // we have called shutdown_socket above
            ceph_assert(existing->last_tick_id == 0);
            // restart timer since we are going to re-build connection
            existing->last_connect_started = ceph::coarse_mono_clock::now();
            existing->last_tick_id = existing->center->create_time_event(
                existing->connect_timeout_us, existing->tick_handler);
            existing->state = AsyncConnection::STATE_CONNECTION_ESTABLISHED;
            exproto->state = ACCEPTING;
            existing->center->create_file_event(
                existing->cs.fd(), EVENT_READABLE, existing->read_handler);
            reply.global_seq = exproto->peer_global_seq;
            exproto->run_continuation(exproto->send_connect_message_reply(
                CEPH_MSGR_TAG_RETRY_GLOBAL, reply, authorizer_reply));
          };
          if (existing->center->in_thread())
            transfer_existing();
          else
            existing->center->submit_to(existing->center->get_id(),
                                        std::move(transfer_existing), true);
        },
        std::move(temp_cs));
    existing->center->submit_to(existing->center->get_id(),
                                std::move(deactivate_existing), true);
    existing->write_lock.unlock();
    existing->lock.unlock();
    return nullptr;
  }
  existing->lock.unlock();
  return open(reply, authorizer_reply);
}
// Server side: accept this connection as a (new or replacing) session.
// Decide READY vs SEQ tag, set up session security, register the
// connection with the messenger (handling the lost-race case), and write
// the final reply.
CtPtr ProtocolV1::open(ceph_msg_connect_reply &reply,
                       ceph::buffer::list &authorizer_reply) {
  ldout(cct, 20) << __func__ << dendl;
  connect_seq = connect_msg.connect_seq + 1;
  peer_global_seq = connect_msg.global_seq;
  ldout(cct, 10) << __func__ << " accept success, connect_seq = " << connect_seq
                 << " in_seq=" << in_seq << ", sending READY" << dendl;
  // if it is a hard reset from peer, we don't need a round-trip to negotiate
  // in/out sequence
  if ((connect_msg.features & CEPH_FEATURE_RECONNECT_SEQ) &&
      !is_reset_from_peer) {
    reply.tag = CEPH_MSGR_TAG_SEQ;
    wait_for_seq = true;
  } else {
    reply.tag = CEPH_MSGR_TAG_READY;
    wait_for_seq = false;
    out_seq = discard_requeued_up_to(out_seq, 0);
    is_reset_from_peer = false;
    in_seq = 0;
  }
  // send READY reply
  reply.features = connection->policy.features_supported;
  reply.global_seq = messenger->get_global_seq();
  reply.connect_seq = connect_seq;
  reply.flags = 0;
  reply.authorizer_len = authorizer_reply.length();
  if (connection->policy.lossy) {
    reply.flags = reply.flags | CEPH_MSG_CONNECT_LOSSY;
  }
  connection->set_features((uint64_t)reply.features &
                           (uint64_t)connect_msg.features);
  ldout(cct, 10) << __func__ << " accept features "
                 << connection->get_features()
                 << " authorizer_protocol "
                 << connect_msg.authorizer_protocol << dendl;
  // ongoing message signing/encryption for this session
  session_security.reset(
      get_auth_session_handler(cct, auth_meta->auth_method,
                               auth_meta->session_key,
                               connection->get_features()));
  ceph::buffer::list reply_bl;
  reply_bl.append((char *)&reply, sizeof(reply));
  if (reply.authorizer_len) {
    reply_bl.append(authorizer_reply.c_str(), authorizer_reply.length());
  }
  if (reply.tag == CEPH_MSGR_TAG_SEQ) {
    // SEQ replies carry our in_seq so the peer can trim its resend queue
    uint64_t s = in_seq;
    reply_bl.append((char *)&s, sizeof(s));
  }
  connection->lock.unlock();
  // Because "replacing" will prevent other connections preempt this addr,
  // it's safe that here we don't acquire Connection's lock
  ssize_t r = messenger->accept_conn(connection);
  connection->inject_delay();
  connection->lock.lock();
  replacing = false;
  if (r < 0) {
    // another connection to this addr won the registration race
    ldout(cct, 1) << __func__ << " existing race replacing process for addr = "
                  << connection->peer_addrs->legacy_addr()
                  << " just fail later one(this)" << dendl;
    ldout(cct, 10) << "accept fault after register" << dendl;
    connection->inject_delay();
    return _fault();
  }
  if (state != ACCEPTING_WAIT_CONNECT_MSG_AUTH) {
    ldout(cct, 1) << __func__
                  << " state changed while accept_conn, it must be mark_down"
                  << dendl;
    ceph_assert(state == CLOSED || state == NONE);
    ldout(cct, 10) << "accept fault after register" << dendl;
    messenger->unregister_conn(connection);
    connection->inject_delay();
    return _fault();
  }
  return WRITE(reply_bl, handle_ready_connect_message_reply_write);
}
// Completion handler for the READY/SEQ reply write on the accept side.
// On success, notifies dispatchers of the new connection and either waits
// for the peer's ack sequence (SEQ negotiation) or goes straight to ready.
CtPtr ProtocolV1::handle_ready_connect_message_reply_write(int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;

  if (r < 0) {
    ldout(cct, 1) << __func__ << " write ready connect message reply failed"
                  << dendl;
    return _fault();
  }

  // notify
  connection->dispatch_queue->queue_accept(connection);
  messenger->ms_deliver_handle_fast_accept(connection);
  once_ready = true;
  state = ACCEPTING_HANDLED_CONNECT_MSG;

  return wait_for_seq ? wait_seq() : server_ready();
}
// Read the peer's 8-byte acked sequence number (SEQ negotiation phase).
CtPtr ProtocolV1::wait_seq() {
  ldout(cct, 20) << __func__ << dendl;
  return READ(sizeof(uint64_t), handle_seq);
}
// Handle the peer's acked sequence number: discard any requeued messages the
// peer has already received, then complete the server side of the handshake.
//
// @param buffer raw 8 bytes holding the peer's last-received seq (host order)
// @param r      read result; negative means the read failed
CtPtr ProtocolV1::handle_seq(char *buffer, int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;

  if (r < 0) {
    ldout(cct, 1) << __func__ << " read ack seq failed" << dendl;
    return _fault();
  }

  uint64_t newly_acked_seq = *(uint64_t *)buffer;
  ldout(cct, 2) << __func__ << " accept get newly_acked_seq " << newly_acked_seq
                << dendl;
  out_seq = discard_requeued_up_to(out_seq, newly_acked_seq);

  return server_ready();
}
// Final accept-side step: clear the stale connect message and transition the
// connection into the open/ready message loop.
CtPtr ProtocolV1::server_ready() {
  ldout(cct, 20) << __func__ << " session_security is "
                 << session_security
                 << dendl;

  ldout(cct, 20) << __func__ << " accept done" << dendl;
  // FIPS zeroization audit 20191115: this memset is not security related.
  memset(&connect_msg, 0, sizeof(connect_msg));

  if (connection->delay_state) {
    ceph_assert(connection->delay_state->ready());
  }

  return ready();
}
| 88,685 | 32.772277 | 107 |
cc
|
null |
ceph-main/src/msg/async/ProtocolV1.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef _MSG_ASYNC_PROTOCOL_V1_
#define _MSG_ASYNC_PROTOCOL_V1_
#include "Protocol.h"
class ProtocolV1;
using CtPtr = Ct<ProtocolV1>*;
// Legacy (v1) wire protocol implementation for AsyncConnection. The class is
// a continuation-driven state machine: each CtPtr-returning method performs
// one step and returns the next continuation (or nullptr to stop).
class ProtocolV1 : public Protocol {
/*
 * ProtocolV1 State Machine
 *
       send_server_banner                         send_client_banner
               |                                          |
               v                                          v
        wait_client_banner                       wait_server_banner
               |                                          |
               |                                          v
               v                           handle_server_banner_and_identify
      wait_connect_message <---------\                    |
               |          |          |                    v
               |          |          |          send_connect_message <----------\
               |  wait_connect_message_auth     |         |                     |
               |          |          |          |         |                     |
               v          v          |          |         |                     |
        handle_connect_message_2     |          |         v                     |
               |          |          |          |  wait_connect_reply           |
               v          v          |          |         |      |              |
        replace -> send_connect_message_reply   |         |      V              |
               |          |                     |         |  wait_connect_reply_auth |
               |          |                     |         |      |              |
               v          v                     v         v      v              |
             open ---\    handle_connect_reply_2 --------/
               |     |                                    |
               |     v                                    v
               | wait_seq                            wait_ack_seq
               |     |                                    |
               v     v                                    v
             server_ready                            client_ready
               |                                          |
               \------------------> wait_message <------------/
                     |                  ^  |              ^
  /------------------------/            |  |              |
  |                  |                  |  \----------------- ------------\
  v                  /----------/       v                                 |
handle_keepalive2    |        handle_message_header          read_message_footer
handle_keepalive2_ack |                 |                                 ^
handle_tag_ack       |                  v                                 |
  |                  |          throttle_message             read_message_data
  \----------------/                    |                                 ^
                                        v                                 |
                       read_message_front --> read_message_middle --/
 */

protected:
  // Connection lifecycle states; see get_state_name() for logging.
  enum State {
    NONE = 0,
    START_CONNECT,
    CONNECTING,
    CONNECTING_WAIT_BANNER_AND_IDENTIFY,
    CONNECTING_SEND_CONNECT_MSG,
    START_ACCEPT,
    ACCEPTING,
    ACCEPTING_WAIT_CONNECT_MSG_AUTH,
    ACCEPTING_HANDLED_CONNECT_MSG,
    OPENED,
    THROTTLE_MESSAGE,
    THROTTLE_BYTES,
    THROTTLE_DISPATCH_QUEUE,
    READ_MESSAGE_FRONT,
    READ_FOOTER_AND_DISPATCH,
    CLOSED,
    WAIT,
    STANDBY
  };

  // Human-readable state name for log output; indexed by State value.
  static const char *get_state_name(int state) {
    const char *const statenames[] = {"NONE",
                                      "START_CONNECT",
                                      "CONNECTING",
                                      "CONNECTING_WAIT_BANNER_AND_IDENTIFY",
                                      "CONNECTING_SEND_CONNECT_MSG",
                                      "START_ACCEPT",
                                      "ACCEPTING",
                                      "ACCEPTING_WAIT_CONNECT_MSG_AUTH",
                                      "ACCEPTING_HANDLED_CONNECT_MSG",
                                      "OPENED",
                                      "THROTTLE_MESSAGE",
                                      "THROTTLE_BYTES",
                                      "THROTTLE_DISPATCH_QUEUE",
                                      "READ_MESSAGE_FRONT",
                                      "READ_FOOTER_AND_DISPATCH",
                                      "CLOSED",
                                      "WAIT",
                                      "STANDBY"};
    return statenames[state];
  }

  // scratch buffer for fixed-size reads
  char *temp_buffer;

  enum class WriteStatus { NOWRITE, REPLACING, CANWRITE, CLOSED };
  std::atomic<WriteStatus> can_write;
  std::list<Message *> sent;  // the first ceph::buffer::list need to inject seq
  // priority queue for outbound msgs
  std::map<int, std::list<std::pair<ceph::buffer::list, Message *>>> out_q;
  bool keepalive;
  bool write_in_progress = false;

  __u32 connect_seq, peer_global_seq;
  std::atomic<uint64_t> in_seq{0};
  std::atomic<uint64_t> out_seq{0};
  std::atomic<uint64_t> ack_left{0};

  // per-session signing/verification handler (set once auth completes)
  std::shared_ptr<AuthSessionHandler> session_security;

  // Open state
  ceph_msg_connect connect_msg;
  ceph_msg_connect_reply connect_reply;
  ceph::buffer::list authorizer_buf;  // auth(orizer) payload read off the wire
  ceph::buffer::list authorizer_more; // connect-side auth retry (we added challenge)

  utime_t backoff;  // backoff time
  utime_t recv_stamp;
  utime_t throttle_stamp;
  unsigned msg_left;
  uint64_t cur_msg_size;
  ceph_msg_header current_header;
  ceph::buffer::list data_buf;
  ceph::buffer::list::iterator data_blp;
  ceph::buffer::list front, middle, data;

  bool replacing;  // when replacing process happened, we will reply connect
                   // side with RETRY tag and accept side will clear replaced
                   // connection. So when connect side reissue connect_msg,
                   // there won't exists conflicting connection so we use
                   // "replacing" to skip RESETSESSION to avoid detect wrong
                   // presentation
  bool is_reset_from_peer;
  bool once_ready;

  State state;

  void run_continuation(CtPtr pcontinuation);
  CtPtr read(CONTINUATION_RX_TYPE<ProtocolV1> &next, int len,
             char *buffer = nullptr);
  CtPtr write(CONTINUATION_TX_TYPE<ProtocolV1> &next,ceph::buffer::list &bl);
  inline CtPtr _fault() {  // helper fault method that stops continuation
    fault();
    return nullptr;
  }

  // --- message-loop continuations (shared by both sides) ---
  CONTINUATION_DECL(ProtocolV1, wait_message);
  READ_HANDLER_CONTINUATION_DECL(ProtocolV1, handle_message);
  READ_HANDLER_CONTINUATION_DECL(ProtocolV1, handle_keepalive2);
  READ_HANDLER_CONTINUATION_DECL(ProtocolV1, handle_keepalive2_ack);
  READ_HANDLER_CONTINUATION_DECL(ProtocolV1, handle_tag_ack);
  READ_HANDLER_CONTINUATION_DECL(ProtocolV1, handle_message_header);
  CONTINUATION_DECL(ProtocolV1, throttle_message);
  CONTINUATION_DECL(ProtocolV1, throttle_bytes);
  CONTINUATION_DECL(ProtocolV1, throttle_dispatch_queue);
  READ_HANDLER_CONTINUATION_DECL(ProtocolV1, handle_message_front);
  READ_HANDLER_CONTINUATION_DECL(ProtocolV1, handle_message_middle);
  CONTINUATION_DECL(ProtocolV1, read_message_data);
  READ_HANDLER_CONTINUATION_DECL(ProtocolV1, handle_message_data);
  READ_HANDLER_CONTINUATION_DECL(ProtocolV1, handle_message_footer);

  CtPtr ready();
  CtPtr wait_message();
  CtPtr handle_message(char *buffer, int r);

  CtPtr handle_keepalive2(char *buffer, int r);
  void append_keepalive_or_ack(bool ack = false, utime_t *t = nullptr);
  CtPtr handle_keepalive2_ack(char *buffer, int r);
  CtPtr handle_tag_ack(char *buffer, int r);

  CtPtr handle_message_header(char *buffer, int r);
  CtPtr throttle_message();
  CtPtr throttle_bytes();
  CtPtr throttle_dispatch_queue();
  CtPtr read_message_front();
  CtPtr handle_message_front(char *buffer, int r);
  CtPtr read_message_middle();
  CtPtr handle_message_middle(char *buffer, int r);
  CtPtr read_message_data_prepare();
  CtPtr read_message_data();
  CtPtr handle_message_data(char *buffer, int r);
  CtPtr read_message_footer();
  CtPtr handle_message_footer(char *buffer, int r);

  void session_reset();
  void randomize_out_seq();

  Message *_get_next_outgoing(ceph::buffer::list *bl);

  void prepare_send_message(uint64_t features, Message *m, ceph::buffer::list &bl);
  ssize_t write_message(Message *m, ceph::buffer::list &bl, bool more);

  void requeue_sent();
  uint64_t discard_requeued_up_to(uint64_t out_seq, uint64_t seq);
  void discard_out_queue();

  void reset_recv_state();
  void reset_security();

  std::ostream& _conn_prefix(std::ostream *_dout);

public:
  ProtocolV1(AsyncConnection *connection);
  virtual ~ProtocolV1();

  virtual void connect() override;
  virtual void accept() override;
  virtual bool is_connected() override;
  virtual void stop() override;
  virtual void fault() override;
  virtual void send_message(Message *m) override;
  virtual void send_keepalive() override;

  virtual void read_event() override;
  virtual void write_event() override;
  virtual bool is_queued() override;

  // Client Protocol
private:
  int global_seq;

  CONTINUATION_DECL(ProtocolV1, send_client_banner);
  WRITE_HANDLER_CONTINUATION_DECL(ProtocolV1, handle_client_banner_write);
  READ_HANDLER_CONTINUATION_DECL(ProtocolV1, handle_server_banner_and_identify);
  WRITE_HANDLER_CONTINUATION_DECL(ProtocolV1, handle_my_addr_write);
  CONTINUATION_DECL(ProtocolV1, send_connect_message);
  WRITE_HANDLER_CONTINUATION_DECL(ProtocolV1, handle_connect_message_write);
  READ_HANDLER_CONTINUATION_DECL(ProtocolV1, handle_connect_reply_1);
  READ_HANDLER_CONTINUATION_DECL(ProtocolV1, handle_connect_reply_auth);
  READ_HANDLER_CONTINUATION_DECL(ProtocolV1, handle_ack_seq);
  WRITE_HANDLER_CONTINUATION_DECL(ProtocolV1, handle_in_seq_write);

  CtPtr send_client_banner();
  CtPtr handle_client_banner_write(int r);
  CtPtr wait_server_banner();
  CtPtr handle_server_banner_and_identify(char *buffer, int r);
  CtPtr handle_my_addr_write(int r);
  CtPtr send_connect_message();
  CtPtr handle_connect_message_write(int r);
  CtPtr wait_connect_reply();
  CtPtr handle_connect_reply_1(char *buffer, int r);
  CtPtr wait_connect_reply_auth();
  CtPtr handle_connect_reply_auth(char *buffer, int r);
  CtPtr handle_connect_reply_2();
  CtPtr wait_ack_seq();
  CtPtr handle_ack_seq(char *buffer, int r);
  CtPtr handle_in_seq_write(int r);
  CtPtr client_ready();

  // Server Protocol
protected:
  // server side of SEQ negotiation: true while waiting for peer's ack seq
  bool wait_for_seq;

  CONTINUATION_DECL(ProtocolV1, send_server_banner);
  WRITE_HANDLER_CONTINUATION_DECL(ProtocolV1, handle_server_banner_write);
  READ_HANDLER_CONTINUATION_DECL(ProtocolV1, handle_client_banner);
  CONTINUATION_DECL(ProtocolV1, wait_connect_message);
  READ_HANDLER_CONTINUATION_DECL(ProtocolV1, handle_connect_message_1);
  READ_HANDLER_CONTINUATION_DECL(ProtocolV1, handle_connect_message_auth);
  WRITE_HANDLER_CONTINUATION_DECL(ProtocolV1,
                                  handle_connect_message_reply_write);
  WRITE_HANDLER_CONTINUATION_DECL(ProtocolV1,
                                  handle_ready_connect_message_reply_write);
  READ_HANDLER_CONTINUATION_DECL(ProtocolV1, handle_seq);

  CtPtr send_server_banner();
  CtPtr handle_server_banner_write(int r);
  CtPtr wait_client_banner();
  CtPtr handle_client_banner(char *buffer, int r);
  CtPtr wait_connect_message();
  CtPtr handle_connect_message_1(char *buffer, int r);
  CtPtr wait_connect_message_auth();
  CtPtr handle_connect_message_auth(char *buffer, int r);
  CtPtr handle_connect_message_2();
  CtPtr send_connect_message_reply(char tag, ceph_msg_connect_reply &reply,
                                   ceph::buffer::list &authorizer_reply);
  CtPtr handle_connect_message_reply_write(int r);
  CtPtr replace(const AsyncConnectionRef& existing, ceph_msg_connect_reply &reply,
                ceph::buffer::list &authorizer_reply);
  CtPtr open(ceph_msg_connect_reply &reply, ceph::buffer::list &authorizer_reply);
  CtPtr handle_ready_connect_message_reply_write(int r);
  CtPtr wait_seq();
  CtPtr handle_seq(char *buffer, int r);
  CtPtr server_ready();
};
// Loopback variant used for local (self) connections: skips the handshake by
// starting in the writable state.
class LoopbackProtocolV1 : public ProtocolV1 {
public:
  LoopbackProtocolV1(AsyncConnection *connection) : ProtocolV1(connection) {
    this->can_write = WriteStatus::CANWRITE;
  }
};
#endif /* _MSG_ASYNC_PROTOCOL_V1_ */
| 12,449 | 39.953947 | 86 |
h
|
null |
ceph-main/src/msg/async/ProtocolV2.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <type_traits>
#include "ProtocolV2.h"
#include "AsyncMessenger.h"
#include "common/EventTrace.h"
#include "common/ceph_crypto.h"
#include "common/errno.h"
#include "include/random.h"
#include "auth/AuthClient.h"
#include "auth/AuthServer.h"
#define dout_subsys ceph_subsys_ms
#undef dout_prefix
#define dout_prefix _conn_prefix(_dout)
// Build the per-connection debug-log prefix ("--2- ..."): local/peer addrs,
// connection state, sequence numbers, lossiness, and crypto/compression
// handler pointers. Used by the dout_prefix macro above.
std::ostream &ProtocolV2::_conn_prefix(std::ostream *_dout) {
  return *_dout << "--2- " << messenger->get_myaddrs() << " >> "
                << *connection->peer_addrs << " conn(" << connection << " "
                << this
                << " " << ceph_con_mode_name(auth_meta->con_mode)
                << " :" << connection->port
                << " s=" << get_state_name(state) << " pgs=" << peer_global_seq
                << " cs=" << connect_seq << " l=" << connection->policy.lossy
                << " rev1=" << HAVE_MSGR2_FEATURE(peer_supported_features,
                                                  REVISION_1)
                << " crypto rx=" << session_stream_handlers.rx.get()
                << " tx=" << session_stream_handlers.tx.get()
                << " comp rx=" << session_compression_handlers.rx.get()
                << " tx=" << session_compression_handlers.tx.get()
                << ").";
}
using namespace ceph::msgr::v2;
using CtPtr = Ct<ProtocolV2> *;
using CtRef = Ct<ProtocolV2> &;
// Pointer overload: a null continuation means "nothing more to do";
// otherwise delegate to the reference overload, which adds the
// protocol-level exception handling.
void ProtocolV2::run_continuation(CtPtr pcontinuation) {
  if (!pcontinuation) {
    return;
  }
  run_continuation(*pcontinuation);
}
// Run one continuation step, converting protocol-level exceptions into
// connection faults. Frame-decode and message-auth failures fault the
// connection; a payload decryption failure is only logged here.
// NOTE(review): DecryptionError deliberately does not call _fault() —
// confirm against upstream before changing.
void ProtocolV2::run_continuation(CtRef continuation) {
  try {
    CONTINUATION_RUN(continuation)
  } catch (const ceph::buffer::error &e) {
    lderr(cct) << __func__ << " failed decoding of frame header: " << e.what()
               << dendl;
    _fault();
  } catch (const ceph::crypto::onwire::MsgAuthError &e) {
    lderr(cct) << __func__ << " " << e.what() << dendl;
    _fault();
  } catch (const DecryptionError &) {
    lderr(cct) << __func__ << " failed to decrypt frame payload" << dendl;
  }
}
// WRITE(frame, desc, cont): send a frame, then continue with `cont`.
#define WRITE(B, D, C) write(D, CONTINUATION(C), B)

// READ(len, cont): read `len` bytes into a fresh buffer, then continue.
#define READ(L, C) read(CONTINUATION(C), ceph::buffer::ptr_node::create(ceph::buffer::create(L)))

// READ_RXBUF(buf, cont): read into a caller-provided (reused) rx buffer.
#define READ_RXBUF(B, C) read(CONTINUATION(C), B)

#ifdef UNIT_TESTS_BUILT

// Test hook: lets a registered interceptor fail or stop the connection at
// well-known protocol points. Compiles away in production builds.
#define INTERCEPT(S) { \
if(connection->interceptor) { \
  auto a = connection->interceptor->intercept(connection, (S)); \
  if (a == Interceptor::ACTION::FAIL) { \
    return _fault(); \
  } else if (a == Interceptor::ACTION::STOP) { \
    stop(); \
    connection->dispatch_queue->queue_reset(connection); \
    return nullptr; \
  }}}

#else
#define INTERCEPT(S)
#endif
// Construct a v2 protocol handler bound to `connection`. All session state
// (cookies, sequence numbers, flags) starts zeroed/false; the tx/rx frame
// assemblers are wired to the (initially empty) crypto and compression
// handler pairs and honor the ms_crc_data config option.
ProtocolV2::ProtocolV2(AsyncConnection *connection)
    : Protocol(2, connection),
      state(NONE),
      peer_supported_features(0),
      client_cookie(0),
      server_cookie(0),
      global_seq(0),
      connect_seq(0),
      peer_global_seq(0),
      message_seq(0),
      reconnecting(false),
      replacing(false),
      can_write(false),
      bannerExchangeCallback(nullptr),
      tx_frame_asm(&session_stream_handlers, false, cct->_conf->ms_crc_data,
                   &session_compression_handlers),
      rx_frame_asm(&session_stream_handlers, false, cct->_conf->ms_crc_data,
                   &session_compression_handlers),
      next_tag(static_cast<Tag>(0)),
      keepalive(false) {
}
// Nothing to release explicitly; members clean up via their destructors.
ProtocolV2::~ProtocolV2() {
}
// Begin the client-side (connecting) handshake; pre-auth frame recording is
// enabled so early frames can be signed once auth completes.
void ProtocolV2::connect() {
  ldout(cct, 1) << __func__ << dendl;
  state = START_CONNECT;
  pre_auth.enabled = true;
}
// Begin the server-side (accepting) handshake.
void ProtocolV2::accept() {
  ldout(cct, 1) << __func__ << dendl;
  state = START_ACCEPT;
}
// A connection counts as "connected" once the session is writable.
bool ProtocolV2::is_connected() { return can_write; }
/*
* Tears down the message queues, and removes them from the
* DispatchQueue Must hold write_lock prior to calling.
*/
void ProtocolV2::discard_out_queue() {
ldout(cct, 10) << __func__ << " started" << dendl;
for (auto p = sent.begin(); p != sent.end(); ++p) {
ldout(cct, 20) << __func__ << " discard " << *p << dendl;
(*p)->put();
}
sent.clear();
for (auto& [ prio, entries ] : out_queue) {
static_cast<void>(prio);
for (auto& entry : entries) {
ldout(cct, 20) << __func__ << " discard " << *entry.m << dendl;
entry.m->put();
}
}
out_queue.clear();
write_in_progress = false;
}
// Hard-reset the session: discard all queued/in-flight messages, notify
// dispatchers of a remote reset, and zero all session identifiers so the
// next handshake starts from scratch.
void ProtocolV2::reset_session() {
  ldout(cct, 1) << __func__ << dendl;

  std::lock_guard<std::mutex> l(connection->write_lock);
  if (connection->delay_state) {
    connection->delay_state->discard();
  }

  connection->dispatch_queue->discard_queue(connection->conn_id);
  discard_out_queue();
  connection->outgoing_bl.clear();

  connection->dispatch_queue->queue_remote_reset(connection);

  out_seq = 0;
  in_seq = 0;
  client_cookie = 0;
  server_cookie = 0;
  connect_seq = 0;
  peer_global_seq = 0;
  message_seq = 0;
  ack_left = 0;
  can_write = false;
}
// Permanently shut the connection down: flush any delayed delivery, tear
// down receive state and the out queue, and move to CLOSED. Idempotent.
void ProtocolV2::stop() {
  ldout(cct, 1) << __func__ << dendl;
  if (state == CLOSED) {
    return;
  }

  if (connection->delay_state) connection->delay_state->flush();

  std::lock_guard<std::mutex> l(connection->write_lock);

  reset_recv_state();
  discard_out_queue();

  connection->_stop();

  can_write = false;
  state = CLOSED;
}
// Public fault entry point; the work lives in _fault().
void ProtocolV2::fault() { _fault(); }
void ProtocolV2::requeue_sent() {
write_in_progress = false;
if (sent.empty()) {
return;
}
auto& rq = out_queue[CEPH_MSG_PRIO_HIGHEST];
out_seq -= sent.size();
while (!sent.empty()) {
Message *m = sent.back();
sent.pop_back();
ldout(cct, 5) << __func__ << " requeueing message m=" << m
<< " seq=" << m->get_seq() << " type=" << m->get_type() << " "
<< *m << dendl;
m->clear_payload();
rq.emplace_front(out_queue_entry_t{false, m});
}
}
// Drop requeued messages the peer has already acknowledged (seq <= `seq`).
// Messages with seq 0 (never assigned) stop the scan.
//
// @param out_seq current outbound sequence number
// @param seq     highest sequence number acked by the peer
// @return out_seq advanced by the number of discarded messages
uint64_t ProtocolV2::discard_requeued_up_to(uint64_t out_seq, uint64_t seq) {
  ldout(cct, 10) << __func__ << " " << seq << dendl;
  std::lock_guard<std::mutex> l(connection->write_lock);
  if (out_queue.count(CEPH_MSG_PRIO_HIGHEST) == 0) {
    return seq;
  }
  auto& rq = out_queue[CEPH_MSG_PRIO_HIGHEST];
  uint64_t count = out_seq;
  while (!rq.empty()) {
    Message* const m = rq.front().m;
    if (m->get_seq() == 0 || m->get_seq() > seq) break;
    ldout(cct, 5) << __func__ << " discarding message m=" << m
                  << " seq=" << m->get_seq() << " ack_seq=" << seq << " "
                  << *m << dendl;
    m->put();
    rq.pop_front();
    count++;
  }
  if (rq.empty()) out_queue.erase(CEPH_MSG_PRIO_HIGHEST);
  return count;
}
// Drop all authentication/encryption state: fresh auth metadata, no stream
// crypto handlers, empty pre-auth frame recordings.
void ProtocolV2::reset_security() {
  ldout(cct, 5) << __func__ << dendl;

  auth_meta.reset(new AuthConnectionMeta);
  session_stream_handlers.rx.reset(nullptr);
  session_stream_handlers.tx.reset(nullptr);
  pre_auth.rxbuf.clear();
  pre_auth.txbuf.clear();
}
// it's expected the `write_lock` is held while calling this method.
//
// Tear down receive-side state: crypto/compression handlers (reset on the
// connection's own event-center thread, since that thread uses them),
// pending read/write callbacks, the next-frame tag, and any held throttle.
void ProtocolV2::reset_recv_state() {
  ldout(cct, 5) << __func__ << dendl;

  if (!connection->center->in_thread()) {
    // execute in the same thread that uses the rx/tx handlers. We need
    // to do the warp because holding `write_lock` is not enough as
    // `write_event()` unlocks it just before calling `write_message()`.
    // `submit_to()` here is NOT blocking.
    connection->center->submit_to(connection->center->get_id(), [this] {
      ldout(cct, 5) << "reset_recv_state (warped) reseting crypto and compression handlers"
                    << dendl;
      // Possibly unnecessary. See the comment in `deactivate_existing`.
      std::lock_guard<std::mutex> l(connection->lock);
      std::lock_guard<std::mutex> wl(connection->write_lock);
      reset_security();
      reset_compression();
    }, /* always_async = */true);
  } else {
    reset_security();
    reset_compression();
  }

  // clean read and write callbacks
  connection->pendingReadLen.reset();
  connection->writeCallback.reset();

  next_tag = static_cast<Tag>(0);

  reset_throttle();
}
// Logical size of the message currently being received: the sum of all rx
// frame segment lengths except segment 0 (SegmentIndex::Msg::HEADER).
size_t ProtocolV2::get_current_msg_size() const {
  const size_t num_segments = rx_frame_asm.get_num_segments();
  ceph_assert(num_segments > 0);
  size_t total = 0;
  for (size_t idx = 1; idx < num_segments; ++idx) {
    total += rx_frame_asm.get_segment_logical_len(idx);
  }
  return total;
}
// Return any throttle budget held for the in-flight message. Each throttle
// stage is released only if the state machine has passed its acquisition
// point but not yet finished dispatch (state <= THROTTLE_DONE).
void ProtocolV2::reset_throttle() {
  if (state > THROTTLE_MESSAGE && state <= THROTTLE_DONE &&
      connection->policy.throttler_messages) {
    ldout(cct, 10) << __func__ << " releasing " << 1
                   << " message to policy throttler "
                   << connection->policy.throttler_messages->get_current()
                   << "/" << connection->policy.throttler_messages->get_max()
                   << dendl;
    connection->policy.throttler_messages->put();
  }
  if (state > THROTTLE_BYTES && state <= THROTTLE_DONE) {
    if (connection->policy.throttler_bytes) {
      const size_t cur_msg_size = get_current_msg_size();
      ldout(cct, 10) << __func__ << " releasing " << cur_msg_size
                     << " bytes to policy throttler "
                     << connection->policy.throttler_bytes->get_current() << "/"
                     << connection->policy.throttler_bytes->get_max() << dendl;
      connection->policy.throttler_bytes->put(cur_msg_size);
    }
  }
  if (state > THROTTLE_DISPATCH_QUEUE && state <= THROTTLE_DONE) {
    const size_t cur_msg_size = get_current_msg_size();
    ldout(cct, 10)
        << __func__ << " releasing " << cur_msg_size
        << " bytes to dispatch_queue throttler "
        << connection->dispatch_queue->dispatch_throttler.get_current() << "/"
        << connection->dispatch_queue->dispatch_throttler.get_max() << dendl;
    connection->dispatch_queue->dispatch_throttle_release(cur_msg_size);
  }
}
// Central fault handler. Depending on policy and current state this either
// tears the connection down (lossy), parks it in STANDBY (server / nothing
// to send), or schedules a reconnect — immediately if we were mid-session,
// otherwise after an exponential backoff. Always returns nullptr to stop
// the current continuation chain.
CtPtr ProtocolV2::_fault() {
  ldout(cct, 10) << __func__ << dendl;

  if (state == CLOSED || state == NONE) {
    ldout(cct, 10) << __func__ << " connection is already closed" << dendl;
    return nullptr;
  }

  // lossy connections outside the handshake window are simply dropped
  if (connection->policy.lossy &&
      !(state >= START_CONNECT && state <= SESSION_RECONNECTING)) {
    ldout(cct, 2) << __func__ << " on lossy channel, failing" << dendl;
    stop();
    connection->dispatch_queue->queue_reset(connection);
    return nullptr;
  }

  connection->write_lock.lock();
  can_write = false;
  // requeue sent items
  requeue_sent();

  // half-accepted connection with nothing queued: nobody will miss it
  if (out_queue.empty() && state >= START_ACCEPT &&
      state <= SESSION_ACCEPTING && !replacing) {
    ldout(cct, 2) << __func__ << " with nothing to send and in the half "
                  << " accept state just closed" << dendl;
    connection->write_lock.unlock();
    stop();
    connection->dispatch_queue->queue_reset(connection);
    return nullptr;
  }
  replacing = false;
  connection->fault();
  reset_recv_state();

  reconnecting = false;

  if (connection->policy.standby && out_queue.empty() && !keepalive &&
      state != WAIT) {
    ldout(cct, 1) << __func__ << " with nothing to send, going to standby"
                  << dendl;
    state = STANDBY;
    connection->write_lock.unlock();
    return nullptr;
  }
  if (connection->policy.server) {
    ldout(cct, 1) << __func__ << " server, going to standby, even though i have stuff queued" << dendl;
    state = STANDBY;
    connection->write_lock.unlock();
    return nullptr;
  }

  connection->write_lock.unlock();

  if (!(state >= START_CONNECT && state <= SESSION_RECONNECTING) &&
      state != WAIT &&
      state != SESSION_ACCEPTING /* due to connection race */) {
    // policy maybe empty when state is in accept
    if (connection->policy.server) {
      ldout(cct, 1) << __func__ << " server, going to standby" << dendl;
      state = STANDBY;
    } else {
      ldout(cct, 1) << __func__ << " initiating reconnect" << dendl;
      connect_seq++;
      global_seq = messenger->get_global_seq();
      state = START_CONNECT;
      pre_auth.enabled = true;
      connection->state = AsyncConnection::STATE_CONNECTING;
    }
    backoff = utime_t();
    connection->center->dispatch_event_external(connection->read_handler);
  } else {
    // handshake-time fault: retry with exponential backoff (doubling up to
    // ms_max_backoff; WAIT state goes straight to the maximum)
    if (state == WAIT) {
      backoff.set_from_double(cct->_conf->ms_max_backoff);
    } else if (backoff == utime_t()) {
      backoff.set_from_double(cct->_conf->ms_initial_backoff);
    } else {
      backoff += backoff;
      if (backoff > cct->_conf->ms_max_backoff)
        backoff.set_from_double(cct->_conf->ms_max_backoff);
    }

    if (server_cookie) {
      connect_seq++;
    }

    global_seq = messenger->get_global_seq();
    state = START_CONNECT;
    pre_auth.enabled = true;
    connection->state = AsyncConnection::STATE_CONNECTING;
    ldout(cct, 1) << __func__ << " waiting " << backoff << dendl;
    // woke up again;
    connection->register_time_events.insert(
        connection->center->create_time_event(backoff.to_nsec() / 1000,
                                              connection->wakeup_handler));
  }
  return nullptr;
}
// Encode a message payload against the given negotiated feature set so it is
// ready to be framed and written.
void ProtocolV2::prepare_send_message(uint64_t features,
                                      Message *m) {
  ldout(cct, 20) << __func__ << " m=" << *m << dendl;

  // associate message with Connection (for benefit of encode_payload)
  ldout(cct, 20) << __func__ << (m->empty_payload() ? " encoding features " : " half-reencoding features ")
                 << features << " " << m  << " " << *m << dendl;

  // encode and copy out of *m
  m->encode(features, 0);
}
// Queue a message for sending. Fast-dispatchable messages are pre-encoded
// outside the lock; if the feature set changed (or the session isn't
// writable) the encoding is discarded and redone later at write time.
// Takes ownership of the caller's reference (dropped if the connection is
// closed).
void ProtocolV2::send_message(Message *m) {
  uint64_t f = connection->get_features();

  // TODO: Currently not all messages supports reencode like MOSDMap, so here
  // only let fast dispatch support messages prepare message
  const bool can_fast_prepare = messenger->ms_can_fast_dispatch(m);
  if (can_fast_prepare) {
    prepare_send_message(f, m);
  }

  std::lock_guard<std::mutex> l(connection->write_lock);
  bool is_prepared = can_fast_prepare;
  // "features" changes will change the payload encoding
  if (can_fast_prepare && (!can_write || connection->get_features() != f)) {
    // ensure the correctness of message encoding
    m->clear_payload();
    is_prepared = false;
    ldout(cct, 10) << __func__ << " clear encoded buffer previous " << f
                   << " != " << connection->get_features() << dendl;
  }
  if (state == CLOSED) {
    ldout(cct, 10) << __func__ << " connection closed."
                   << " Drop message " << m << dendl;
    m->put();
  } else {
    ldout(cct, 5) << __func__ << " enqueueing message m=" << m
                  << " type=" << m->get_type() << " " << *m << dendl;
    m->queue_start = ceph::mono_clock::now();
    m->trace.event("async enqueueing message");
    out_queue[m->get_priority()].emplace_back(
      out_queue_entry_t{is_prepared, m});
    ldout(cct, 15) << __func__ << " inline write is denied, reschedule m=" << m
                   << dendl;
    // wake the writer unless one is already scheduled or a replace is active
    if (((!replacing && can_write) || state == STANDBY) && !write_in_progress) {
      write_in_progress = true;
      connection->center->dispatch_event_external(connection->write_handler);
    }
  }
}
// Request a keepalive frame: set the flag and wake the write handler,
// unless the connection is already closed.
void ProtocolV2::send_keepalive() {
  ldout(cct, 10) << __func__ << dendl;
  std::lock_guard<std::mutex> l(connection->write_lock);
  if (state == CLOSED) {
    return;
  }
  keepalive = true;
  connection->center->dispatch_event_external(connection->write_handler);
}
// Socket-readable callback: resume the continuation matching the current
// state (handshake start, frame read, or one of the throttle retry states).
// Other states have no pending read work and are ignored.
void ProtocolV2::read_event() {
  ldout(cct, 20) << __func__ << dendl;

  switch (state) {
    case START_CONNECT:
      run_continuation(CONTINUATION(start_client_banner_exchange));
      break;
    case START_ACCEPT:
      run_continuation(CONTINUATION(start_server_banner_exchange));
      break;
    case READY:
      run_continuation(CONTINUATION(read_frame));
      break;
    case THROTTLE_MESSAGE:
      run_continuation(CONTINUATION(throttle_message));
      break;
    case THROTTLE_BYTES:
      run_continuation(CONTINUATION(throttle_bytes));
      break;
    case THROTTLE_DISPATCH_QUEUE:
      run_continuation(CONTINUATION(throttle_dispatch_queue));
      break;
    default:
      break;
  }
}
// Pop the next message to send: the front entry of the highest-priority
// non-empty queue. Returns a default-constructed entry (null message) when
// nothing is queued. Caller must hold write_lock.
ProtocolV2::out_queue_entry_t ProtocolV2::_get_next_outgoing() {
  out_queue_entry_t entry;
  if (out_queue.empty()) {
    return entry;
  }
  auto highest = out_queue.rbegin();  // map is prio-ordered; rbegin = highest
  auto &entries = highest->second;
  ceph_assert(!entries.empty());
  entry = entries.front();
  entries.pop_front();
  if (entries.empty()) {
    out_queue.erase(highest->first);
  }
  return entry;
}
// Frame and transmit one message. Assigns the outbound seq, piggybacks the
// current ack seq in the v2 header, appends the encoded frame to the
// outgoing buffer and attempts a send. Consumes the queue's reference on m.
//
// @param m    message to send (already encoded)
// @param more true if more data will follow (delays flushing)
// @return <0 on send error, 0 when fully sent, >0 when bytes remain queued
ssize_t ProtocolV2::write_message(Message *m, bool more) {
  FUNCTRACE(cct);
  ceph_assert(connection->center->in_thread());
  m->set_seq(++out_seq);

  connection->lock.lock();
  uint64_t ack_seq = in_seq;
  ack_left = 0;
  connection->lock.unlock();

  ceph_msg_header &header = m->get_header();
  ceph_msg_footer &footer = m->get_footer();

  // v2 header: like v1 but carries ack_seq inline and drops the src entity
  ceph_msg_header2 header2{header.seq,        header.tid,
                           header.type,       header.priority,
                           header.version,
                           ceph_le32(0),      header.data_off,
                           ceph_le64(ack_seq),
                           footer.flags,      header.compat_version,
                           header.reserved};

  auto message = MessageFrame::Encode(
      header2,
      m->get_payload(),
      m->get_middle(),
      m->get_data());
  if (!append_frame(message)) {
    m->put();
    return -EILSEQ;
  }

  ldout(cct, 5) << __func__ << " sending message m=" << m
                << " seq=" << m->get_seq() << " " << *m << dendl;

  m->trace.event("async writing message");
  ldout(cct, 20) << __func__ << " sending m=" << m << " seq=" << m->get_seq()
                 << " src=" << entity_name_t(messenger->get_myname())
                 << " off=" << header2.data_off
                 << dendl;
  ssize_t total_send_size = connection->outgoing_bl.length();
  ssize_t rc = connection->_try_send(more);
  if (rc < 0) {
    ldout(cct, 1) << __func__ << " error sending " << m << ", "
                  << cpp_strerror(rc) << dendl;
  } else {
    const auto sent_bytes = total_send_size - connection->outgoing_bl.length();
    connection->logger->inc(l_msgr_send_bytes, sent_bytes);
    if (session_stream_handlers.tx) {
      connection->logger->inc(l_msgr_send_encrypted_bytes, sent_bytes);
    }
    ldout(cct, 10) << __func__ << " sending " << m
                   << (rc ? " continuely." : " done.") << dendl;
  }

#if defined(WITH_EVENTTRACE)
  if (m->get_type() == CEPH_MSG_OSD_OP)
    OID_EVENT_TRACE_WITH_MSG(m, "SEND_MSG_OSD_OP_END", false);
  else if (m->get_type() == CEPH_MSG_OSD_OPREPLY)
    OID_EVENT_TRACE_WITH_MSG(m, "SEND_MSG_OSD_OPREPLY_END", false);
#endif
  m->put();

  return rc;
}
// Assemble `frame` (applying crypto/compression via tx_frame_asm) and append
// the resulting bytes to the connection's outgoing buffer.
// @return false if the tx crypto handler failed to encode the frame
template <class F>
bool ProtocolV2::append_frame(F& frame) {
  ceph::bufferlist bl;
  try {
    bl = frame.get_buffer(tx_frame_asm);
  } catch (ceph::crypto::onwire::TxHandlerError &e) {
    ldout(cct, 1) << __func__ << " " << e.what() << dendl;
    return false;
  }

  ldout(cct, 25) << __func__ << " assembled frame " << bl.length()
                 << " bytes " << tx_frame_asm << dendl;
  connection->outgoing_bl.claim_append(bl);
  return true;
}
// Process an ack from the peer: trim the `sent` list of messages with
// seq <= `seq`. References are collected under write_lock but released
// after it is dropped (put() may trigger arbitrary destructor work), at
// most max_pending per call.
void ProtocolV2::handle_message_ack(uint64_t seq) {
  if (connection->policy.lossy) {  // lossy connections don't keep sent messages
    return;
  }

  ldout(cct, 15) << __func__ << " seq=" << seq << dendl;

  // trim sent list
  static const int max_pending = 128;
  int i = 0;
  Message *pending[max_pending];
  auto now = ceph::mono_clock::now();
  connection->write_lock.lock();
  while (!sent.empty() && sent.front()->get_seq() <= seq && i < max_pending) {
    Message *m = sent.front();
    sent.pop_front();
    pending[i++] = m;
    ldout(cct, 10) << __func__ << " got ack seq " << seq
                   << " >= " << m->get_seq() << " on " << m << " " << *m
                   << dendl;
  }
  connection->write_lock.unlock();
  connection->logger->tinc(l_msgr_handle_ack_lat, ceph::mono_clock::now() - now);
  for (int k = 0; k < i; k++) {
    pending[k]->put();
  }
}
// Drop negotiated compression state: metadata and both stream handlers.
void ProtocolV2::reset_compression() {
  ldout(cct, 5) << __func__ << dendl;

  comp_meta = CompConnectionMeta{};
  session_compression_handlers.rx.reset(nullptr);
  session_compression_handlers.tx.reset(nullptr);
}
// Socket-writable callback / external write wakeup. When the session is
// writable it drains the out queue (appending a keepalive first if one was
// requested), dropping write_lock around each write_message() call; after
// the loop it flushes any pending acks. When not writable it either
// triggers a reconnect from STANDBY (client side with queued data) or just
// flushes the leftover outgoing buffer. Any send error faults the
// connection.
void ProtocolV2::write_event() {
  ldout(cct, 10) << __func__ << dendl;
  ssize_t r = 0;

  connection->write_lock.lock();
  if (can_write) {
    if (keepalive) {
      ldout(cct, 10) << __func__ << " appending keepalive" << dendl;
      auto keepalive_frame = KeepAliveFrame::Encode();
      if (!append_frame(keepalive_frame)) {
        connection->write_lock.unlock();
        connection->lock.lock();
        fault();
        connection->lock.unlock();
        return;
      }
      keepalive = false;
    }

    auto start = ceph::mono_clock::now();
    bool more;
    do {
      // flush residual bytes before framing the next message
      if (connection->is_queued()) {
        if (r = connection->_try_send(); r!= 0) {
          // either fails to send or not all queued buffer is sent
          break;
        }
      }

      const auto out_entry = _get_next_outgoing();
      if (!out_entry.m) {
        break;
      }

      if (!connection->policy.lossy) {
        // put on sent list
        sent.push_back(out_entry.m);
        out_entry.m->get();
      }
      more = !out_queue.empty();
      connection->write_lock.unlock();

      // send_message or requeue messages may not encode message
      if (!out_entry.is_prepared) {
        prepare_send_message(connection->get_features(), out_entry.m);
      }

      if (out_entry.m->queue_start != ceph::mono_time()) {
        connection->logger->tinc(l_msgr_send_messages_queue_lat,
				 ceph::mono_clock::now() -
				 out_entry.m->queue_start);
      }

      r = write_message(out_entry.m, more);

      connection->write_lock.lock();
      if (r == 0) {
	;
      } else if (r < 0) {
	ldout(cct, 1) << __func__ << " send msg failed" << dendl;
	break;
      } else if (r > 0) {
	// Outbound message in-progress, thread will be re-awoken
	// when the outbound socket is writeable again
	break;
      }
    } while (can_write);
    write_in_progress = false;

    // if r > 0 mean data still lefted, so no need _try_send.
    if (r == 0) {
      uint64_t left = ack_left;
      if (left) {
        ldout(cct, 10) << __func__ << " try send msg ack, acked " << left
                       << " messages" << dendl;
        auto ack_frame = AckFrame::Encode(in_seq);
        if (append_frame(ack_frame)) {
          ack_left -= left;
          left = ack_left;
          r = connection->_try_send(left);
        } else {
          r = -EILSEQ;
        }
      } else if (is_queued()) {
        r = connection->_try_send();
      }
    }
    connection->write_lock.unlock();

    connection->logger->tinc(l_msgr_running_send_time,
                             ceph::mono_clock::now() - start);
    if (r < 0) {
      ldout(cct, 1) << __func__ << " send msg failed" << dendl;
      connection->lock.lock();
      fault();
      connection->lock.unlock();
      return;
    }
  } else {
    // not writable: re-take locks in connection->lock, write_lock order
    write_in_progress = false;
    connection->write_lock.unlock();
    connection->lock.lock();
    connection->write_lock.lock();
    if (state == STANDBY && !connection->policy.server && is_queued()) {
      ldout(cct, 10) << __func__ << " policy.server is false" << dendl;
      if (server_cookie) {  // only increment connect_seq if there is a session
        connect_seq++;
      }
      connection->_connect();
    } else if (connection->cs && state != NONE && state != CLOSED &&
               state != START_CONNECT) {
      r = connection->_try_send();
      if (r < 0) {
        ldout(cct, 1) << __func__ << " send outcoming bl failed" << dendl;
        connection->write_lock.unlock();
        fault();
        connection->lock.unlock();
        return;
      }
    }
    connection->write_lock.unlock();
    connection->lock.unlock();
  }
}
// True when anything is still waiting to go out: either messages queued
// at the protocol level or bytes buffered in the underlying connection.
bool ProtocolV2::is_queued() {
  if (!out_queue.empty()) {
    return true;
  }
  return connection->is_queued();
}
// Issue a socket read of buffer->length() bytes into `buffer` and arrange
// for continuation `next` to run when the data is available.  Returns
// &next when the read completed (or failed) synchronously, nullptr when
// the completion will arrive later via the callback.  While pre-auth
// signing is enabled, all received bytes are also accumulated into
// pre_auth.rxbuf so the later auth signature can cover them.
CtPtr ProtocolV2::read(CONTINUATION_RXBPTR_TYPE<ProtocolV2> &next,
                       rx_buffer_t &&buffer) {
  const auto len = buffer->length();
  const auto buf = buffer->c_str();
  // hand ownership of the buffer to the continuation before starting the
  // (possibly asynchronous) read
  next.node = std::move(buffer);
  ssize_t r = connection->read(len, buf,
                               [&next, this](char *buffer, int r) {
                                 // asynchronous completion path
                                 if (unlikely(pre_auth.enabled) && r >= 0) {
                                   pre_auth.rxbuf.append(*next.node);
                                   // cap pre-auth buffering (~20MB) to catch
                                   // runaway accumulation in debug builds
                                   ceph_assert(!cct->_conf->ms_die_on_bug ||
                                               pre_auth.rxbuf.length() < 20000000);
                                 }
                                 next.r = r;
                                 run_continuation(next);
                               });
  if (r <= 0) {
    // error or done synchronously
    if (unlikely(pre_auth.enabled) && r == 0) {
      pre_auth.rxbuf.append(*next.node);
      ceph_assert(!cct->_conf->ms_die_on_bug ||
                  pre_auth.rxbuf.length() < 20000000);
    }
    next.r = r;
    return &next;
  }
  return nullptr;
}
// Serialize `frame` through the tx frame assembler (which applies the
// session's on-wire encryption when active) and forward the resulting
// bytes to the bufferlist-based write() overload.
template <class F>
CtPtr ProtocolV2::write(const std::string &desc,
                        CONTINUATION_TYPE<ProtocolV2> &next,
                        F &frame) {
  ceph::bufferlist bl;
  try {
    bl = frame.get_buffer(tx_frame_asm);
  } catch (ceph::crypto::onwire::TxHandlerError &e) {
    // encrypting/assembling the outgoing frame failed; tear down
    ldout(cct, 1) << __func__ << " " << e.what() << dendl;
    return _fault();
  }
  ldout(cct, 25) << __func__ << " assembled frame " << bl.length()
                 << " bytes " << tx_frame_asm << dendl;
  return write(desc, next, bl);
}
// Queue `buffer` for transmission and resume continuation `next` once it
// has been written.  Returns &next when the write completed synchronously,
// nullptr when completion is deferred to the callback.  While pre-auth
// signing is enabled, outgoing bytes are mirrored into pre_auth.txbuf for
// the later auth signature.
CtPtr ProtocolV2::write(const std::string &desc,
                        CONTINUATION_TYPE<ProtocolV2> &next,
                        ceph::bufferlist &buffer) {
  if (unlikely(pre_auth.enabled)) {
    pre_auth.txbuf.append(buffer);
    // cap pre-auth buffering (~20MB) in debug builds
    ceph_assert(!cct->_conf->ms_die_on_bug ||
                pre_auth.txbuf.length() < 20000000);
  }
  ssize_t r =
      connection->write(buffer, [&next, desc, this](int r) {
        // asynchronous completion path
        if (r < 0) {
          ldout(cct, 1) << __func__ << " " << desc << " write failed r=" << r
                        << " (" << cpp_strerror(r) << ")" << dendl;
          connection->inject_delay();
          _fault();
        }
        run_continuation(next);
      });
  if (r < 0) {
    // synchronous failure
    ldout(cct, 1) << __func__ << " " << desc << " write failed r=" << r
                  << " (" << cpp_strerror(r) << ")" << dendl;
    return _fault();
  } else if (r == 0) {
    // wrote everything synchronously; continue immediately
    next.setParams();
    return &next;
  }
  return nullptr;
}
// Send our side of the msgr2 banner: the fixed banner prefix followed by a
// 16-bit payload length and a payload carrying our supported/required
// feature bitmasks.  `callback` is stashed and resumed after the peer's
// banner has been fully processed.
CtPtr ProtocolV2::_banner_exchange(CtRef callback) {
  ldout(cct, 20) << __func__ << dendl;
  bannerExchangeCallback = &callback;
  ceph::bufferlist banner_payload;
  using ceph::encode;
  encode((uint64_t)CEPH_MSGR2_SUPPORTED_FEATURES, banner_payload, 0);
  encode((uint64_t)CEPH_MSGR2_REQUIRED_FEATURES, banner_payload, 0);
  ceph::bufferlist bl;
  bl.append(CEPH_BANNER_V2_PREFIX, strlen(CEPH_BANNER_V2_PREFIX));
  encode((uint16_t)banner_payload.length(), bl, 0);
  bl.claim_append(banner_payload);
  INTERCEPT(state == BANNER_CONNECTING ? 3 : 4);
  return WRITE(bl, "banner", _wait_for_peer_banner);
}
// Wait for the peer's banner: the fixed prefix plus a 16-bit little-endian
// payload length field.
CtPtr ProtocolV2::_wait_for_peer_banner() {
  const unsigned prefix_len = strlen(CEPH_BANNER_V2_PREFIX);
  return READ(prefix_len + sizeof(ceph_le16), _handle_peer_banner);
}
// Validate the peer's banner prefix and decode the 16-bit payload length
// that follows it, then read the banner payload.  A v1 banner from the
// peer is detected and reported specially.
CtPtr ProtocolV2::_handle_peer_banner(rx_buffer_t &&buffer, int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r < 0) {
    ldout(cct, 1) << __func__ << " read peer banner failed r=" << r << " ("
                  << cpp_strerror(r) << ")" << dendl;
    return _fault();
  }
  unsigned banner_prefix_len = strlen(CEPH_BANNER_V2_PREFIX);
  if (memcmp(buffer->c_str(), CEPH_BANNER_V2_PREFIX, banner_prefix_len)) {
    // not a v2 banner; check whether the peer is speaking msgr v1
    if (memcmp(buffer->c_str(), CEPH_BANNER, strlen(CEPH_BANNER)) == 0) {
      lderr(cct) << __func__ << " peer " << *connection->peer_addrs
                 << " is using msgr V1 protocol" << dendl;
      return _fault();
    }
    ldout(cct, 1) << __func__ << " accept peer sent bad banner" << dendl;
    return _fault();
  }
  uint16_t payload_len;
  ceph::bufferlist bl;
  // skip past the prefix and view only the length field
  buffer->set_offset(banner_prefix_len);
  buffer->set_length(sizeof(ceph_le16));
  bl.push_back(std::move(buffer));
  auto ti = bl.cbegin();
  using ceph::decode;
  try {
    decode(payload_len, ti);
  } catch (const ceph::buffer::error &e) {
    lderr(cct) << __func__ << " decode banner payload len failed " << dendl;
    return _fault();
  }
  INTERCEPT(state == BANNER_CONNECTING ? 5 : 6);
  return READ(payload_len, _handle_peer_banner_payload);
}
// Decode the peer's banner payload (its supported/required feature masks),
// verify mutual feature compatibility, pick the frame-format revision, and
// advance to the HELLO exchange.
CtPtr ProtocolV2::_handle_peer_banner_payload(rx_buffer_t &&buffer, int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r < 0) {
    ldout(cct, 1) << __func__ << " read peer banner payload failed r=" << r
                  << " (" << cpp_strerror(r) << ")" << dendl;
    return _fault();
  }
  uint64_t peer_supported_features;
  uint64_t peer_required_features;
  ceph::bufferlist bl;
  using ceph::decode;
  bl.push_back(std::move(buffer));
  auto ti = bl.cbegin();
  try {
    decode(peer_supported_features, ti);
    decode(peer_required_features, ti);
  } catch (const ceph::buffer::error &e) {
    lderr(cct) << __func__ << " decode banner payload failed " << dendl;
    return _fault();
  }
  ldout(cct, 1) << __func__ << " supported=" << std::hex
                << peer_supported_features << " required=" << std::hex
                << peer_required_features << std::dec << dendl;
  // Check feature bit compatibility
  uint64_t supported_features = CEPH_MSGR2_SUPPORTED_FEATURES;
  uint64_t required_features = CEPH_MSGR2_REQUIRED_FEATURES;
  // everything we require must be supported by the peer ...
  if ((required_features & peer_supported_features) != required_features) {
    ldout(cct, 1) << __func__ << " peer does not support all required features"
                  << " required=" << std::hex << required_features
                  << " supported=" << std::hex << peer_supported_features
                  << std::dec << dendl;
    stop();
    connection->dispatch_queue->queue_reset(connection);
    return nullptr;
  }
  // ... and everything the peer requires must be supported by us
  if ((supported_features & peer_required_features) != peer_required_features) {
    ldout(cct, 1) << __func__ << " we do not support all peer required features"
                  << " required=" << std::hex << peer_required_features
                  << " supported=" << supported_features << std::dec << dendl;
    stop();
    connection->dispatch_queue->queue_reset(connection);
    return nullptr;
  }
  this->peer_supported_features = peer_supported_features;
  // NOTE(review): peer requiring no features appears to fall back to our
  // baseline msgr2_required connection features — confirm intent upstream
  if (peer_required_features == 0) {
    this->connection_features = msgr2_required;
  }
  // if the peer supports msgr2.1, switch to it
  bool is_rev1 = HAVE_MSGR2_FEATURE(peer_supported_features, REVISION_1);
  tx_frame_asm.set_is_rev1(is_rev1);
  rx_frame_asm.set_is_rev1(is_rev1);
  if (state == BANNER_CONNECTING) {
    state = HELLO_CONNECTING;
  }
  else {
    ceph_assert(state == BANNER_ACCEPTING);
    state = HELLO_ACCEPTING;
  }
  auto hello = HelloFrame::Encode(messenger->get_mytype(),
                                  connection->target_addr);
  INTERCEPT(state == HELLO_CONNECTING ? 7 : 8);
  return WRITE(hello, "hello frame", read_frame);
}
// Process the peer's HELLO frame: learn the peer's entity type (accept
// side), verify it matches expectations (connect side), optionally learn
// our own externally visible address from the peer, and then resume the
// continuation stashed by _banner_exchange().
CtPtr ProtocolV2::handle_hello(ceph::bufferlist &payload)
{
  ldout(cct, 20) << __func__
                 << " payload.length()=" << payload.length() << dendl;
  if (state != HELLO_CONNECTING && state != HELLO_ACCEPTING) {
    lderr(cct) << __func__ << " not in hello exchange state!" << dendl;
    return _fault();
  }
  auto hello = HelloFrame::Decode(payload);
  ldout(cct, 5) << __func__ << " received hello:"
                << " peer_type=" << (int)hello.entity_type()
                << " peer_addr_for_me=" << hello.peer_addr() << dendl;
  sockaddr_storage ss;
  socklen_t len = sizeof(ss);
  getsockname(connection->cs.fd(), (sockaddr *)&ss, &len);
  ldout(cct, 5) << __func__ << " getsockname says I am " << (sockaddr *)&ss
                << " when talking to " << connection->target_addr << dendl;
  if (connection->get_peer_type() == -1) {
    // peer type unknown until now: adopt it and the matching policy
    connection->set_peer_type(hello.entity_type());
    ceph_assert(state == HELLO_ACCEPTING);
    connection->policy = messenger->get_policy(hello.entity_type());
    ldout(cct, 10) << __func__ << " accept of host_type "
                   << (int)hello.entity_type()
                   << ", policy.lossy=" << connection->policy.lossy
                   << " policy.server=" << connection->policy.server
                   << " policy.standby=" << connection->policy.standby
                   << " policy.resetcheck=" << connection->policy.resetcheck
                   << dendl;
  } else {
    // connect side: the peer must be of the type we dialed
    ceph_assert(state == HELLO_CONNECTING);
    if (connection->get_peer_type() != hello.entity_type()) {
      ldout(cct, 1) << __func__ << " connection peer type does not match what"
                    << " peer advertises " << connection->get_peer_type()
                    << " != " << (int)hello.entity_type() << dendl;
      stop();
      connection->dispatch_queue->queue_reset(connection);
      return nullptr;
    }
  }
  if (messenger->get_myaddrs().empty() ||
      messenger->get_myaddrs().front().is_blank_ip()) {
    // we don't know our own address yet: learn it either from what the
    // peer says about us or from the local socket
    entity_addr_t a;
    if (cct->_conf->ms_learn_addr_from_peer) {
      ldout(cct, 1) << __func__ << " peer " << connection->target_addr
                    << " says I am " << hello.peer_addr() << " (socket says "
                    << (sockaddr*)&ss << ")" << dendl;
      a = hello.peer_addr();
    } else {
      ldout(cct, 1) << __func__ << " socket to " << connection->target_addr
                    << " says I am " << (sockaddr*)&ss
                    << " (peer says " << hello.peer_addr() << ")" << dendl;
      a.set_sockaddr((sockaddr *)&ss);
    }
    a.set_type(entity_addr_t::TYPE_MSGR2); // anything but NONE; learned_addr ignores this
    a.set_port(0);
    // learned_addr() must be called without the connection lock held
    connection->lock.unlock();
    messenger->learned_addr(a);
    if (cct->_conf->ms_inject_internal_delays &&
        cct->_conf->ms_inject_socket_failures) {
      // failure injection: occasionally sleep while unlocked
      if (rand() % cct->_conf->ms_inject_socket_failures == 0) {
        ldout(cct, 10) << __func__ << " sleep for "
                       << cct->_conf->ms_inject_internal_delays << dendl;
        utime_t t;
        t.set_from_double(cct->_conf->ms_inject_internal_delays);
        t.sleep();
      }
    }
    connection->lock.lock();
    // state may have changed while we dropped the lock
    if (state != HELLO_CONNECTING) {
      ldout(cct, 1) << __func__
                    << " state changed while learned_addr, mark_down or "
                    << " replacing must be happened just now" << dendl;
      return nullptr;
    }
  }
  // resume whatever the banner exchange was started for
  CtPtr callback;
  callback = bannerExchangeCallback;
  bannerExchangeCallback = nullptr;
  ceph_assert(callback);
  return callback;
}
// Begin reading the next frame off the wire: reset per-frame receive state
// and start with the frame preamble.
CtPtr ProtocolV2::read_frame() {
  // nothing more to read once the protocol has been shut down
  if (state == CLOSED) {
    return nullptr;
  }
  ldout(cct, 20) << __func__ << dendl;
  // discard leftovers from the previously processed frame
  rx_segments_data.clear();
  rx_preamble.clear();
  rx_epilogue.clear();
  return READ(rx_frame_asm.get_preamble_onwire_len(),
              handle_read_frame_preamble_main);
}
// Handle a freshly read frame preamble: disassemble it (authenticating /
// decrypting in secure mode) to learn the frame tag and segment layout,
// then either enter the message-throttling path (for MESSAGE frames) or
// start reading the first segment directly.
CtPtr ProtocolV2::handle_read_frame_preamble_main(rx_buffer_t &&buffer, int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r < 0) {
    ldout(cct, 1) << __func__ << " read frame preamble failed r=" << r
                  << dendl;
    return _fault();
  }
  rx_preamble.push_back(std::move(buffer));
  ldout(cct, 30) << __func__ << " preamble\n";
  rx_preamble.hexdump(*_dout);
  *_dout << dendl;
  try {
    next_tag = rx_frame_asm.disassemble_preamble(rx_preamble);
  } catch (FrameError& e) {
    ldout(cct, 1) << __func__ << " " << e.what() << dendl;
    return _fault();
  } catch (ceph::crypto::onwire::MsgAuthError&) {
    // fix: add the missing separator so the log doesn't read
    // "...preamble_mainbad auth tag"
    ldout(cct, 1) << __func__ << " bad auth tag" << dendl;
    return _fault();
  }
  ldout(cct, 25) << __func__ << " disassembled preamble " << rx_frame_asm
                 << dendl;
  if (session_stream_handlers.rx) {
    // secure mode: show the decrypted preamble too
    ldout(cct, 30) << __func__ << " preamble after decrypt\n";
    rx_preamble.hexdump(*_dout);
    *_dout << dendl;
  }
  // does it need throttle?
  if (next_tag == Tag::MESSAGE) {
    if (state != READY) {
      lderr(cct) << __func__ << " not in ready state!" << dendl;
      return _fault();
    }
    recv_stamp = ceph_clock_now();
    state = THROTTLE_MESSAGE;
    return CONTINUE(throttle_message);
  } else {
    return read_frame_segment();
  }
}
// Dispatch a fully received and verified frame by its tag: control frames
// go through handle_frame_payload(), MESSAGE frames through
// handle_message(), and anything unknown faults the connection.
CtPtr ProtocolV2::handle_read_frame_dispatch() {
  ldout(cct, 10) << __func__
                 << " tag=" << static_cast<uint32_t>(next_tag) << dendl;
  switch (next_tag) {
    case Tag::HELLO:
    case Tag::AUTH_REQUEST:
    case Tag::AUTH_BAD_METHOD:
    case Tag::AUTH_REPLY_MORE:
    case Tag::AUTH_REQUEST_MORE:
    case Tag::AUTH_DONE:
    case Tag::AUTH_SIGNATURE:
    case Tag::CLIENT_IDENT:
    case Tag::SERVER_IDENT:
    case Tag::IDENT_MISSING_FEATURES:
    case Tag::SESSION_RECONNECT:
    case Tag::SESSION_RESET:
    case Tag::SESSION_RETRY:
    case Tag::SESSION_RETRY_GLOBAL:
    case Tag::SESSION_RECONNECT_OK:
    case Tag::KEEPALIVE2:
    case Tag::KEEPALIVE2_ACK:
    case Tag::ACK:
    case Tag::WAIT:
    case Tag::COMPRESSION_REQUEST:
    case Tag::COMPRESSION_DONE:
      return handle_frame_payload();
    case Tag::MESSAGE:
      return handle_message();
    default: {
      lderr(cct) << __func__
                 << " received unknown tag=" << static_cast<uint32_t>(next_tag)
                 << dendl;
      return _fault();
    }
  }
  return nullptr;
}
// Start reading the next frame segment (the one at index
// rx_segments_data.size()).  Empty segments are skipped without touching
// the socket; otherwise an aligned receive buffer is allocated per the
// frame assembler's requirements.
CtPtr ProtocolV2::read_frame_segment() {
  size_t seg_idx = rx_segments_data.size();
  ldout(cct, 20) << __func__ << " seg_idx=" << seg_idx << dendl;
  // reserve the slot for this segment's data up front
  rx_segments_data.emplace_back();
  uint32_t onwire_len = rx_frame_asm.get_segment_onwire_len(seg_idx);
  if (onwire_len == 0) {
    // zero-length segment: nothing to read, move on
    return _handle_read_frame_segment();
  }
  rx_buffer_t rx_buffer;
  uint16_t align = rx_frame_asm.get_segment_align(seg_idx);
  try {
    rx_buffer = ceph::buffer::ptr_node::create(ceph::buffer::create_aligned(
        onwire_len, align));
  } catch (const ceph::buffer::bad_alloc&) {
    // Catching because of potential issues with satisfying alignment.
    ldout(cct, 1) << __func__ << " can't allocate aligned rx_buffer"
                  << " len=" << onwire_len
                  << " align=" << align
                  << dendl;
    return _fault();
  }
  return READ_RXBUF(std::move(rx_buffer), handle_read_frame_segment);
}
// Completion handler for a single segment read: stash the data and decide
// whether more segments (or the epilogue) need to be read.
CtPtr ProtocolV2::handle_read_frame_segment(rx_buffer_t &&rx_buffer, int r) {
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r >= 0) {
    rx_segments_data.back().push_back(std::move(rx_buffer));
    return _handle_read_frame_segment();
  }
  ldout(cct, 1) << __func__ << " read frame segment failed r=" << r << " ("
                << cpp_strerror(r) << ")" << dendl;
  return _fault();
}
// After each segment lands: if all of the frame's segments are in, read
// the epilogue (or skip straight to its handler when there is none);
// otherwise go read the next segment.
CtPtr ProtocolV2::_handle_read_frame_segment() {
  if (rx_segments_data.size() == rx_frame_asm.get_num_segments()) {
    // OK, all segments planned to read are read. Can go with epilogue.
    uint32_t epilogue_onwire_len = rx_frame_asm.get_epilogue_onwire_len();
    if (epilogue_onwire_len == 0) {
      return _handle_read_frame_epilogue_main();
    }
    return READ(epilogue_onwire_len, handle_read_frame_epilogue_main);
  }
  // TODO: for makeshift only. This will be more generic and throttled
  return read_frame_segment();
}
// Route a control frame's payload (the last received segment) to the
// handler matching next_tag.  MESSAGE frames never reach here — they are
// dispatched separately in handle_read_frame_dispatch().
CtPtr ProtocolV2::handle_frame_payload() {
  ceph_assert(!rx_segments_data.empty());
  auto& payload = rx_segments_data.back();
  ldout(cct, 30) << __func__ << "\n";
  payload.hexdump(*_dout);
  *_dout << dendl;
  switch (next_tag) {
    case Tag::HELLO:
      return handle_hello(payload);
    case Tag::AUTH_REQUEST:
      return handle_auth_request(payload);
    case Tag::AUTH_BAD_METHOD:
      return handle_auth_bad_method(payload);
    case Tag::AUTH_REPLY_MORE:
      return handle_auth_reply_more(payload);
    case Tag::AUTH_REQUEST_MORE:
      return handle_auth_request_more(payload);
    case Tag::AUTH_DONE:
      return handle_auth_done(payload);
    case Tag::AUTH_SIGNATURE:
      return handle_auth_signature(payload);
    case Tag::CLIENT_IDENT:
      return handle_client_ident(payload);
    case Tag::SERVER_IDENT:
      return handle_server_ident(payload);
    case Tag::IDENT_MISSING_FEATURES:
      return handle_ident_missing_features(payload);
    case Tag::SESSION_RECONNECT:
      return handle_reconnect(payload);
    case Tag::SESSION_RESET:
      return handle_session_reset(payload);
    case Tag::SESSION_RETRY:
      return handle_session_retry(payload);
    case Tag::SESSION_RETRY_GLOBAL:
      return handle_session_retry_global(payload);
    case Tag::SESSION_RECONNECT_OK:
      return handle_reconnect_ok(payload);
    case Tag::KEEPALIVE2:
      return handle_keepalive2(payload);
    case Tag::KEEPALIVE2_ACK:
      return handle_keepalive2_ack(payload);
    case Tag::ACK:
      return handle_message_ack(payload);
    case Tag::WAIT:
      return handle_wait(payload);
    case Tag::COMPRESSION_REQUEST:
      return handle_compression_request(payload);
    case Tag::COMPRESSION_DONE:
      return handle_compression_done(payload);
    default:
      // handle_read_frame_dispatch() already filtered unknown tags
      ceph_abort();
  }
  return nullptr;
}
// Transition the fully negotiated connection into the READY state: arm the
// inactivity tick timer, enable writes (kicking the writer if messages are
// already queued), and start the steady-state frame read loop.
CtPtr ProtocolV2::ready() {
  ldout(cct, 25) << __func__ << dendl;
  reconnecting = false;
  replacing = false;
  // make sure no pending tick timer
  if (connection->last_tick_id) {
    connection->center->delete_time_event(connection->last_tick_id);
  }
  connection->last_tick_id = connection->center->create_time_event(
      connection->inactive_timeout_us, connection->tick_handler);
  {
    std::lock_guard<std::mutex> l(connection->write_lock);
    can_write = true;
    // wake the writer if messages piled up during negotiation
    if (!out_queue.empty()) {
      connection->center->dispatch_event_external(connection->write_handler);
    }
  }
  connection->maybe_start_delay_thread();
  state = READY;
  ldout(cct, 1) << __func__ << " entity=" << peer_name << " client_cookie="
                << std::hex << client_cookie << " server_cookie="
                << server_cookie << std::dec << " in_seq=" << in_seq
                << " out_seq=" << out_seq << dendl;
  INTERCEPT(15);
  return CONTINUE(read_frame);
}
// Completion handler for the epilogue read: buffer the epilogue bytes and
// run the shared epilogue/verification logic.
CtPtr ProtocolV2::handle_read_frame_epilogue_main(rx_buffer_t &&buffer, int r)
{
  ldout(cct, 20) << __func__ << " r=" << r << dendl;
  if (r >= 0) {
    rx_epilogue.push_back(std::move(buffer));
    return _handle_read_frame_epilogue_main();
  }
  ldout(cct, 1) << __func__ << " read frame epilogue failed r=" << r
                << " (" << cpp_strerror(r) << ")" << dendl;
  return _fault();
}
// Verify/decrypt the received segments against the preamble and epilogue.
// If the sender aborted the frame mid-stream (disassemble returns false),
// drop it silently, release throttle, and resume the read loop; otherwise
// dispatch the complete frame.
CtPtr ProtocolV2::_handle_read_frame_epilogue_main() {
  bool ok = false;
  try {
    ok = rx_frame_asm.disassemble_segments(rx_preamble, rx_segments_data.data(), rx_epilogue);
  } catch (FrameError& e) {
    ldout(cct, 1) << __func__ << " " << e.what() << dendl;
    return _fault();
  } catch (ceph::crypto::onwire::MsgAuthError&) {
    // fix: add the missing separator so the log doesn't read
    // "...epilogue_mainbad auth tag"
    ldout(cct, 1) << __func__ << " bad auth tag" << dendl;
    return _fault();
  }

  // we do have a mechanism that allows transmitter to start sending message
  // and abort after putting entire data field on wire. This will be used by
  // the kernel client to avoid unnecessary buffering.
  if (!ok) {
    reset_throttle();
    state = READY;
    return CONTINUE(read_frame);
  }
  return handle_read_frame_dispatch();
}
// Decode a complete MESSAGE frame into a Message, perform sequence-number
// sanity checks, account throttle/stats, and hand the message to fast
// dispatch or the dispatch queue.  Runs with connection->lock held (it is
// dropped temporarily around fast dispatch).
CtPtr ProtocolV2::handle_message() {
  ldout(cct, 20) << __func__ << dendl;
  ceph_assert(state == THROTTLE_DONE);
  const size_t cur_msg_size = get_current_msg_size();
  auto msg_frame = MessageFrame::Decode(rx_segments_data);
  // XXX: paranoid copy just to avoid oops
  ceph_msg_header2 current_header = msg_frame.header();
  ldout(cct, 5) << __func__
                << " got " << msg_frame.front_len()
                << " + " << msg_frame.middle_len()
                << " + " << msg_frame.data_len()
                << " byte message."
                << " envelope type=" << current_header.type
                << " src " << peer_name
                << " off " << current_header.data_off
                << dendl;
  INTERCEPT(16);
  // translate the msgr2 header into the legacy header decode_message()
  // expects; crc fields are zeroed (integrity is frame-level in v2)
  ceph_msg_header header{current_header.seq,
                         current_header.tid,
                         current_header.type,
                         current_header.priority,
                         current_header.version,
                         ceph_le32(msg_frame.front_len()),
                         ceph_le32(msg_frame.middle_len()),
                         ceph_le32(msg_frame.data_len()),
                         current_header.data_off,
                         peer_name,
                         current_header.compat_version,
                         current_header.reserved,
                         ceph_le32(0)};
  ceph_msg_footer footer{ceph_le32(0), ceph_le32(0),
	                 ceph_le32(0), ceph_le64(0), current_header.flags};
  Message *message = decode_message(cct, 0, header, footer,
      msg_frame.front(),
      msg_frame.middle(),
      msg_frame.data(),
      connection);
  if (!message) {
    ldout(cct, 1) << __func__ << " decode message failed " << dendl;
    return _fault();
  } else {
    state = READ_MESSAGE_COMPLETE;
  }
  INTERCEPT(17);
  // attach throttlers so releases happen when the message is destroyed
  message->set_byte_throttler(connection->policy.throttler_bytes);
  message->set_message_throttler(connection->policy.throttler_messages);
  // store reservation size in message, so we don't get confused
  // by messages entering the dispatch queue through other paths.
  message->set_dispatch_throttle_size(cur_msg_size);
  message->set_recv_stamp(recv_stamp);
  message->set_throttle_stamp(throttle_stamp);
  message->set_recv_complete_stamp(ceph_clock_now());
  // check received seq#. if it is old, drop the message.
  // note that incoming messages may skip ahead. this is convenient for the
  // client side queueing because messages can't be renumbered, but the (kernel)
  // client will occasionally pull a message out of the sent queue to send
  // elsewhere. in that case it doesn't matter if we "got" it or not.
  uint64_t cur_seq = in_seq;
  if (message->get_seq() <= cur_seq) {
    ldout(cct, 0) << __func__ << " got old message " << message->get_seq()
                  << " <= " << cur_seq << " " << message << " " << *message
                  << ", discarding" << dendl;
    message->put();
    if (connection->has_feature(CEPH_FEATURE_RECONNECT_SEQ) &&
        cct->_conf->ms_die_on_old_message) {
      ceph_assert(0 == "old msgs despite reconnect_seq feature");
    }
    return nullptr;
  }
  if (message->get_seq() > cur_seq + 1) {
    ldout(cct, 0) << __func__ << " missed message? skipped from seq "
                  << cur_seq << " to " << message->get_seq() << dendl;
    if (cct->_conf->ms_die_on_skipped_message) {
      ceph_assert(0 == "skipped incoming seq");
    }
  }
#if defined(WITH_EVENTTRACE)
  if (message->get_type() == CEPH_MSG_OSD_OP ||
      message->get_type() == CEPH_MSG_OSD_OPREPLY) {
    utime_t ltt_processed_stamp = ceph_clock_now();
    double usecs_elapsed =
      ((double)(ltt_processed_stamp.to_nsec() - recv_stamp.to_nsec())) / 1000;
    ostringstream buf;
    if (message->get_type() == CEPH_MSG_OSD_OP)
      OID_ELAPSED_WITH_MSG(message, usecs_elapsed, "TIME_TO_DECODE_OSD_OP",
                           false);
    else
      OID_ELAPSED_WITH_MSG(message, usecs_elapsed, "TIME_TO_DECODE_OSD_OPREPLY",
                           false);
  }
#endif
  // note last received message.
  in_seq = message->get_seq();
  ldout(cct, 5) << __func__ << " received message m=" << message
                << " seq=" << message->get_seq()
                << " from=" << message->get_source() << " type=" << header.type
                << " " << *message << dendl;
  bool need_dispatch_writer = false;
  if (!connection->policy.lossy) {
    // lossless peers expect acks for what we received
    ack_left++;
    need_dispatch_writer = true;
  }
  state = READY;
  ceph::mono_time fast_dispatch_time;
  if (connection->is_blackhole()) {
    // test hook: drop the message on the floor
    ldout(cct, 10) << __func__ << " blackhole " << *message << dendl;
    message->put();
    goto out;
  }
  connection->logger->inc(l_msgr_recv_messages);
  connection->logger->inc(l_msgr_recv_bytes,
                          rx_frame_asm.get_frame_onwire_len());
  if (session_stream_handlers.rx) {
    connection->logger->inc(l_msgr_recv_encrypted_bytes,
                            rx_frame_asm.get_frame_onwire_len());
  }
  messenger->ms_fast_preprocess(message);
  fast_dispatch_time = ceph::mono_clock::now();
  connection->logger->tinc(l_msgr_running_recv_time,
			   fast_dispatch_time - connection->recv_start_time);
  if (connection->delay_state) {
    // delay injection path (testing): maybe queue with an artificial delay
    double delay_period = 0;
    if (rand() % 10000 < cct->_conf->ms_inject_delay_probability * 10000.0) {
      delay_period =
          cct->_conf->ms_inject_delay_max * (double)(rand() % 10000) / 10000.0;
      ldout(cct, 1) << "queue_received will delay after "
                    << (ceph_clock_now() + delay_period) << " on " << message
                    << " " << *message << dendl;
    }
    connection->delay_state->queue(delay_period, message);
  } else if (messenger->ms_can_fast_dispatch(message)) {
    // fast dispatch runs without the connection lock
    connection->lock.unlock();
    connection->dispatch_queue->fast_dispatch(message);
    connection->recv_start_time = ceph::mono_clock::now();
    connection->logger->tinc(l_msgr_running_fast_dispatch_time,
                             connection->recv_start_time - fast_dispatch_time);
    connection->lock.lock();
    // we might have been reused by another connection
    // let's check if that is the case
    if (state != READY) {
      // yes, that was the case, let's do nothing
      return nullptr;
    }
  } else {
    connection->dispatch_queue->enqueue(message, message->get_priority(),
                                        connection->conn_id);
  }

  // the message header piggybacks an ack of our outbound messages
  handle_message_ack(current_header.ack_seq);

 out:
  if (need_dispatch_writer && connection->is_connected()) {
    connection->center->dispatch_event_external(connection->write_handler);
  }

  return CONTINUE(read_frame);
}
// First stage of inbound throttling: reserve one message slot from the
// policy message throttler.  On contention, schedule a ~1ms wakeup and
// retry later instead of blocking the event loop.
CtPtr ProtocolV2::throttle_message() {
  ldout(cct, 20) << __func__ << dendl;
  if (connection->policy.throttler_messages) {
    ldout(cct, 10) << __func__ << " wants " << 1
                   << " message from policy throttler "
                   << connection->policy.throttler_messages->get_current()
                   << "/" << connection->policy.throttler_messages->get_max()
                   << dendl;
    if (!connection->policy.throttler_messages->get_or_fail()) {
      ldout(cct, 1) << __func__ << " wants 1 message from policy throttle "
                    << connection->policy.throttler_messages->get_current()
                    << "/" << connection->policy.throttler_messages->get_max()
                    << " failed, just wait." << dendl;
      // Draining the full message queue can take a while; schedule a 1ms
      // wakeup (if none is pending) and retry then.
      if (connection->register_time_events.empty()) {
        connection->register_time_events.insert(
            connection->center->create_time_event(1000,
                                                  connection->wakeup_handler));
      }
      return nullptr;
    }
  }
  state = THROTTLE_BYTES;
  return CONTINUE(throttle_bytes);
}
// Second throttle stage: reserve the message's byte count from the policy
// byte throttler.  On contention, schedule a ~1ms wakeup and retry later.
CtPtr ProtocolV2::throttle_bytes() {
  ldout(cct, 20) << __func__ << dendl;
  const size_t cur_msg_size = get_current_msg_size();
  if (cur_msg_size) {
    if (connection->policy.throttler_bytes) {
      ldout(cct, 10) << __func__ << " wants " << cur_msg_size
                     << " bytes from policy throttler "
                     << connection->policy.throttler_bytes->get_current() << "/"
                     << connection->policy.throttler_bytes->get_max() << dendl;
      if (!connection->policy.throttler_bytes->get_or_fail(cur_msg_size)) {
        ldout(cct, 1) << __func__ << " wants " << cur_msg_size
                      << " bytes from policy throttler "
                      << connection->policy.throttler_bytes->get_current()
                      << "/" << connection->policy.throttler_bytes->get_max()
                      << " failed, just wait." << dendl;
        // Draining the full message queue can take a while; schedule a 1ms
        // wakeup (if none is pending) and retry then.
        if (connection->register_time_events.empty()) {
          connection->register_time_events.insert(
              connection->center->create_time_event(
                  1000, connection->wakeup_handler));
        }
        return nullptr;
      }
    }
  }

  state = THROTTLE_DISPATCH_QUEUE;
  return CONTINUE(throttle_dispatch_queue);
}
// Final throttle stage: reserve the message's byte count from the shared
// dispatch-queue throttler, then record the throttle stamp and start
// reading the message frame's segments.
CtPtr ProtocolV2::throttle_dispatch_queue() {
  ldout(cct, 20) << __func__ << dendl;
  const size_t cur_msg_size = get_current_msg_size();
  if (cur_msg_size) {
    if (!connection->dispatch_queue->dispatch_throttler.get_or_fail(
            cur_msg_size)) {
      ldout(cct, 1)
          << __func__ << " wants " << cur_msg_size
          << " bytes from dispatch throttle "
          << connection->dispatch_queue->dispatch_throttler.get_current() << "/"
          << connection->dispatch_queue->dispatch_throttler.get_max()
          << " failed, just wait." << dendl;
      // Draining the full message queue can take a while; schedule a 1ms
      // wakeup (if none is pending) and retry then.
      if (connection->register_time_events.empty()) {
        connection->register_time_events.insert(
            connection->center->create_time_event(1000,
                                                  connection->wakeup_handler));
      }
      return nullptr;
    }
  }

  throttle_stamp = ceph_clock_now();
  state = THROTTLE_DONE;
  return read_frame_segment();
}
// Respond to a peer KEEPALIVE2: echo its timestamp back in a
// KEEPALIVE2_ACK frame, record our last-keepalive time, and kick the
// writer so the ack actually goes out.
CtPtr ProtocolV2::handle_keepalive2(ceph::bufferlist &payload)
{
  ldout(cct, 20) << __func__
                 << " payload.length()=" << payload.length() << dendl;
  if (state != READY) {
    lderr(cct) << __func__ << " not in ready state!" << dendl;
    return _fault();
  }
  auto keepalive_frame = KeepAliveFrame::Decode(payload);
  ldout(cct, 30) << __func__ << " got KEEPALIVE2 tag ..." << dendl;
  // queue the ack under write_lock; fault (unlocked) if appending fails
  connection->write_lock.lock();
  auto keepalive_ack_frame = KeepAliveFrameAck::Encode(keepalive_frame.timestamp());
  if (!append_frame(keepalive_ack_frame)) {
    connection->write_lock.unlock();
    return _fault();
  }
  connection->write_lock.unlock();
  ldout(cct, 20) << __func__ << " got KEEPALIVE2 "
                 << keepalive_frame.timestamp() << dendl;
  connection->set_last_keepalive(ceph_clock_now());
  if (is_connected()) {
    connection->center->dispatch_event_external(connection->write_handler);
  }
  return CONTINUE(read_frame);
}
// Record the timestamp echoed back by the peer in a KEEPALIVE2_ACK frame.
CtPtr ProtocolV2::handle_keepalive2_ack(ceph::bufferlist &payload)
{
  ldout(cct, 20) << __func__
                 << " payload.length()=" << payload.length() << dendl;
  // keepalive acks are only valid on an established session
  if (state != READY) {
    lderr(cct) << __func__ << " not in ready state!" << dendl;
    return _fault();
  }
  auto ack_frame = KeepAliveFrameAck::Decode(payload);
  connection->set_last_keepalive_ack(ack_frame.timestamp());
  ldout(cct, 20) << __func__ << " got KEEPALIVE_ACK" << dendl;
  return CONTINUE(read_frame);
}
// Process a standalone ACK frame by delegating to the seq-based
// handle_message_ack() overload.
CtPtr ProtocolV2::handle_message_ack(ceph::bufferlist &payload)
{
  ldout(cct, 20) << __func__
                 << " payload.length()=" << payload.length() << dendl;
  // ACK frames are only valid on an established session
  if (state != READY) {
    lderr(cct) << __func__ << " not in ready state!" << dendl;
    return _fault();
  }
  auto ack_frame = AckFrame::Decode(payload);
  handle_message_ack(ack_frame.seq());
  return CONTINUE(read_frame);
}
/* Client Protocol Methods */
// Client-side entry point: grab a fresh global sequence number and begin
// the banner exchange, continuing with post_client_banner_exchange().
CtPtr ProtocolV2::start_client_banner_exchange() {
  ldout(cct, 20) << __func__ << dendl;
  INTERCEPT(1);
  global_seq = messenger->get_global_seq();
  state = BANNER_CONNECTING;
  return _banner_exchange(CONTINUATION(post_client_banner_exchange));
}
// After banner and hello complete on the client side, move straight into
// authentication.
CtPtr ProtocolV2::post_client_banner_exchange() {
  ldout(cct, 20) << __func__ << dendl;
  state = AUTH_CONNECTING;
  return send_auth_request();
}
// Build and send an AUTH_REQUEST frame.  The auth client picks the method
// and initial payload; `allowed_methods` is part of the retry interface
// used from handle_auth_bad_method() (not consulted directly here — the
// auth client tracks it via auth_meta).  The connection lock is dropped
// around the (potentially slow) auth client call.
CtPtr ProtocolV2::send_auth_request(std::vector<uint32_t> &allowed_methods) {
  ceph_assert(messenger->auth_client);
  ldout(cct, 20) << __func__ << " peer_type " << (int)connection->peer_type
                 << " auth_client " << messenger->auth_client << dendl;

  ceph::bufferlist bl;
  std::vector<uint32_t> preferred_modes;
  // keep auth_meta alive across the unlocked region
  auto am = auth_meta;
  connection->lock.unlock();
  int r = messenger->auth_client->get_auth_request(
    connection, am.get(),
    &am->auth_method, &preferred_modes, &bl);
  connection->lock.lock();
  // state may have changed while unlocked
  if (state != AUTH_CONNECTING) {
    ldout(cct, 1) << __func__ << " state changed!" << dendl;
    return _fault();
  }
  if (r < 0) {
    ldout(cct, 0) << __func__ << " get_initial_auth_request returned " << r
		  << dendl;
    stop();
    connection->dispatch_queue->queue_reset(connection);
    return nullptr;
  }

  INTERCEPT(9);

  auto frame = AuthRequestFrame::Encode(auth_meta->auth_method, preferred_modes,
                                        bl);
  return WRITE(frame, "auth request", read_frame);
}
// The server rejected our auth method: let the auth client pick another
// from the advertised allowed methods/modes and retry the auth request.
// The connection lock is dropped around the auth client callback.
CtPtr ProtocolV2::handle_auth_bad_method(ceph::bufferlist &payload) {
  ldout(cct, 20) << __func__
                 << " payload.length()=" << payload.length() << dendl;

  if (state != AUTH_CONNECTING) {
    lderr(cct) << __func__ << " not in auth connect state!" << dendl;
    return _fault();
  }

  auto bad_method = AuthBadMethodFrame::Decode(payload);
  ldout(cct, 1) << __func__ << " method=" << bad_method.method()
                << " result " << cpp_strerror(bad_method.result())
                << ", allowed methods=" << bad_method.allowed_methods()
                << ", allowed modes=" << bad_method.allowed_modes()
                << dendl;
  ceph_assert(messenger->auth_client);
  auto am = auth_meta;
  connection->lock.unlock();
  int r = messenger->auth_client->handle_auth_bad_method(
    connection,
    am.get(),
    bad_method.method(), bad_method.result(),
    bad_method.allowed_methods(),
    bad_method.allowed_modes());
  connection->lock.lock();
  // fault on auth client error or if state changed while unlocked
  if (state != AUTH_CONNECTING || r < 0) {
    return _fault();
  }
  return send_auth_request(bad_method.allowed_methods());
}
// Multi-round auth: feed the server's AUTH_REPLY_MORE payload to the auth
// client and send its response back as AUTH_REQUEST_MORE.  The connection
// lock is dropped around the auth client callback.
CtPtr ProtocolV2::handle_auth_reply_more(ceph::bufferlist &payload)
{
  ldout(cct, 20) << __func__
                 << " payload.length()=" << payload.length() << dendl;

  if (state != AUTH_CONNECTING) {
    lderr(cct) << __func__ << " not in auth connect state!" << dendl;
    return _fault();
  }

  auto auth_more = AuthReplyMoreFrame::Decode(payload);
  ldout(cct, 5) << __func__
                << " auth reply more len=" << auth_more.auth_payload().length()
                << dendl;
  ceph_assert(messenger->auth_client);
  ceph::bufferlist reply;
  auto am = auth_meta;
  connection->lock.unlock();
  int r = messenger->auth_client->handle_auth_reply_more(
    connection, am.get(), auth_more.auth_payload(), &reply);
  connection->lock.lock();
  if (state != AUTH_CONNECTING) {
    ldout(cct, 1) << __func__ << " state changed!" << dendl;
    return _fault();
  }
  if (r < 0) {
    lderr(cct) << __func__ << " auth_client handle_auth_reply_more returned "
	       << r << dendl;
    return _fault();
  }
  auto more_reply = AuthRequestMoreFrame::Encode(reply);
  return WRITE(more_reply, "auth request more", read_frame);
}
// Auth negotiation completed on the server side: hand the final payload to
// the auth client to derive the session key and connection secret, set up
// the on-wire crypto handlers, and send our pre-auth signature.  The
// connection lock is dropped around the auth client callback.
CtPtr ProtocolV2::handle_auth_done(ceph::bufferlist &payload)
{
  ldout(cct, 20) << __func__
                 << " payload.length()=" << payload.length() << dendl;

  if (state != AUTH_CONNECTING) {
    lderr(cct) << __func__ << " not in auth connect state!" << dendl;
    return _fault();
  }

  auto auth_done = AuthDoneFrame::Decode(payload);

  ceph_assert(messenger->auth_client);
  auto am = auth_meta;
  connection->lock.unlock();
  int r = messenger->auth_client->handle_auth_done(
    connection,
    am.get(),
    auth_done.global_id(),
    auth_done.con_mode(),
    auth_done.auth_payload(),
    &am->session_key,
    &am->connection_secret);
  connection->lock.lock();
  if (state != AUTH_CONNECTING) {
    ldout(cct, 1) << __func__ << " state changed!" << dendl;
    return _fault();
  }
  if (r < 0) {
    return _fault();
  }
  auth_meta->con_mode = auth_done.con_mode();
  // set up rx/tx crypto for the negotiated mode (nonce format depends on
  // whether the peer speaks msgr2.1)
  bool is_rev1 = HAVE_MSGR2_FEATURE(peer_supported_features, REVISION_1);
  session_stream_handlers = ceph::crypto::onwire::rxtx_t::create_handler_pair(
      cct, *auth_meta, /*new_nonce_format=*/is_rev1, /*crossed=*/false);

  state = AUTH_CONNECTING_SIGN;

  // sign everything received so far during pre-auth (empty key => null sig)
  const auto sig = auth_meta->session_key.empty() ? sha256_digest_t() :
    auth_meta->session_key.hmac_sha256(cct, pre_auth.rxbuf);
  auto sig_frame = AuthSignatureFrame::Encode(sig);
  pre_auth.enabled = false;
  pre_auth.rxbuf.clear();
  return WRITE(sig_frame, "auth signature", read_frame);
}
// Post-auth client step: negotiate on-wire compression first when the peer
// supports it; otherwise move straight to session establishment.
CtPtr ProtocolV2::finish_client_auth() {
  if (!HAVE_MSGR2_FEATURE(peer_supported_features, COMPRESSION)) {
    return start_session_connect();
  }
  return send_compression_request();
}
// Post-auth server step: auth is complete and the client's pre-auth
// signature checked out, so advance to the next negotiation stage.
CtPtr ProtocolV2::finish_server_auth() {
  // server had sent AuthDone and client responded with correct pre-auth
  // signature.
  // We can start the conditional msgr protocol stages.
  if (HAVE_MSGR2_FEATURE(peer_supported_features, COMPRESSION)) {
    state = COMPRESSION_ACCEPTING;
  } else {
    // No msgr protocol features to process
    // we can start accepting new sessions/reconnects.
    state = SESSION_ACCEPTING;
  }

  return CONTINUE(read_frame);
}
// Begin session establishment.  A non-zero server cookie means a prior
// session with this peer completed, so attempt to resume it; otherwise
// start a brand-new session with a client_ident.
CtPtr ProtocolV2::start_session_connect() {
  if (server_cookie) {
    // reconnecting to previous session
    ceph_assert(connect_seq > 0);
    state = SESSION_RECONNECTING;
    return send_reconnect();
  }
  ceph_assert(connect_seq == 0);
  state = SESSION_CONNECTING;
  return send_client_ident();
}
// Send our identification (addrs/gid/seq/features/cookie) to the server.
// For lossless connections a random non-zero client cookie is generated
// once and reused for session resumption.
CtPtr ProtocolV2::send_client_ident() {
  ldout(cct, 20) << __func__ << dendl;
  if (!connection->policy.lossy && !client_cookie) {
    // range [1, UINT64_MAX]: zero is reserved to mean "no cookie"
    client_cookie = ceph::util::generate_random_number<uint64_t>(1, -1ll);
  }
  uint64_t flags = 0;
  if (connection->policy.lossy) {
    flags |= CEPH_MSG_CONNECT_LOSSY;
  }
  auto client_ident = ClientIdentFrame::Encode(
      messenger->get_myaddrs(),
      connection->target_addr,
      messenger->get_myname().num(),
      global_seq,
      connection->policy.features_supported,
      connection->policy.features_required | msgr2_required,
      flags,
      client_cookie);
  ldout(cct, 5) << __func__ << " sending identification: "
                << "addrs=" << messenger->get_myaddrs()
                << " target=" << connection->target_addr
                << " gid=" << messenger->get_myname().num()
                << " global_seq=" << global_seq
                << " features_supported=" << std::hex
                << connection->policy.features_supported
                << " features_required="
                << (connection->policy.features_required | msgr2_required)
                << " flags=" << flags
                << " cookie=" << client_cookie << std::dec << dendl;
  INTERCEPT(11);
  return WRITE(client_ident, "client ident", read_frame);
}
// Ask the server to resume a previously established session, identified
// by the (client_cookie, server_cookie) pair, supplying our sequence
// numbers so delivery can continue where it left off.
CtPtr ProtocolV2::send_reconnect() {
  ldout(cct, 20) << __func__ << dendl;
  auto reconnect = ReconnectFrame::Encode(messenger->get_myaddrs(),
                                          client_cookie,
                                          server_cookie,
                                          global_seq,
                                          connect_seq,
                                          in_seq);
  ldout(cct, 5) << __func__ << " reconnect to session: client_cookie="
                << std::hex << client_cookie << " server_cookie="
                << server_cookie << std::dec
                << " gs=" << global_seq << " cs=" << connect_seq
                << " ms=" << in_seq << dendl;
  INTERCEPT(13);
  return WRITE(reconnect, "reconnect", read_frame);
}
// Client side: the server rejected our client_ident because we lack
// features it requires.  Nothing can be negotiated; fail the connection.
CtPtr ProtocolV2::handle_ident_missing_features(ceph::bufferlist &payload)
{
  ldout(cct, 20) << __func__
                 << " payload.length()=" << payload.length() << dendl;
  if (state != SESSION_CONNECTING) {
    lderr(cct) << __func__ << " not in session connect state!" << dendl;
    return _fault();
  }
  auto ident_missing =
    IdentMissingFeaturesFrame::Decode(payload);
  lderr(cct) << __func__
             << " client does not support all server features: " << std::hex
             << ident_missing.features() << std::dec << dendl;
  return _fault();
}
// Client side: the server refused to resume our session.  A "full" reset
// discards all session state; a partial reset keeps the client cookie and
// only forgets the server-side identifiers.  Either way we restart
// establishment with a fresh client_ident.
CtPtr ProtocolV2::handle_session_reset(ceph::bufferlist &payload)
{
  ldout(cct, 20) << __func__
                 << " payload.length()=" << payload.length() << dendl;
  if (state != SESSION_RECONNECTING) {
    lderr(cct) << __func__ << " not in session reconnect state!" << dendl;
    return _fault();
  }
  auto reset = ResetFrame::Decode(payload);
  ldout(cct, 1) << __func__ << " received session reset full=" << reset.full()
                << dendl;
  if (reset.full()) {
    reset_session();
  } else {
    // partial reset: resume establishment, reusing our client_cookie
    server_cookie = 0;
    connect_seq = 0;
    in_seq = 0;
  }
  state = SESSION_CONNECTING;
  return send_client_ident();
}
// Client side: the server saw a stale connect_seq; bump ours to one past
// the value it reported and retry the reconnect.
CtPtr ProtocolV2::handle_session_retry(ceph::bufferlist &payload)
{
  ldout(cct, 20) << __func__
                 << " payload.length()=" << payload.length() << dendl;
  if (state != SESSION_RECONNECTING) {
    lderr(cct) << __func__ << " not in session reconnect state!" << dendl;
    return _fault();
  }
  auto retry = RetryFrame::Decode(payload);
  connect_seq = retry.connect_seq() + 1;
  ldout(cct, 1) << __func__
                << " received session retry connect_seq=" << retry.connect_seq()
                << ", inc to cs=" << connect_seq << dendl;
  return send_reconnect();
}
// Client side: the server saw a stale global_seq; obtain a fresh one from
// the messenger (at least one past the server's value) and retry.
CtPtr ProtocolV2::handle_session_retry_global(ceph::bufferlist &payload)
{
  ldout(cct, 20) << __func__
                 << " payload.length()=" << payload.length() << dendl;
  if (state != SESSION_RECONNECTING) {
    lderr(cct) << __func__ << " not in session reconnect state!" << dendl;
    return _fault();
  }
  auto retry = RetryGlobalFrame::Decode(payload);
  global_seq = messenger->get_global_seq(retry.global_seq());
  ldout(cct, 1) << __func__ << " received session retry global global_seq="
                << retry.global_seq() << ", choose new gs=" << global_seq
                << dendl;
  return send_reconnect();
}
// Client side: we lost a connection race — the peer tells us to wait for
// its competing connection to win.  Setting state to WAIT before calling
// _fault() presumably makes the fault path stand by rather than retry
// immediately — confirm against _fault()'s WAIT handling.
CtPtr ProtocolV2::handle_wait(ceph::bufferlist &payload) {
  ldout(cct, 20) << __func__
                 << " received WAIT (connection race)"
                 << " payload.length()=" << payload.length()
                 << dendl;
  if (state != SESSION_CONNECTING && state != SESSION_RECONNECTING) {
    lderr(cct) << __func__ << " not in session (re)connect state!" << dendl;
    return _fault();
  }
  state = WAIT;
  // WaitFrame carries no data; decode only to validate the payload
  WaitFrame::Decode(payload);
  return _fault();
}
// Client side: the server accepted our session resumption.  Drop queued
// messages the peer already acknowledged, then flip to ready and notify
// dispatchers.
CtPtr ProtocolV2::handle_reconnect_ok(ceph::bufferlist &payload)
{
  ldout(cct, 20) << __func__
                 << " payload.length()=" << payload.length() << dendl;
  if (state != SESSION_RECONNECTING) {
    lderr(cct) << __func__ << " not in session reconnect state!" << dendl;
    return _fault();
  }
  auto reconnect_ok = ReconnectOkFrame::Decode(payload);
  ldout(cct, 5) << __func__
                << " reconnect accepted: sms=" << reconnect_ok.msg_seq()
                << dendl;
  // msg_seq is the last message the peer received; anything up to it is
  // already delivered and need not be resent
  out_seq = discard_requeued_up_to(out_seq, reconnect_ok.msg_seq());
  backoff = utime_t();  // reset reconnect backoff on success
  ldout(cct, 10) << __func__ << " reconnect success " << connect_seq
                 << ", lossy = " << connection->policy.lossy << ", features "
                 << connection->get_features() << dendl;
  if (connection->delay_state) {
    ceph_assert(connection->delay_state->ready());
  }
  connection->dispatch_queue->queue_connect(connection);
  messenger->ms_deliver_handle_fast_connect(connection);
  return ready();
}
// Client side: final step of a fresh session — record the server's
// identity (addrs, gid, seq, negotiated features, cookie) and transition
// to ready.
CtPtr ProtocolV2::handle_server_ident(ceph::bufferlist &payload)
{
  ldout(cct, 20) << __func__
                 << " payload.length()=" << payload.length() << dendl;
  if (state != SESSION_CONNECTING) {
    lderr(cct) << __func__ << " not in session connect state!" << dendl;
    return _fault();
  }
  auto server_ident = ServerIdentFrame::Decode(payload);
  ldout(cct, 5) << __func__ << " received server identification:"
                << " addrs=" << server_ident.addrs()
                << " gid=" << server_ident.gid()
                << " global_seq=" << server_ident.global_seq()
                << " features_supported=" << std::hex
                << server_ident.supported_features()
                << " features_required=" << server_ident.required_features()
                << " flags=" << server_ident.flags()
                << " cookie=" << server_ident.cookie() << std::dec << dendl;
  // is this who we intended to talk to?
  // be a bit forgiving here, since we may be connecting based on addresses parsed out
  // of mon_host or something.
  if (!server_ident.addrs().contains(connection->target_addr)) {
    ldout(cct,1) << __func__ << " peer identifies as " << server_ident.addrs()
                 << ", does not include " << connection->target_addr << dendl;
    return _fault();
  }
  server_cookie = server_ident.cookie();
  connection->set_peer_addrs(server_ident.addrs());
  peer_name = entity_name_t(connection->get_peer_type(), server_ident.gid());
  // effective feature set is the intersection of both sides
  connection->set_features(server_ident.supported_features() &
                           connection->policy.features_supported);
  peer_global_seq = server_ident.global_seq();
  connection->policy.lossy = server_ident.flags() & CEPH_MSG_CONNECT_LOSSY;
  backoff = utime_t();  // reset reconnect backoff on success
  ldout(cct, 10) << __func__ << " connect success " << connect_seq
                 << ", lossy = " << connection->policy.lossy << ", features "
                 << connection->get_features() << dendl;
  if (connection->delay_state) {
    ceph_assert(connection->delay_state->ready());
  }
  connection->dispatch_queue->queue_connect(connection);
  messenger->ms_deliver_handle_fast_connect(connection);
  return ready();
}
// Client side: propose on-wire compression to the peer.  Our own mode
// comes from messenger-level policy for this peer type; secure auth
// modes may veto compression.
CtPtr ProtocolV2::send_compression_request() {
  state = COMPRESSION_CONNECTING;
  const entity_type_t ptype = connection->get_peer_type();
  comp_meta.con_mode = static_cast<Compressor::CompressionMode>(
      messenger->comp_registry.get_mode(ptype, auth_meta->is_mode_secure()));
  const auto methods = messenger->comp_registry.get_methods(ptype);
  auto frame =
      CompressionRequestFrame::Encode(comp_meta.is_compress(), methods);
  INTERCEPT(19);
  return WRITE(frame, "compression request", read_frame);
}
// Client side: the server answered our compression request.  Record the
// agreed method; if the server's compress/no-compress decision disagrees
// with ours, fall back to no compression.  Then continue with session
// establishment.
CtPtr ProtocolV2::handle_compression_done(ceph::bufferlist &payload) {
  if (state != COMPRESSION_CONNECTING) {
    lderr(cct) << __func__ << " state changed!" << dendl;
    return _fault();
  }
  auto response = CompressionDoneFrame::Decode(payload);
  ldout(cct, 10) << __func__ << " CompressionDoneFrame(is_compress=" << response.is_compress()
                 << ", method=" << response.method() << ")" << dendl;
  comp_meta.con_method = static_cast<Compressor::CompressionAlgorithm>(response.method());
  if (comp_meta.is_compress() != response.is_compress()) {
    comp_meta.con_mode = Compressor::COMP_NONE;
  }
  // install the (possibly no-op) compression handlers for this session
  session_compression_handlers = ceph::compression::onwire::rxtx_t::create_handler_pair(
    cct, comp_meta, messenger->comp_registry.get_min_compression_size(connection->get_peer_type()));
  return start_session_connect();
}
/* Server Protocol Methods */
// Server side: kick off the banner exchange with an incoming client.
CtPtr ProtocolV2::start_server_banner_exchange() {
  ldout(cct, 20) << __func__ << dendl;
  INTERCEPT(2);
  state = BANNER_ACCEPTING;
  return _banner_exchange(CONTINUATION(post_server_banner_exchange));
}
// Server side: banner exchange done; wait for the client's auth request.
CtPtr ProtocolV2::post_server_banner_exchange() {
  ldout(cct, 20) << __func__ << dendl;
  state = AUTH_ACCEPTING;
  return CONTINUE(read_frame);
}
// Server side: first auth frame from the client.  Pick a connection mode
// from the client's preferences; reject with AuthBadMethod if none is
// acceptable, otherwise forward the payload to the auth server.
CtPtr ProtocolV2::handle_auth_request(ceph::bufferlist &payload) {
  ldout(cct, 20) << __func__ << " payload.length()=" << payload.length()
                 << dendl;
  if (state != AUTH_ACCEPTING) {
    lderr(cct) << __func__ << " not in auth accept state!" << dendl;
    return _fault();
  }
  auto request = AuthRequestFrame::Decode(payload);
  ldout(cct, 10) << __func__ << " AuthRequest(method=" << request.method()
                 << ", preferred_modes=" << request.preferred_modes()
                 << ", payload_len=" << request.auth_payload().length() << ")"
                 << dendl;
  auth_meta->auth_method = request.method();
  auth_meta->con_mode = messenger->auth_server->pick_con_mode(
    connection->get_peer_type(), auth_meta->auth_method,
    request.preferred_modes());
  if (auth_meta->con_mode == CEPH_CON_MODE_UNKNOWN) {
    return _auth_bad_method(-EOPNOTSUPP);
  }
  return _handle_auth_request(request.auth_payload(), false);
}
// Server side: reject the client's auth method with error r (< 0),
// advertising the methods and modes we would accept so the client can
// retry with one of them.
CtPtr ProtocolV2::_auth_bad_method(int r)
{
  ceph_assert(r < 0);
  std::vector<uint32_t> allowed_methods;
  std::vector<uint32_t> allowed_modes;
  messenger->auth_server->get_supported_auth_methods(
    connection->get_peer_type(), &allowed_methods, &allowed_modes);
  ldout(cct, 1) << __func__ << " auth_method " << auth_meta->auth_method
                << " r " << cpp_strerror(r)
                << ", allowed_methods " << allowed_methods
                << ", allowed_modes " << allowed_modes
                << dendl;
  auto bad_method = AuthBadMethodFrame::Encode(auth_meta->auth_method, r,
                                               allowed_methods, allowed_modes);
  return WRITE(bad_method, "bad auth method", read_frame);
}
// Server side: feed an auth payload (initial or continuation) to the
// auth server and act on its verdict:
//   r == 1  -> auth complete, send AuthDone and await the signature
//   r == 0  -> need more, send AuthReplyMore
//   r == -EBUSY -> transient, drop the connection so the client retries
//   otherwise -> reject the method
CtPtr ProtocolV2::_handle_auth_request(ceph::bufferlist& auth_payload, bool more)
{
  if (!messenger->auth_server) {
    return _fault();
  }
  ceph::bufferlist reply;
  // keep a ref to auth_meta: the connection lock is dropped around the
  // auth_server callback, so the connection may be torn down meanwhile
  auto am = auth_meta;
  connection->lock.unlock();
  int r = messenger->auth_server->handle_auth_request(
    connection, am.get(),
    more, am->auth_method, auth_payload,
    &reply);
  connection->lock.lock();
  // re-validate state after reacquiring the lock
  if (state != AUTH_ACCEPTING && state != AUTH_ACCEPTING_MORE) {
    ldout(cct, 1) << __func__
                  << " state changed while accept, it must be mark_down"
                  << dendl;
    ceph_assert(state == CLOSED);
    return _fault();
  }
  if (r == 1) {
    INTERCEPT(10);
    state = AUTH_ACCEPTING_SIGN;
    auto auth_done = AuthDoneFrame::Encode(connection->peer_global_id,
                                           auth_meta->con_mode,
                                           reply);
    return WRITE(auth_done, "auth done", finish_auth);
  } else if (r == 0) {
    state = AUTH_ACCEPTING_MORE;
    auto more = AuthReplyMoreFrame::Encode(reply);
    return WRITE(more, "auth reply more", read_frame);
  } else if (r == -EBUSY) {
    // kick the client and maybe they'll come back later
    return _fault();
  } else {
    return _auth_bad_method(r);
  }
}
// Server side (runs after AuthDone is written): install the on-wire
// crypto handlers and send our pre-auth signature covering everything
// received before auth completed.
CtPtr ProtocolV2::finish_auth()
{
  ceph_assert(auth_meta);
  // TODO: having a possibility to check whether we're server or client could
  // allow reusing finish_auth().
  bool is_rev1 = HAVE_MSGR2_FEATURE(peer_supported_features, REVISION_1);
  session_stream_handlers = ceph::crypto::onwire::rxtx_t::create_handler_pair(
      cct, *auth_meta, /*new_nonce_format=*/is_rev1, /*crossed=*/true);
  // an empty session key (e.g. auth none) yields an all-zero digest
  const auto sig = auth_meta->session_key.empty() ? sha256_digest_t() :
    auth_meta->session_key.hmac_sha256(cct, pre_auth.rxbuf);
  auto sig_frame = AuthSignatureFrame::Encode(sig);
  pre_auth.enabled = false;
  pre_auth.rxbuf.clear();
  return WRITE(sig_frame, "auth signature", read_frame);
}
// Server side: a continuation of a multi-round auth exchange.
CtPtr ProtocolV2::handle_auth_request_more(ceph::bufferlist &payload)
{
  ldout(cct, 20) << __func__
                 << " payload.length()=" << payload.length() << dendl;
  if (state != AUTH_ACCEPTING_MORE) {
    lderr(cct) << __func__ << " not in auth accept more state!" << dendl;
    return _fault();
  }
  auto auth_more = AuthRequestMoreFrame::Decode(payload);
  return _handle_auth_request(auth_more.auth_payload(), true);
}
// Both sides: verify the peer's pre-auth signature against the HMAC of
// everything we transmitted before auth completed, then branch into the
// server- or client-specific post-auth path.
CtPtr ProtocolV2::handle_auth_signature(ceph::bufferlist &payload)
{
  ldout(cct, 20) << __func__
                 << " payload.length()=" << payload.length() << dendl;
  if (state != AUTH_ACCEPTING_SIGN && state != AUTH_CONNECTING_SIGN) {
    lderr(cct) << __func__
               << " pre-auth verification signature seen in wrong state!"
               << dendl;
    return _fault();
  }
  auto sig_frame = AuthSignatureFrame::Decode(payload);
  // expected signature: HMAC over our own pre-auth tx buffer (all-zero
  // digest when there is no session key, e.g. auth none)
  const auto actual_tx_sig = auth_meta->session_key.empty() ?
    sha256_digest_t() : auth_meta->session_key.hmac_sha256(cct, pre_auth.txbuf);
  if (sig_frame.signature() != actual_tx_sig) {
    ldout(cct, 2) << __func__ << " pre-auth signature mismatch"
                  << " actual_tx_sig=" << actual_tx_sig
                  << " sig_frame.signature()=" << sig_frame.signature()
                  << dendl;
    return _fault();
  } else {
    ldout(cct, 20) << __func__ << " pre-auth signature success"
                   << " sig_frame.signature()=" << sig_frame.signature()
                   << dendl;
    pre_auth.txbuf.clear();
  }
  if (state == AUTH_ACCEPTING_SIGN) {
    // this happened on server side
    return finish_server_auth();
  } else if (state == AUTH_CONNECTING_SIGN) {
    // this happened at client side
    return finish_client_auth();
  } else {
    ceph_abort("state corruption");
  }
}
// Server side: process the client's identification for a *new* session.
// Validates the target address and required features, records the peer's
// identity, and — unless this is an unregistered lossy client — checks
// for an existing connection to the same peer (possible connection race).
CtPtr ProtocolV2::handle_client_ident(ceph::bufferlist &payload)
{
  ldout(cct, 20) << __func__
                 << " payload.length()=" << payload.length() << dendl;
  if (state != SESSION_ACCEPTING) {
    lderr(cct) << __func__ << " not in session accept state!" << dendl;
    return _fault();
  }
  auto client_ident = ClientIdentFrame::Decode(payload);
  ldout(cct, 5) << __func__ << " received client identification:"
                << " addrs=" << client_ident.addrs()
                << " target=" << client_ident.target_addr()
                << " gid=" << client_ident.gid()
                << " global_seq=" << client_ident.global_seq()
                << " features_supported=" << std::hex
                << client_ident.supported_features()
                << " features_required=" << client_ident.required_features()
                << " flags=" << client_ident.flags()
                << " cookie=" << client_ident.cookie() << std::dec << dendl;
  if (client_ident.addrs().empty() ||
      client_ident.addrs().front() == entity_addr_t()) {
    ldout(cct,5) << __func__ << " oops, client_ident.addrs() is empty" << dendl;
    return _fault();  // a v2 peer should never do this
  }
  if (!messenger->get_myaddrs().contains(client_ident.target_addr())) {
    ldout(cct,5) << __func__ << " peer is trying to reach "
                 << client_ident.target_addr()
                 << " which is not us (" << messenger->get_myaddrs() << ")"
                 << dendl;
    return _fault();
  }
  connection->set_peer_addrs(client_ident.addrs());
  connection->target_addr = connection->_infer_target_addr(client_ident.addrs());
  peer_name = entity_name_t(connection->get_peer_type(), client_ident.gid());
  connection->set_peer_id(client_ident.gid());
  client_cookie = client_ident.cookie();
  // features we (plus msgr2 itself) require but the client lacks
  uint64_t feat_missing =
    (connection->policy.features_required | msgr2_required) &
    ~(uint64_t)client_ident.supported_features();
  if (feat_missing) {
    ldout(cct, 1) << __func__ << " peer missing required features " << std::hex
                  << feat_missing << std::dec << dendl;
    auto ident_missing_features =
        IdentMissingFeaturesFrame::Encode(feat_missing);
    return WRITE(ident_missing_features, "ident missing features", read_frame);
  }
  connection_features =
      client_ident.supported_features() & connection->policy.features_supported;
  peer_global_seq = client_ident.global_seq();
  if (connection->policy.server &&
      connection->policy.lossy &&
      !connection->policy.register_lossy_clients) {
    // incoming lossy client, no need to register this connection
  } else {
    // Looks good so far, let's check if there is already an existing connection
    // to this peer.
    // lookup_conn takes messenger-level locks, so drop ours first
    connection->lock.unlock();
    AsyncConnectionRef existing = messenger->lookup_conn(
      *connection->peer_addrs);
    if (existing &&
        existing->protocol->proto_type != 2) {
      ldout(cct,1) << __func__ << " existing " << existing << " proto "
                   << existing->protocol.get() << " version is "
                   << existing->protocol->proto_type << ", marking down"
                   << dendl;
      existing->mark_down();
      existing = nullptr;
    }
    connection->inject_delay();
    connection->lock.lock();
    // re-validate state after reacquiring the lock
    if (state != SESSION_ACCEPTING) {
      ldout(cct, 1) << __func__
                    << " state changed while accept, it must be mark_down"
                    << dendl;
      ceph_assert(state == CLOSED);
      return _fault();
    }
    if (existing) {
      return handle_existing_connection(existing);
    }
  }
  // if everything is OK reply with server identification
  return send_server_ident();
}
// Server side: the client asks to resume a previous session.  Locate the
// existing connection for this peer and validate cookies and sequence
// numbers; depending on the outcome we reset, retry, wait (race), or
// splice this socket into the existing session via reuse_connection().
CtPtr ProtocolV2::handle_reconnect(ceph::bufferlist &payload)
{
  ldout(cct, 20) << __func__
                 << " payload.length()=" << payload.length() << dendl;
  if (state != SESSION_ACCEPTING) {
    lderr(cct) << __func__ << " not in session accept state!" << dendl;
    return _fault();
  }
  auto reconnect = ReconnectFrame::Decode(payload);
  ldout(cct, 5) << __func__
                << " received reconnect:"
                << " client_cookie=" << std::hex << reconnect.client_cookie()
                << " server_cookie=" << reconnect.server_cookie() << std::dec
                << " gs=" << reconnect.global_seq()
                << " cs=" << reconnect.connect_seq()
                << " ms=" << reconnect.msg_seq()
                << dendl;
  // Should we check if one of the ident.addrs match connection->target_addr
  // as we do in ProtocolV1?
  connection->set_peer_addrs(reconnect.addrs());
  connection->target_addr = connection->_infer_target_addr(reconnect.addrs());
  peer_global_seq = reconnect.global_seq();
  // lookup_conn takes messenger-level locks, so drop ours first
  connection->lock.unlock();
  AsyncConnectionRef existing = messenger->lookup_conn(*connection->peer_addrs);
  if (existing &&
      existing->protocol->proto_type != 2) {
    ldout(cct,1) << __func__ << " existing " << existing << " proto "
                 << existing->protocol.get() << " version is "
                 << existing->protocol->proto_type << ", marking down" << dendl;
    existing->mark_down();
    existing = nullptr;
  }
  connection->inject_delay();
  connection->lock.lock();
  // re-validate state after reacquiring the lock
  if (state != SESSION_ACCEPTING) {
    ldout(cct, 1) << __func__
                  << " state changed while accept, it must be mark_down"
                  << dendl;
    ceph_assert(state == CLOSED);
    return _fault();
  }
  if (!existing) {
    // there is no existing connection therefore cannot reconnect to previous
    // session
    ldout(cct, 0) << __func__
                  << " no existing connection exists, reseting client" << dendl;
    auto reset = ResetFrame::Encode(true);
    return WRITE(reset, "session reset", read_frame);
  }
  std::lock_guard<std::mutex> l(existing->lock);
  ProtocolV2 *exproto = dynamic_cast<ProtocolV2 *>(existing->protocol.get());
  if (!exproto) {
    ldout(cct, 1) << __func__ << " existing=" << existing << dendl;
    ceph_assert(false);
  }
  if (exproto->state == CLOSED) {
    ldout(cct, 5) << __func__ << " existing " << existing
                  << " already closed. Reseting client" << dendl;
    auto reset = ResetFrame::Encode(true);
    return WRITE(reset, "session reset", read_frame);
  }
  if (exproto->replacing) {
    ldout(cct, 1) << __func__
                  << " existing racing replace happened while replacing."
                  << " existing=" << existing << dendl;
    auto retry = RetryGlobalFrame::Encode(exproto->peer_global_seq);
    return WRITE(retry, "session retry", read_frame);
  }
  // client cookie mismatch means the client restarted since the session
  // we know about was established
  if (exproto->client_cookie != reconnect.client_cookie()) {
    ldout(cct, 1) << __func__ << " existing=" << existing
                  << " client cookie mismatch, I must have reseted:"
                  << " cc=" << std::hex << exproto->client_cookie
                  << " rcc=" << reconnect.client_cookie()
                  << ", reseting client." << std::dec
                  << dendl;
    auto reset = ResetFrame::Encode(connection->policy.resetcheck);
    return WRITE(reset, "session reset", read_frame);
  } else if (exproto->server_cookie == 0) {
    // this happens when:
    //   - a connects to b
    //   - a sends client_ident
    //   - b gets client_ident, sends server_ident and sets cookie X
    //   - connection fault
    //   - b reconnects to a with cookie X, connect_seq=1
    //   - a has cookie==0
    ldout(cct, 1) << __func__ << " I was a client and didn't received the"
                  << " server_ident. Asking peer to resume session"
                  << " establishment" << dendl;
    auto reset = ResetFrame::Encode(false);
    return WRITE(reset, "session reset", read_frame);
  }
  if (exproto->peer_global_seq > reconnect.global_seq()) {
    ldout(cct, 5) << __func__
                  << " stale global_seq: sgs=" << exproto->peer_global_seq
                  << " cgs=" << reconnect.global_seq()
                  << ", ask client to retry global" << dendl;
    auto retry = RetryGlobalFrame::Encode(exproto->peer_global_seq);
    INTERCEPT(18);
    return WRITE(retry, "session retry", read_frame);
  }
  if (exproto->connect_seq > reconnect.connect_seq()) {
    ldout(cct, 5) << __func__
                  << " stale connect_seq scs=" << exproto->connect_seq
                  << " ccs=" << reconnect.connect_seq()
                  << " , ask client to retry" << dendl;
    auto retry = RetryFrame::Encode(exproto->connect_seq);
    return WRITE(retry, "session retry", read_frame);
  }
  if (exproto->connect_seq == reconnect.connect_seq()) {
    // reconnect race: both peers are sending reconnect messages
    // tie-break on address ordering so exactly one side wins
    if (existing->peer_addrs->msgr2_addr() >
            messenger->get_myaddrs().msgr2_addr() &&
        !existing->policy.server) {
      // the existing connection wins
      ldout(cct, 1)
          << __func__
          << " reconnect race detected, this connection loses to existing="
          << existing << dendl;
      auto wait = WaitFrame::Encode();
      return WRITE(wait, "wait", read_frame);
    } else {
      // this connection wins
      ldout(cct, 1) << __func__
                    << " reconnect race detected, replacing existing="
                    << existing << " socket by this connection's socket"
                    << dendl;
    }
  }
  ldout(cct, 1) << __func__ << " reconnect to existing=" << existing << dendl;
  reconnecting = true;
  // everything looks good
  exproto->connect_seq = reconnect.connect_seq();
  exproto->message_seq = reconnect.msg_seq();
  return reuse_connection(existing, exproto);
}
// Server side: a new client_ident arrived while we already hold a
// connection to the same peer.  Decide whether to drop the old one,
// splice this socket into it, or yield (connection race).
CtPtr ProtocolV2::handle_existing_connection(const AsyncConnectionRef& existing) {
  ldout(cct, 20) << __func__ << " existing=" << existing << dendl;
  std::unique_lock<std::mutex> l(existing->lock);
  ProtocolV2 *exproto = dynamic_cast<ProtocolV2 *>(existing->protocol.get());
  if (!exproto) {
    ldout(cct, 1) << __func__ << " existing=" << existing << dendl;
    ceph_assert(false);
  }
  if (exproto->state == CLOSED) {
    ldout(cct, 1) << __func__ << " existing " << existing << " already closed."
                  << dendl;
    l.unlock();
    return send_server_ident();
  }
  if (exproto->replacing) {
    // another replacement is already in flight; tell the client to wait
    ldout(cct, 1) << __func__
                  << " existing racing replace happened while replacing."
                  << " existing=" << existing << dendl;
    auto wait = WaitFrame::Encode();
    return WRITE(wait, "wait", read_frame);
  }
  if (exproto->peer_global_seq > peer_global_seq) {
    // this incoming connection is older than what we already have
    ldout(cct, 1) << __func__ << " this is a stale connection, peer_global_seq="
                  << peer_global_seq
                  << " existing->peer_global_seq=" << exproto->peer_global_seq
                  << ", stopping this connection." << dendl;
    stop();
    connection->dispatch_queue->queue_reset(connection);
    return nullptr;
  }
  if (existing->policy.lossy) {
    // existing connection can be thrown out in favor of this one
    ldout(cct, 1)
        << __func__ << " existing=" << existing
        << " is a lossy channel. Stopping existing in favor of this connection"
        << dendl;
    existing->protocol->stop();
    existing->dispatch_queue->queue_reset(existing.get());
    l.unlock();
    return send_server_ident();
  }
  if (exproto->server_cookie && exproto->client_cookie &&
      exproto->client_cookie != client_cookie) {
    // Found previous session
    // peer has reseted and we're going to reuse the existing connection
    // by replacing the communication socket
    ldout(cct, 1) << __func__ << " found previous session existing=" << existing
                  << ", peer must have reseted." << dendl;
    if (connection->policy.resetcheck) {
      exproto->reset_session();
    }
    return reuse_connection(existing, exproto);
  }
  if (exproto->client_cookie == client_cookie) {
    // session establishment interrupted between client_ident and server_ident,
    // continuing...
    ldout(cct, 1) << __func__ << " found previous session existing=" << existing
                  << ", continuing session establishment." << dendl;
    return reuse_connection(existing, exproto);
  }
  if (exproto->state == READY || exproto->state == STANDBY) {
    ldout(cct, 1) << __func__ << " existing=" << existing
                  << " is READY/STANDBY, lets reuse it" << dendl;
    return reuse_connection(existing, exproto);
  }
  // Looks like a connection race: server and client are both connecting to
  // each other at the same time.
  if (connection->peer_addrs->msgr2_addr() <
          messenger->get_myaddrs().msgr2_addr() ||
      existing->policy.server) {
    // this connection wins
    ldout(cct, 1) << __func__
                  << " connection race detected, replacing existing="
                  << existing << " socket by this connection's socket" << dendl;
    return reuse_connection(existing, exproto);
  } else {
    // the existing connection wins
    ldout(cct, 1)
        << __func__
        << " connection race detected, this connection loses to existing="
        << existing << dendl;
    ceph_assert(connection->peer_addrs->msgr2_addr() >
                messenger->get_myaddrs().msgr2_addr());
    // make sure we follow through with opening the existing
    // connection (if it isn't yet open) since we know the peer
    // has something to send to us.
    existing->send_keepalive();
    auto wait = WaitFrame::Encode();
    return WRITE(wait, "wait", read_frame);
  }
}
// Splice this connection's freshly-accepted socket into `existing`
// (running `exproto`), stopping `this` afterwards.  The actual handover
// runs in the existing connection's event-center thread via
// deactivate_existing/transfer_existing; statement order and the lock
// choreography here are load-bearing — do not reorder.
CtPtr ProtocolV2::reuse_connection(const AsyncConnectionRef& existing,
                                   ProtocolV2 *exproto) {
  ldout(cct, 20) << __func__ << " existing=" << existing
                 << " reconnect=" << reconnecting << dendl;
  connection->inject_delay();
  std::lock_guard<std::mutex> l(existing->write_lock);
  // stop processing events on this connection's socket; the fd will be
  // handed to the existing connection below
  connection->center->delete_file_event(connection->cs.fd(),
                                        EVENT_READABLE | EVENT_WRITABLE);
  if (existing->delay_state) {
    existing->delay_state->flush();
    ceph_assert(!connection->delay_state);
  }
  exproto->reset_recv_state();
  exproto->pre_auth.enabled = false;
  if (!reconnecting) {
    // fresh session: carry over the identity/feature state negotiated on
    // this connection
    exproto->client_cookie = client_cookie;
    exproto->peer_name = peer_name;
    exproto->connection_features = connection_features;
    existing->set_features(connection_features);
    exproto->peer_supported_features = peer_supported_features;
  }
  exproto->peer_global_seq = peer_global_seq;
  ceph_assert(connection->center->in_thread());
  auto temp_cs = std::move(connection->cs);
  EventCenter *new_center = connection->center;
  Worker *new_worker = connection->worker;
  // we can steal the session_stream_handlers under the assumption
  // this happens in the event center's thread as there should be
  // no user outside its boundaries (similarly to e.g. outgoing_bl).
  auto temp_stream_handlers = std::move(session_stream_handlers);
  auto temp_compression_handlers = std::move(session_compression_handlers);
  exproto->auth_meta = auth_meta;
  exproto->comp_meta = comp_meta;
  ldout(messenger->cct, 5) << __func__ << " stop myself to swap existing"
                           << dendl;
  // avoid _stop shutdown replacing socket
  // queue a reset on the new connection, which we're dumping for the old
  stop();
  connection->dispatch_queue->queue_reset(connection);
  exproto->can_write = false;
  exproto->write_in_progress = false;
  exproto->reconnecting = reconnecting;
  exproto->replacing = true;
  existing->state_offset = 0;
  // avoid previous thread modify event
  exproto->state = NONE;
  existing->state = AsyncConnection::STATE_NONE;
  // Discard existing prefetch buffer in `recv_buf`
  existing->recv_start = existing->recv_end = 0;
  // there shouldn't exist any buffer
  ceph_assert(connection->recv_start == connection->recv_end);
  // phase 1 (deactivate_existing, runs in existing's current center):
  // quiesce the old socket and move our socket/worker/center into it
  auto deactivate_existing = std::bind(
      [ existing,
        new_worker,
        new_center,
        exproto,
        reconnecting=reconnecting,
        tx_is_rev1=tx_frame_asm.get_is_rev1(),
        rx_is_rev1=rx_frame_asm.get_is_rev1(),
        temp_stream_handlers=std::move(temp_stream_handlers),
        temp_compression_handlers=std::move(temp_compression_handlers)
      ](ConnectedSocket &cs) mutable {
        // we need to delete time event in original thread
        {
          std::lock_guard<std::mutex> l(existing->lock);
          existing->write_lock.lock();
          exproto->requeue_sent();
          // XXX: do we really need the locking for `outgoing_bl`? There is
          // a comment just above its definition saying "lockfree, only used
          // in own thread". I'm following lockfull schema just in the case.
          // From performance point of view it should be fine – this happens
          // far away from hot paths.
          existing->outgoing_bl.clear();
          existing->open_write = false;
          exproto->session_stream_handlers = std::move(temp_stream_handlers);
          exproto->session_compression_handlers = std::move(temp_compression_handlers);
          if (!reconnecting) {
            exproto->tx_frame_asm.set_is_rev1(tx_is_rev1);
            exproto->rx_frame_asm.set_is_rev1(rx_is_rev1);
          }
          existing->write_lock.unlock();
          if (exproto->state == NONE) {
            existing->shutdown_socket();
            existing->cs = std::move(cs);
            // transfer worker accounting to the incoming connection's worker
            existing->worker->references--;
            new_worker->references++;
            existing->logger = new_worker->get_perf_counter();
            existing->labeled_logger = new_worker->get_labeled_perf_counter();
            existing->worker = new_worker;
            existing->center = new_center;
            if (existing->delay_state)
              existing->delay_state->set_center(new_center);
          } else if (exproto->state == CLOSED) {
            // existing was torn down meanwhile; close the orphaned socket
            // back in its owning center's thread
            auto back_to_close = std::bind(
                [](ConnectedSocket &cs) mutable { cs.close(); }, std::move(cs));
            new_center->submit_to(new_center->get_id(),
                                  std::move(back_to_close), true);
            return;
          } else {
            ceph_abort();
          }
        }
        // Before changing existing->center, it may already exists some
        // events in existing->center's queue. Then if we mark down
        // `existing`, it will execute in another thread and clean up
        // connection. Previous event will result in segment fault
        // phase 2 (transfer_existing, runs in the NEW center): re-arm
        // timers and read events, then resume the v2 handshake
        auto transfer_existing = [existing, exproto]() mutable {
          std::lock_guard<std::mutex> l(existing->lock);
          if (exproto->state == CLOSED) return;
          ceph_assert(exproto->state == NONE);
          exproto->state = SESSION_ACCEPTING;
          // we have called shutdown_socket above
          ceph_assert(existing->last_tick_id == 0);
          // restart timer since we are going to re-build connection
          existing->last_connect_started = ceph::coarse_mono_clock::now();
          existing->last_tick_id = existing->center->create_time_event(
            existing->connect_timeout_us, existing->tick_handler);
          existing->state = AsyncConnection::STATE_CONNECTION_ESTABLISHED;
          existing->center->create_file_event(existing->cs.fd(), EVENT_READABLE,
                                              existing->read_handler);
          if (!exproto->reconnecting) {
            exproto->run_continuation(exproto->send_server_ident());
          } else {
            exproto->run_continuation(exproto->send_reconnect_ok());
          }
        };
        if (existing->center->in_thread())
          transfer_existing();
        else
          existing->center->submit_to(existing->center->get_id(),
                                      std::move(transfer_existing), true);
      },
      std::move(temp_cs));
  existing->center->submit_to(existing->center->get_id(),
                              std::move(deactivate_existing), true);
  return nullptr;
}
// Server side: register the connection with the messenger and send our
// identification, completing session establishment on the accept path.
CtPtr ProtocolV2::send_server_ident() {
  ldout(cct, 20) << __func__ << dendl;
  // this is required for the case when this connection is being replaced
  out_seq = discard_requeued_up_to(out_seq, 0);
  in_seq = 0;
  if (!connection->policy.lossy) {
    // range [1, UINT64_MAX]: zero is reserved to mean "no cookie"
    server_cookie = ceph::util::generate_random_number<uint64_t>(1, -1ll);
  }
  uint64_t flags = 0;
  if (connection->policy.lossy) {
    flags = flags | CEPH_MSG_CONNECT_LOSSY;
  }
  uint64_t gs = messenger->get_global_seq();
  auto server_ident = ServerIdentFrame::Encode(
          messenger->get_myaddrs(),
          messenger->get_myname().num(),
          gs,
          connection->policy.features_supported,
          connection->policy.features_required | msgr2_required,
          flags,
          server_cookie);
  ldout(cct, 5) << __func__ << " sending identification:"
                << " addrs=" << messenger->get_myaddrs()
                << " gid=" << messenger->get_myname().num()
                << " global_seq=" << gs << " features_supported=" << std::hex
                << connection->policy.features_supported
                << " features_required="
                << (connection->policy.features_required | msgr2_required)
                << " flags=" << flags
                << " cookie=" << server_cookie << std::dec << dendl;
  connection->lock.unlock();
  // Because "replacing" will prevent other connections preempt this addr,
  // it's safe that here we don't acquire Connection's lock
  ssize_t r = messenger->accept_conn(connection);
  connection->inject_delay();
  connection->lock.lock();
  if (r < 0) {
    // another connection for this addr won the registration race
    ldout(cct, 1) << __func__ << " existing race replacing process for addr = "
                  << connection->peer_addrs->msgr2_addr()
                  << " just fail later one(this)" << dendl;
    connection->inject_delay();
    return _fault();
  }
  // re-validate state after reacquiring the lock
  if (state != SESSION_ACCEPTING) {
    ldout(cct, 1) << __func__
                  << " state changed while accept_conn, it must be mark_down"
                  << dendl;
    ceph_assert(state == CLOSED || state == NONE);
    messenger->unregister_conn(connection);
    connection->inject_delay();
    return _fault();
  }
  connection->set_features(connection_features);
  // notify
  connection->dispatch_queue->queue_accept(connection);
  messenger->ms_deliver_handle_fast_accept(connection);
  INTERCEPT(12);
  return WRITE(server_ident, "server ident", server_ready);
}
// Continuation run after the server ident / reconnect_ok frame has been
// written: sanity-check any delay-injection state, then enter READY.
CtPtr ProtocolV2::server_ready() {
  ldout(cct, 20) << __func__ << dendl;
  // Any injected delivery delay must have fully drained before the server
  // side of the session becomes ready.
  if (connection->delay_state != nullptr) {
    ceph_assert(connection->delay_state->ready());
  }
  return ready();
}
// Server side: accept a session *reconnect*.  Drops queued messages the
// peer already acknowledged (up to message_seq) and tells the peer our
// current in_seq so it knows where to resume sending.
CtPtr ProtocolV2::send_reconnect_ok() {
  ldout(cct, 20) << __func__ << dendl;
  // Messages up to the peer-acknowledged message_seq need not be resent.
  out_seq = discard_requeued_up_to(out_seq, message_seq);
  uint64_t ms = in_seq;
  auto reconnect_ok = ReconnectOkFrame::Encode(ms);
  ldout(cct, 5) << __func__ << " sending reconnect_ok: msg_seq=" << ms << dendl;
  connection->lock.unlock();
  // Because "replacing" will prevent other connections preempt this addr,
  // it's safe that here we don't acquire Connection's lock
  ssize_t r = messenger->accept_conn(connection);

  connection->inject_delay();

  connection->lock.lock();
  if (r < 0) {
    // Lost the registration race against another connection for this addr.
    ldout(cct, 1) << __func__ << " existing race replacing process for addr = "
                  << connection->peer_addrs->msgr2_addr()
                  << " just fail later one(this)" << dendl;
    connection->inject_delay();
    return _fault();
  }
  if (state != SESSION_ACCEPTING) {
    // State changed while the lock was dropped (mark_down); roll back the
    // accept_conn() registration.
    ldout(cct, 1) << __func__
                  << " state changed while accept_conn, it must be mark_down"
                  << dendl;
    ceph_assert(state == CLOSED || state == NONE);
    messenger->unregister_conn(connection);
    connection->inject_delay();
    return _fault();
  }

  // notify
  connection->dispatch_queue->queue_accept(connection);
  messenger->ms_deliver_handle_fast_accept(connection);

  INTERCEPT(14);

  return WRITE(reconnect_ok, "reconnect ok", server_ready);
}
// Server side of on-wire compression negotiation: the client proposes
// whether it wants compression and its preferred methods; we intersect that
// with our local policy for this peer type and reply with CompressionDone.
CtPtr ProtocolV2::handle_compression_request(ceph::bufferlist &payload) {
  if (state != COMPRESSION_ACCEPTING) {
    lderr(cct) << __func__ << " state changed!" << dendl;
    return _fault();
  }
  auto request = CompressionRequestFrame::Decode(payload);
  ldout(cct, 10) << __func__ << " CompressionRequestFrame(is_compress=" << request.is_compress()
                 << ", preferred_methods=" << request.preferred_methods() << ")" << dendl;
  const int peer_type = connection->get_peer_type();
  // Only negotiate if both our local config enables compression for this
  // peer/secure-mode combination AND the client asked for it.
  if (Compressor::CompressionMode mode = messenger->comp_registry.get_mode(
          peer_type, auth_meta->is_mode_secure());
      mode != Compressor::COMP_NONE && request.is_compress()) {
    comp_meta.con_method = messenger->comp_registry.pick_method(peer_type, request.preferred_methods());
    ldout(cct, 10) << __func__ << " Compressor(pick_method="
                   << Compressor::get_comp_alg_name(comp_meta.get_method())
                   << ")" << dendl;
    // con_mode stays COMP_NONE (i.e. compression off) unless an algorithm
    // was actually agreed on.
    if (comp_meta.con_method != Compressor::COMP_ALG_NONE) {
      comp_meta.con_mode = mode;
    }
  } else {
    comp_meta.con_method = Compressor::COMP_ALG_NONE;
  }
  auto response = CompressionDoneFrame::Encode(comp_meta.is_compress(), comp_meta.get_method());

  INTERCEPT(20);
  return WRITE(response, "compression done", finish_compression);
}
// Server-side epilogue of compression negotiation: instantiate the rx/tx
// compression handlers from the negotiated comp_meta (empty pair when
// compression was not agreed) and resume frame processing.
CtPtr ProtocolV2::finish_compression() {
  // TODO: having a possibility to check whether we're server or client could
  // allow reusing finish_compression().

  session_compression_handlers = ceph::compression::onwire::rxtx_t::create_handler_pair(
      cct, comp_meta, messenger->comp_registry.get_min_compression_size(connection->get_peer_type()));

  state = SESSION_ACCEPTING;
  return CONTINUE(read_frame);
}
| 101,661 | 32.49654 | 107 |
cc
|
null |
ceph-main/src/msg/async/ProtocolV2.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef _MSG_ASYNC_PROTOCOL_V2_
#define _MSG_ASYNC_PROTOCOL_V2_
#include "Protocol.h"
#include "crypto_onwire.h"
#include "compression_meta.h"
#include "compression_onwire.h"
#include "frames_v2.h"
/**
 * ProtocolV2 - state machine implementing the msgr2 on-wire protocol.
 *
 * Drives both the client (connect) and server (accept) sides of the
 * handshake - banner/hello exchange, authentication, optional on-wire
 * compression negotiation, session establishment/reconnect - and, once
 * READY, frames, optionally encrypts/compresses, sends and receives
 * Messages on behalf of the owning AsyncConnection.
 */
class ProtocolV2 : public Protocol {
private:
  // Lifecycle states.  NOTE: get_state_name()'s statenames[] array below
  // mirrors this enum by position and must be kept in sync with it.
  enum State {
    NONE,
    START_CONNECT,
    BANNER_CONNECTING,
    HELLO_CONNECTING,
    AUTH_CONNECTING,
    AUTH_CONNECTING_SIGN,
    COMPRESSION_CONNECTING,
    SESSION_CONNECTING,
    SESSION_RECONNECTING,
    START_ACCEPT,
    BANNER_ACCEPTING,
    HELLO_ACCEPTING,
    AUTH_ACCEPTING,
    AUTH_ACCEPTING_MORE,
    AUTH_ACCEPTING_SIGN,
    COMPRESSION_ACCEPTING,
    SESSION_ACCEPTING,
    READY,
    THROTTLE_MESSAGE,
    THROTTLE_BYTES,
    THROTTLE_DISPATCH_QUEUE,
    THROTTLE_DONE,
    READ_MESSAGE_COMPLETE,
    STANDBY,
    WAIT,
    CLOSED
  };

  // Printable name for a State value.  No bounds check: callers must pass
  // a valid State enumerator.
  static const char *get_state_name(int state) {
    const char *const statenames[] = {"NONE",
                                      "START_CONNECT",
                                      "BANNER_CONNECTING",
                                      "HELLO_CONNECTING",
                                      "AUTH_CONNECTING",
                                      "AUTH_CONNECTING_SIGN",
                                      "COMPRESSION_CONNECTING",
                                      "SESSION_CONNECTING",
                                      "SESSION_RECONNECTING",
                                      "START_ACCEPT",
                                      "BANNER_ACCEPTING",
                                      "HELLO_ACCEPTING",
                                      "AUTH_ACCEPTING",
                                      "AUTH_ACCEPTING_MORE",
                                      "AUTH_ACCEPTING_SIGN",
                                      "COMPRESSION_ACCEPTING",
                                      "SESSION_ACCEPTING",
                                      "READY",
                                      "THROTTLE_MESSAGE",
                                      "THROTTLE_BYTES",
                                      "THROTTLE_DISPATCH_QUEUE",
                                      "THROTTLE_DONE",
                                      "READ_MESSAGE_COMPLETE",
                                      "STANDBY",
                                      "WAIT",
                                      "CLOSED"};
    return statenames[state];
  }

  // Per-session crypto (msgr2 secure mode) and compression codecs.
  // TODO: move into auth_meta?
  ceph::crypto::onwire::rxtx_t session_stream_handlers;
  ceph::compression::onwire::rxtx_t session_compression_handlers;

private:
  entity_name_t peer_name;
  State state;
  uint64_t peer_supported_features;  // CEPH_MSGR2_FEATURE_*

  // Session identity/ordering state used for establish + reconnect.
  uint64_t client_cookie;
  uint64_t server_cookie;
  uint64_t global_seq;
  uint64_t connect_seq;
  uint64_t peer_global_seq;
  uint64_t message_seq;
  bool reconnecting;
  bool replacing;
  bool can_write;

  // An entry of the outgoing message queue; is_prepared marks that
  // prepare_send_message() has already run for this message.
  struct out_queue_entry_t {
    bool is_prepared {false};
    Message* m {nullptr};
  };
  // Outgoing messages, keyed by priority (see _get_next_outgoing()).
  std::map<int, std::list<out_queue_entry_t>> out_queue;
  std::list<Message *> sent;
  std::atomic<uint64_t> out_seq{0};
  std::atomic<uint64_t> in_seq{0};
  std::atomic<uint64_t> ack_left{0};

  using ProtFuncPtr = void (ProtocolV2::*)();
  Ct<ProtocolV2> *bannerExchangeCallback;

  // Frame (de)serialization state for the current tx/rx frame.
  ceph::msgr::v2::FrameAssembler tx_frame_asm;
  ceph::msgr::v2::FrameAssembler rx_frame_asm;

  ceph::bufferlist rx_preamble;
  ceph::bufferlist rx_epilogue;
  ceph::msgr::v2::segment_bls_t rx_segments_data;
  ceph::msgr::v2::Tag next_tag;

  utime_t backoff;  // backoff time
  utime_t recv_stamp;
  utime_t throttle_stamp;

  // Buffers of the pre-auth part of the handshake, kept for the auth
  // signature check; disabled once the signature has been exchanged.
  struct {
    ceph::bufferlist rxbuf;
    ceph::bufferlist txbuf;
    bool enabled {true};
  } pre_auth;

  bool keepalive;
  bool write_in_progress = false;

  // Result of the compression negotiation for this connection.
  CompConnectionMeta comp_meta;

  std::ostream& _conn_prefix(std::ostream *_dout);
  void run_continuation(Ct<ProtocolV2> *pcontinuation);
  void run_continuation(Ct<ProtocolV2> &continuation);

  Ct<ProtocolV2> *read(CONTINUATION_RXBPTR_TYPE<ProtocolV2> &next,
                       rx_buffer_t&& buffer);
  template <class F>
  Ct<ProtocolV2> *write(const std::string &desc,
                        CONTINUATION_TYPE<ProtocolV2> &next,
                        F &frame);
  Ct<ProtocolV2> *write(const std::string &desc,
                        CONTINUATION_TYPE<ProtocolV2> &next,
                        ceph::bufferlist &buffer);

  template <class F>
  bool append_frame(F& frame);

  void requeue_sent();
  uint64_t discard_requeued_up_to(uint64_t out_seq, uint64_t seq);
  void reset_recv_state();
  void reset_security();
  void reset_throttle();
  Ct<ProtocolV2> *_fault();
  void discard_out_queue();
  void reset_session();
  void prepare_send_message(uint64_t features, Message *m);
  out_queue_entry_t _get_next_outgoing();
  ssize_t write_message(Message *m, bool more);
  void handle_message_ack(uint64_t seq);
  void reset_compression();

  // Continuation plumbing shared by both sides of the handshake.
  CONTINUATION_DECL(ProtocolV2, _wait_for_peer_banner);
  READ_BPTR_HANDLER_CONTINUATION_DECL(ProtocolV2, _handle_peer_banner);
  READ_BPTR_HANDLER_CONTINUATION_DECL(ProtocolV2, _handle_peer_banner_payload);

  Ct<ProtocolV2> *_banner_exchange(Ct<ProtocolV2> &callback);
  Ct<ProtocolV2> *_wait_for_peer_banner();
  Ct<ProtocolV2> *_handle_peer_banner(rx_buffer_t &&buffer, int r);
  Ct<ProtocolV2> *_handle_peer_banner_payload(rx_buffer_t &&buffer, int r);
  Ct<ProtocolV2> *handle_hello(ceph::bufferlist &payload);

  CONTINUATION_DECL(ProtocolV2, read_frame);
  CONTINUATION_DECL(ProtocolV2, finish_auth);
  READ_BPTR_HANDLER_CONTINUATION_DECL(ProtocolV2, handle_read_frame_preamble_main);
  READ_BPTR_HANDLER_CONTINUATION_DECL(ProtocolV2, handle_read_frame_segment);
  READ_BPTR_HANDLER_CONTINUATION_DECL(ProtocolV2, handle_read_frame_epilogue_main);
  CONTINUATION_DECL(ProtocolV2, throttle_message);
  CONTINUATION_DECL(ProtocolV2, throttle_bytes);
  CONTINUATION_DECL(ProtocolV2, throttle_dispatch_queue);
  CONTINUATION_DECL(ProtocolV2, finish_compression);

  Ct<ProtocolV2> *read_frame();
  Ct<ProtocolV2> *finish_auth();
  Ct<ProtocolV2> *finish_client_auth();
  Ct<ProtocolV2> *finish_server_auth();
  Ct<ProtocolV2> *handle_read_frame_preamble_main(rx_buffer_t &&buffer, int r);
  Ct<ProtocolV2> *read_frame_segment();
  Ct<ProtocolV2> *handle_read_frame_segment(rx_buffer_t &&rx_buffer, int r);
  Ct<ProtocolV2> *_handle_read_frame_segment();
  Ct<ProtocolV2> *handle_read_frame_epilogue_main(rx_buffer_t &&buffer, int r);
  Ct<ProtocolV2> *_handle_read_frame_epilogue_main();
  Ct<ProtocolV2> *handle_read_frame_dispatch();
  Ct<ProtocolV2> *handle_frame_payload();
  Ct<ProtocolV2> *finish_compression();

  Ct<ProtocolV2> *ready();

  Ct<ProtocolV2> *handle_message();
  Ct<ProtocolV2> *throttle_message();
  Ct<ProtocolV2> *throttle_bytes();
  Ct<ProtocolV2> *throttle_dispatch_queue();

  Ct<ProtocolV2> *handle_keepalive2(ceph::bufferlist &payload);
  Ct<ProtocolV2> *handle_keepalive2_ack(ceph::bufferlist &payload);
  Ct<ProtocolV2> *handle_message_ack(ceph::bufferlist &payload);

public:
  uint64_t connection_features;

  ProtocolV2(AsyncConnection *connection);
  virtual ~ProtocolV2();

  // Protocol interface used by AsyncConnection.
  virtual void connect() override;
  virtual void accept() override;
  virtual bool is_connected() override;
  virtual void stop() override;
  virtual void fault() override;
  virtual void send_message(Message *m) override;
  virtual void send_keepalive() override;

  virtual void read_event() override;
  virtual void write_event() override;
  virtual bool is_queued() override;

private:
  // Client Protocol
  CONTINUATION_DECL(ProtocolV2, start_client_banner_exchange);
  CONTINUATION_DECL(ProtocolV2, post_client_banner_exchange);

  Ct<ProtocolV2> *start_client_banner_exchange();
  Ct<ProtocolV2> *post_client_banner_exchange();
  // Convenience overload: initial auth request with no method restriction.
  inline Ct<ProtocolV2> *send_auth_request() {
    std::vector<uint32_t> empty;
    return send_auth_request(empty);
  }
  Ct<ProtocolV2> *send_auth_request(std::vector<uint32_t> &allowed_methods);
  Ct<ProtocolV2> *handle_auth_bad_method(ceph::bufferlist &payload);
  Ct<ProtocolV2> *handle_auth_reply_more(ceph::bufferlist &payload);
  Ct<ProtocolV2> *handle_auth_done(ceph::bufferlist &payload);
  Ct<ProtocolV2> *handle_auth_signature(ceph::bufferlist &payload);
  Ct<ProtocolV2> *start_session_connect();
  Ct<ProtocolV2> *send_client_ident();
  Ct<ProtocolV2> *send_reconnect();
  Ct<ProtocolV2> *handle_ident_missing_features(ceph::bufferlist &payload);
  Ct<ProtocolV2> *handle_session_reset(ceph::bufferlist &payload);
  Ct<ProtocolV2> *handle_session_retry(ceph::bufferlist &payload);
  Ct<ProtocolV2> *handle_session_retry_global(ceph::bufferlist &payload);
  Ct<ProtocolV2> *handle_wait(ceph::bufferlist &payload);
  Ct<ProtocolV2> *handle_reconnect_ok(ceph::bufferlist &payload);
  Ct<ProtocolV2> *handle_server_ident(ceph::bufferlist &payload);
  Ct<ProtocolV2> *send_compression_request();
  Ct<ProtocolV2> *handle_compression_done(ceph::bufferlist &payload);

  // Server Protocol
  CONTINUATION_DECL(ProtocolV2, start_server_banner_exchange);
  CONTINUATION_DECL(ProtocolV2, post_server_banner_exchange);
  CONTINUATION_DECL(ProtocolV2, server_ready);

  Ct<ProtocolV2> *start_server_banner_exchange();
  Ct<ProtocolV2> *post_server_banner_exchange();
  Ct<ProtocolV2> *handle_auth_request(ceph::bufferlist &payload);
  Ct<ProtocolV2> *handle_auth_request_more(ceph::bufferlist &payload);
  Ct<ProtocolV2> *_handle_auth_request(ceph::bufferlist& auth_payload, bool more);
  Ct<ProtocolV2> *_auth_bad_method(int r);
  Ct<ProtocolV2> *handle_client_ident(ceph::bufferlist &payload);
  Ct<ProtocolV2> *handle_ident_missing_features_write(int r);
  Ct<ProtocolV2> *handle_reconnect(ceph::bufferlist &payload);
  Ct<ProtocolV2> *handle_existing_connection(const AsyncConnectionRef& existing);
  Ct<ProtocolV2> *reuse_connection(const AsyncConnectionRef& existing,
                                   ProtocolV2 *exproto);
  Ct<ProtocolV2> *send_server_ident();
  Ct<ProtocolV2> *send_reconnect_ok();
  Ct<ProtocolV2> *server_ready();
  Ct<ProtocolV2> *handle_compression_request(ceph::bufferlist &payload);

  size_t get_current_msg_size() const;
};
#endif /* _MSG_ASYNC_PROTOCOL_V2_ */
| 10,197 | 35.815884 | 83 |
h
|
null |
ceph-main/src/msg/async/Stack.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 XSky <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <mutex>
#include "include/compat.h"
#include "common/Cond.h"
#include "common/errno.h"
#include "PosixStack.h"
#ifdef HAVE_RDMA
#include "rdma/RDMAStack.h"
#endif
#ifdef HAVE_DPDK
#include "dpdk/DPDKStack.h"
#endif
#include "common/dout.h"
#include "include/ceph_assert.h"
#define dout_subsys ceph_subsys_ms
#undef dout_prefix
#define dout_prefix *_dout << "stack "
// Build the thread body for one worker: name the thread, pin the worker's
// event center to it, then run the event loop until the worker is flagged
// done, and finally tear the worker down again.
std::function<void ()> NetworkStack::add_thread(Worker* w)
{
  return [this, w]() {
    rename_thread(w->id);
    constexpr unsigned max_wait_us = 30000000;
    w->center.set_owner();
    ldout(cct, 10) << __func__ << " starting" << dendl;
    w->initialize();
    w->init_done();
    for (;;) {
      if (w->done)
        break;
      ldout(cct, 30) << __func__ << " calling event process" << dendl;
      ceph::timespan dur;
      const int rc = w->center.process_events(max_wait_us, &dur);
      if (rc < 0) {
        ldout(cct, 20) << __func__ << " process events failed: "
                       << cpp_strerror(errno) << dendl;
        // TODO do something?
      }
      w->perf_logger->tinc(l_msgr_running_total_time, dur);
    }
    w->reset();
    w->destroy();
  };
}
// Factory: instantiate the stack implementation selected by @t ("posix",
// and, when compiled in, "rdma"/"dpdk"), create ms_async_op_threads workers
// (capped at EventCenter::MAX_EVENTCENTER) and initialize their event
// centers.  Aborts on an unknown transport type; throws std::system_error
// if an event center fails to initialize.
std::shared_ptr<NetworkStack> NetworkStack::create(CephContext *c,
                                                   const std::string &t)
{
  std::shared_ptr<NetworkStack> stack = nullptr;

  if (t == "posix")
    stack.reset(new PosixNetworkStack(c));
#ifdef HAVE_RDMA
  else if (t == "rdma")
    stack.reset(new RDMAStack(c));
#endif
#ifdef HAVE_DPDK
  else if (t == "dpdk")
    stack.reset(new DPDKStack(c));
#endif

  if (stack == nullptr) {
    lderr(c) << __func__ << " ms_async_transport_type " << t <<
      " is not supported! " << dendl;
    ceph_abort();
    return nullptr;
  }

  unsigned num_workers = c->_conf->ms_async_op_threads;
  ceph_assert(num_workers > 0);
  if (num_workers >= EventCenter::MAX_EVENTCENTER) {
    ldout(c, 0) << __func__ << " max thread limit is "
                << EventCenter::MAX_EVENTCENTER << ", switching to this now. "
                << "Higher thread values are unnecessary and currently unsupported."
                << dendl;
    num_workers = EventCenter::MAX_EVENTCENTER;
  }
  const int InitEventNumber = 5000;
  for (unsigned worker_id = 0; worker_id < num_workers; ++worker_id) {
    Worker *w = stack->create_worker(c, worker_id);
    int ret = w->center.init(InitEventNumber, worker_id, t);
    if (ret)
      throw std::system_error(-ret, std::generic_category());
    stack->workers.push_back(w);
  }

  return stack;
}
// NetworkStack holds only a borrowed CephContext; workers are added by
// create() and owned (deleted) by the destructor.
NetworkStack::NetworkStack(CephContext *c)
  : cct(c)
{}
void NetworkStack::start()
{
std::unique_lock<decltype(pool_spin)> lk(pool_spin);
if (started) {
return ;
}
for (Worker* worker : workers) {
if (worker->is_init())
continue;
spawn_worker(add_thread(worker));
}
started = true;
lk.unlock();
for (Worker* worker : workers) {
worker->wait_for_init();
}
}
// Pick the least-loaded worker (fewest outstanding references) and account
// one more reference against it before handing it out.
Worker* NetworkStack::get_worker()
{
  ldout(cct, 30) << __func__ << dendl;

  // start with some reasonably large number
  unsigned best_load = std::numeric_limits<int>::max();
  Worker* best = nullptr;
  {
    std::lock_guard guard(pool_spin);
    // find worker with least references
    // tempting case is returning on references == 0, but in reality
    // this will happen so rarely that there's no need for special case.
    for (auto* candidate : workers) {
      const unsigned load = candidate->references.load();
      if (load < best_load) {
        best = candidate;
        best_load = load;
      }
    }
  }

  ceph_assert(best);
  ++best->references;
  return best;
}
void NetworkStack::stop()
{
std::lock_guard lk(pool_spin);
unsigned i = 0;
for (Worker* worker : workers) {
worker->done = true;
worker->center.wakeup();
join_worker(i++);
}
started = false;
}
// Countdown-latch event callback used by NetworkStack::drain(): the same
// C_drain instance is dispatched to every worker's event center; each
// execution decrements the count, and wait() blocks the caller until all
// workers have run it.
class C_drain : public EventCallback {
  ceph::mutex drain_lock = ceph::make_mutex("C_drain::drain_lock");
  ceph::condition_variable drain_cond;
  unsigned drain_count;  // number of workers still to run do_request()

 public:
  explicit C_drain(size_t c)
      : drain_count(c) {}
  // Invoked on a worker's event thread; wakes the waiter on the last call.
  void do_request(uint64_t id) override {
    std::lock_guard l{drain_lock};
    drain_count--;
    if (drain_count == 0) drain_cond.notify_all();
  }
  // Block until every worker has executed do_request().
  void wait() {
    std::unique_lock l{drain_lock};
    drain_cond.wait(l, [this] { return drain_count == 0; });
  }
};
// Block until every worker's event loop has processed all events queued
// before this call.  Must NOT be invoked from a worker thread (asserted),
// or it would deadlock waiting on itself.  The stack-allocated C_drain is
// safe to hand to the workers because wait() keeps it alive until all of
// them have executed it.
void NetworkStack::drain()
{
  ldout(cct, 30) << __func__ << " started." << dendl;
  pthread_t cur = pthread_self();
  pool_spin.lock();
  C_drain drain(get_num_worker());
  for (Worker* worker : workers) {
    ceph_assert(cur != worker->center.get_owner());
    worker->center.dispatch_event_external(EventCallbackRef(&drain));
  }
  pool_spin.unlock();
  drain.wait();
  ldout(cct, 30) << __func__ << " end." << dendl;
}
| 5,235 | 24.793103 | 86 |
cc
|
null |
ceph-main/src/msg/async/Stack.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 XSKY <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MSG_ASYNC_STACK_H
#define CEPH_MSG_ASYNC_STACK_H
#include "common/perf_counters.h"
#include "common/perf_counters_key.h"
#include "include/spinlock.h"
#include "msg/async/Event.h"
#include "msg/msg_types.h"
#include <string>
class Worker;
// Backend interface for a connected stream socket; each transport backend
// (posix/rdma/dpdk) provides its own implementation.  Wrapped by the
// value-type ConnectedSocket below.
class ConnectedSocketImpl {
 public:
  virtual ~ConnectedSocketImpl() {}
  virtual int is_connected() = 0;
  // Read up to the given number of bytes into the buffer.
  virtual ssize_t read(char*, size_t) = 0;
  // Send the bufferlist; 'more' hints that further data follows immediately.
  virtual ssize_t send(ceph::buffer::list &bl, bool more) = 0;
  virtual void shutdown() = 0;
  virtual void close() = 0;
  // Underlying file descriptor.
  virtual int fd() const = 0;
  virtual void set_priority(int sd, int prio, int domain) = 0;
};
class ConnectedSocket;
// Options applied when creating/connecting a socket.
struct SocketOptions {
  bool nonblock = true;        // non-blocking mode
  bool nodelay = true;         // disable Nagle (TCP_NODELAY)
  int rcbuf_size = 0;          // receive buffer size; 0 = system default
  int priority = -1;           // socket priority; -1 = unset
  entity_addr_t connect_bind_addr;  // local addr to bind before connect
};
/// \cond internal
// Backend interface for a listening socket; wrapped by the value-type
// ServerSocket below.
class ServerSocketImpl {
 public:
  unsigned addr_type; ///< entity_addr_t::TYPE_*
  unsigned addr_slot; ///< position of our addr in myaddrs().v
  ServerSocketImpl(unsigned type, unsigned slot)
    : addr_type(type), addr_slot(slot) {}
  virtual ~ServerSocketImpl() {}
  // Accept one pending connection into *sock; fills *out with the peer addr.
  virtual int accept(ConnectedSocket *sock, const SocketOptions &opt, entity_addr_t *out, Worker *w) = 0;
  virtual void abort_accept() = 0;
  /// Get file descriptor
  virtual int fd() const = 0;
};
/// \endcond
/// \addtogroup networking-module
/// @{
/// A TCP (or other stream-based protocol) connection.
///
/// A \c ConnectedSocket represents a full-duplex stream between
/// two endpoints, a local endpoint and a remote endpoint.
class ConnectedSocket {
  // Owning handle to the backend implementation; null for a default-
  // constructed (disconnected) socket.
  std::unique_ptr<ConnectedSocketImpl> _csi;

 public:
  /// Constructs a \c ConnectedSocket not corresponding to a connection
  ConnectedSocket() {};
  /// \cond internal
  explicit ConnectedSocket(std::unique_ptr<ConnectedSocketImpl> csi)
      : _csi(std::move(csi)) {}
  /// \endcond
  /// Destroying a still-open socket closes it (RAII).
  ~ConnectedSocket() {
    if (_csi)
      _csi->close();
  }
  /// Moves a \c ConnectedSocket object.
  ConnectedSocket(ConnectedSocket&& cs) = default;
  /// Move-assigns a \c ConnectedSocket object.
  ConnectedSocket& operator=(ConnectedSocket&& cs) = default;

  /// Forwards to the backend's connection check.
  int is_connected() {
    return _csi->is_connected();
  }
  /// Read the input stream with copy.
  ///
  /// Copy an object returning data sent from the remote endpoint.
  ssize_t read(char* buf, size_t len) {
    return _csi->read(buf, len);
  }
  /// Gets the output stream.
  ///
  /// Gets an object that sends data to the remote endpoint.
  ssize_t send(ceph::buffer::list &bl, bool more) {
    return _csi->send(bl, more);
  }
  /// Disables output to the socket.
  ///
  /// Current or future writes that have not been successfully flushed
  /// will immediately fail with an error. This is useful to abort
  /// operations on a socket that is not making progress due to a
  /// peer failure.
  void shutdown() {
    return _csi->shutdown();
  }
  /// Disables input from the socket.
  ///
  /// Current or future reads will immediately fail with an error.
  /// This is useful to abort operations on a socket that is not making
  /// progress due to a peer failure.
  /// Also releases the backend handle, leaving this object disconnected.
  void close() {
    _csi->close();
    _csi.reset();
  }
  /// Get file descriptor
  int fd() const {
    return _csi->fd();
  }
  void set_priority(int sd, int prio, int domain) {
    _csi->set_priority(sd, prio, domain);
  }
  /// True when this object currently wraps a backend connection.
  explicit operator bool() const {
    return _csi.get();
  }
};
/// @}
/// \addtogroup networking-module
/// @{
/// A listening socket, waiting to accept incoming network connections.
class ServerSocket {
  // Owning handle to the backend implementation; null for a default-
  // constructed (non-listening) socket.
  std::unique_ptr<ServerSocketImpl> _ssi;

 public:
  /// Constructs a \c ServerSocket not corresponding to a connection
  ServerSocket() {}
  /// \cond internal
  explicit ServerSocket(std::unique_ptr<ServerSocketImpl> ssi)
      : _ssi(std::move(ssi)) {}
  /// Destroying a live listener aborts any in-progress accept (RAII).
  ~ServerSocket() {
    if (_ssi)
      _ssi->abort_accept();
  }
  /// \endcond
  /// Moves a \c ServerSocket object.
  ServerSocket(ServerSocket&& ss) = default;
  /// Move-assigns a \c ServerSocket object.
  ServerSocket& operator=(ServerSocket&& cs) = default;

  /// Accepts the next connection to successfully connect to this socket.
  ///
  /// \Accepts a \ref ConnectedSocket representing the connection, and
  /// a \ref entity_addr_t describing the remote endpoint.
  int accept(ConnectedSocket *sock, const SocketOptions &opt, entity_addr_t *out, Worker *w) {
    return _ssi->accept(sock, opt, out, w);
  }

  /// Stops any \ref accept() in progress.
  ///
  /// Current and future \ref accept() calls will terminate immediately
  /// with an error.
  /// Also releases the backend handle, leaving this object non-listening.
  void abort_accept() {
    _ssi->abort_accept();
    _ssi.reset();
  }
  /// Get file descriptor
  int fd() const {
    return _ssi->fd();
  }
  /// get listen/bind addr
  unsigned get_addr_slot() {
    return _ssi->addr_slot;
  }
  /// True when this object currently wraps a backend listener.
  explicit operator bool() const {
    return _ssi.get();
  }
};
/// @}
class NetworkStack;

// Indices for the per-worker perf counters ("AsyncMessenger::Worker-<n>")
// registered in Worker's constructor.
enum {
  l_msgr_first = 94000,
  l_msgr_recv_messages,
  l_msgr_send_messages,
  l_msgr_recv_bytes,
  l_msgr_send_bytes,
  l_msgr_created_connections,
  l_msgr_active_connections,
  l_msgr_running_total_time,
  l_msgr_running_send_time,
  l_msgr_running_recv_time,
  l_msgr_running_fast_dispatch_time,
  l_msgr_send_messages_queue_lat,
  l_msgr_handle_ack_lat,
  l_msgr_recv_encrypted_bytes,
  l_msgr_send_encrypted_bytes,
  l_msgr_last,
};

// Indices for the labeled (keyed by worker id) perf counters; the range
// deliberately starts right after the unlabeled range above.
enum {
  l_msgr_labeled_first = l_msgr_last + 1,
  l_msgr_connection_ready_timeouts,
  l_msgr_connection_idle_timeouts,
  l_msgr_labeled_last,
};
/**
 * Worker - one event-loop thread of a NetworkStack.
 *
 * Owns an EventCenter and the per-worker perf counters; backends subclass
 * it to implement listen()/connect().  The thread body that drives a
 * Worker is produced by NetworkStack::add_thread().
 */
class Worker {
  std::mutex init_lock;
  std::condition_variable init_cond;
  bool init = false;  // set by init_done() once the thread is up

 public:
  bool done = false;  // set by NetworkStack::stop() to end the event loop

  CephContext *cct;
  PerfCounters *perf_logger;
  PerfCounters *perf_labeled_logger;
  unsigned id;

  // Number of users handed out by NetworkStack::get_worker(); used for
  // least-loaded worker selection.
  std::atomic_uint references;
  EventCenter center;

  Worker(const Worker&) = delete;
  Worker& operator=(const Worker&) = delete;

  // Registers two perf counter sets: an unlabeled one named
  // "AsyncMessenger::Worker-<id>" and a labeled one keyed by {"id": <id>}.
  Worker(CephContext *c, unsigned worker_id)
    : cct(c), id(worker_id), references(0), center(c) {
    char name[128];
    char name_prefix[] = "AsyncMessenger::Worker";
    sprintf(name, "%s-%u", name_prefix, id);

    // initialize perf_logger
    PerfCountersBuilder plb(cct, name, l_msgr_first, l_msgr_last);

    plb.add_u64_counter(l_msgr_recv_messages, "msgr_recv_messages", "Network received messages");
    plb.add_u64_counter(l_msgr_send_messages, "msgr_send_messages", "Network sent messages");
    plb.add_u64_counter(l_msgr_recv_bytes, "msgr_recv_bytes", "Network received bytes", NULL, 0, unit_t(UNIT_BYTES));
    plb.add_u64_counter(l_msgr_send_bytes, "msgr_send_bytes", "Network sent bytes", NULL, 0, unit_t(UNIT_BYTES));
    plb.add_u64_counter(l_msgr_active_connections, "msgr_active_connections", "Active connection number");
    plb.add_u64_counter(l_msgr_created_connections, "msgr_created_connections", "Created connection number");

    plb.add_time(l_msgr_running_total_time, "msgr_running_total_time", "The total time of thread running");
    plb.add_time(l_msgr_running_send_time, "msgr_running_send_time", "The total time of message sending");
    plb.add_time(l_msgr_running_recv_time, "msgr_running_recv_time", "The total time of message receiving");
    plb.add_time(l_msgr_running_fast_dispatch_time, "msgr_running_fast_dispatch_time", "The total time of fast dispatch");

    plb.add_time_avg(l_msgr_send_messages_queue_lat, "msgr_send_messages_queue_lat", "Network sent messages lat");
    plb.add_time_avg(l_msgr_handle_ack_lat, "msgr_handle_ack_lat", "Connection handle ack lat");

    plb.add_u64_counter(l_msgr_recv_encrypted_bytes, "msgr_recv_encrypted_bytes", "Network received encrypted bytes", NULL, 0, unit_t(UNIT_BYTES));
    plb.add_u64_counter(l_msgr_send_encrypted_bytes, "msgr_send_encrypted_bytes", "Network sent encrypted bytes", NULL, 0, unit_t(UNIT_BYTES));

    perf_logger = plb.create_perf_counters();
    cct->get_perfcounters_collection()->add(perf_logger);

    // Add labeled perfcounters
    std::string labels = ceph::perf_counters::key_create(
        name_prefix, {{"id", std::to_string(id)}});
    PerfCountersBuilder plb_labeled(
        cct, labels, l_msgr_labeled_first,
        l_msgr_labeled_last);

    plb_labeled.add_u64_counter(
        l_msgr_connection_ready_timeouts, "msgr_connection_ready_timeouts",
        "Number of not yet ready connections declared as dead", NULL,
        PerfCountersBuilder::PRIO_USEFUL);
    plb_labeled.add_u64_counter(
        l_msgr_connection_idle_timeouts, "msgr_connection_idle_timeouts",
        "Number of connections closed due to idleness", NULL,
        PerfCountersBuilder::PRIO_USEFUL);

    perf_labeled_logger = plb_labeled.create_perf_counters();
    cct->get_perfcounters_collection()->add(perf_labeled_logger);
  }
  // Unregisters and frees both perf counter sets.
  virtual ~Worker() {
    if (perf_logger) {
      cct->get_perfcounters_collection()->remove(perf_logger);
      delete perf_logger;
    }
    if (perf_labeled_logger) {
      cct->get_perfcounters_collection()->remove(perf_labeled_logger);
      delete perf_labeled_logger;
    }
  }

  // Backend hooks.
  virtual int listen(entity_addr_t &addr, unsigned addr_slot,
                     const SocketOptions &opts, ServerSocket *) = 0;
  virtual int connect(const entity_addr_t &addr,
                      const SocketOptions &opts, ConnectedSocket *socket) = 0;
  virtual void destroy() {}
  virtual void initialize() {}

  PerfCounters *get_perf_counter() { return perf_logger; }
  PerfCounters *get_labeled_perf_counter() { return perf_labeled_logger; }
  // Drop one reference taken by NetworkStack::get_worker().
  void release_worker() {
    int oldref = references.fetch_sub(1);
    ceph_assert(oldref > 0);
  }
  // Signal that the worker thread finished initialize().
  void init_done() {
    init_lock.lock();
    init = true;
    init_cond.notify_all();
    init_lock.unlock();
  }
  bool is_init() {
    std::lock_guard<std::mutex> l(init_lock);
    return init;
  }
  void wait_for_init() {
    std::unique_lock<std::mutex> l(init_lock);
    while (!init)
      init_cond.wait(l);
  }
  // Return to the pre-init state so the worker can be started again.
  void reset() {
    init_lock.lock();
    init = false;
    init_cond.notify_all();
    init_lock.unlock();
    done = false;
  }
};
class NetworkStack {
ceph::spinlock pool_spin;
bool started = false;
std::function<void ()> add_thread(Worker* w);
virtual Worker* create_worker(CephContext *c, unsigned i) = 0;
virtual void rename_thread(unsigned id) {
static constexpr int TASK_COMM_LEN = 16;
char tp_name[TASK_COMM_LEN];
sprintf(tp_name, "msgr-worker-%u", id);
ceph_pthread_setname(pthread_self(), tp_name);
}
protected:
CephContext *cct;
std::vector<Worker*> workers;
explicit NetworkStack(CephContext *c);
public:
NetworkStack(const NetworkStack &) = delete;
NetworkStack& operator=(const NetworkStack &) = delete;
virtual ~NetworkStack() {
for (auto &&w : workers)
delete w;
}
static std::shared_ptr<NetworkStack> create(
CephContext *c, const std::string &type);
// backend need to override this method if backend doesn't support shared
// listen table.
// For example, posix backend has in kernel global listen table. If one
// thread bind a port, other threads also aware this.
// But for dpdk backend, we maintain listen table in each thread. So we
// need to let each thread do binding port.
virtual bool support_local_listen_table() const { return false; }
virtual bool nonblock_connect_need_writable_event() const { return true; }
void start();
void stop();
virtual Worker *get_worker();
Worker *get_worker(unsigned worker_id) {
return workers[worker_id];
}
void drain();
unsigned get_num_worker() const {
return workers.size();
}
// direct is used in tests only
virtual void spawn_worker(std::function<void ()> &&) = 0;
virtual void join_worker(unsigned i) = 0;
virtual bool is_ready() { return true; };
virtual void ready() { };
};
#endif //CEPH_MSG_ASYNC_STACK_H
| 12,199 | 29.272953 | 147 |
h
|
null |
ceph-main/src/msg/async/compression_meta.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "compressor/Compressor.h"
// Result of the msgr2 on-wire compression negotiation for one connection:
// which mode (or COMP_NONE when compression is off) and which algorithm
// the two peers agreed on.
struct CompConnectionMeta {
  TOPNSPC::Compressor::CompressionMode con_mode =
    TOPNSPC::Compressor::COMP_NONE;  // negotiated mode
  TOPNSPC::Compressor::CompressionAlgorithm con_method =
    TOPNSPC::Compressor::COMP_ALG_NONE;  // negotiated method

  // True when the negotiation enabled compression for this connection.
  bool is_compress() const {
    return con_mode != TOPNSPC::Compressor::COMP_NONE;
  }
  TOPNSPC::Compressor::CompressionAlgorithm get_method() const {
    return con_method;
  }
  TOPNSPC::Compressor::CompressionMode get_mode() const {
    return con_mode;
  }
};
| 654 | 28.772727 | 70 |
h
|
null |
ceph-main/src/msg/async/compression_onwire.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "compression_onwire.h"
#include "compression_meta.h"
#include "common/dout.h"
#define dout_subsys ceph_subsys_ms
namespace ceph::compression::onwire {
// Build the rx/tx compression handler pair for a connection.  Returns an
// empty pair (compression disabled) unless the negotiated meta requests
// compression and the agreed codec can actually be instantiated.
rxtx_t rxtx_t::create_handler_pair(
    CephContext* ctx,
    const CompConnectionMeta& comp_meta,
    std::uint64_t compress_min_size)
{
  if (!comp_meta.is_compress()) {
    return {};
  }
  auto codec = Compressor::create(ctx, comp_meta.get_method());
  if (!codec) {
    return {};
  }
  auto rx = std::make_unique<RxHandler>(ctx, codec);
  auto tx = std::make_unique<TxHandler>(ctx, codec,
                                        comp_meta.get_mode(),
                                        compress_min_size);
  return {std::move(rx), std::move(tx)};
}
// Compress one outgoing frame segment.  Returns std::nullopt to signal
// "send uncompressed" - either because the whole frame (as recorded by
// reset_handler()) is below the configured threshold, or because the codec
// failed.  An empty input is passed through as an empty output.
std::optional<ceph::bufferlist> TxHandler::compress(const ceph::bufferlist &input)
{
  // Threshold applies to the frame's total on-wire size, not this segment;
  // note this early-out happens before m_compress_potential is charged.
  if (m_init_onwire_size < m_min_size) {
    ldout(m_cct, 20) << __func__
      << " discovered frame that is smaller than threshold, aborting compression"
      << dendl;
    return {};
  }

  m_compress_potential -= input.length();

  ceph::bufferlist out;
  if (input.length() == 0) {
    ldout(m_cct, 20) << __func__
      << " discovered an empty segment, skipping compression without aborting"
      << dendl;
    out.clear();
    return out;
  }

  std::optional<int32_t> compressor_message;
  // Compressor::compress() returns non-zero on failure.
  if (m_compressor->compress(input, out, compressor_message)) {
    return {};
  } else {
    ldout(m_cct, 20) << __func__ << " uncompressed.length()=" << input.length()
                     << " compressed.length()=" << out.length() << dendl;
    // Track bytes actually emitted for the ratio logged in done().
    m_onwire_size += out.length();
    return out;
  }
}
// Decompress one received frame segment.  Empty segments are passed
// through untouched; a codec failure is reported as std::nullopt.
std::optional<ceph::bufferlist> RxHandler::decompress(const ceph::bufferlist &input)
{
  ceph::bufferlist plain;
  if (input.length() == 0) {
    ldout(m_cct, 20) << __func__
      << " discovered an empty segment, skipping decompression without aborting"
      << dendl;
    plain.clear();
    return plain;
  }

  std::optional<int32_t> compressor_message;
  // The codec reports failure with a non-zero return code.
  const auto rc = m_compressor->decompress(input, plain, compressor_message);
  if (rc) {
    return {};
  }
  ldout(m_cct, 20) << __func__ << " compressed.length()=" << input.length()
                   << " uncompressed.length()=" << plain.length() << dendl;
  return plain;
}
// End-of-frame hook: logs the compression ratio achieved for the frame
// accounted since the last reset_handler() call.
void TxHandler::done()
{
  ldout(m_cct, 25) << __func__ << " compression ratio=" << get_ratio() << dendl;
}
} // namespace ceph::compression::onwire
| 2,397 | 26.563218 | 84 |
cc
|
null |
ceph-main/src/msg/async/compression_onwire.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_COMPRESSION_ONWIRE_H
#define CEPH_COMPRESSION_ONWIRE_H
#include <cstdint>
#include <optional>
#include "compressor/Compressor.h"
#include "include/buffer.h"
class CompConnectionMeta;
namespace ceph::compression::onwire {
using Compressor = TOPNSPC::Compressor;
using CompressorRef = TOPNSPC::CompressorRef;
// Common base for the rx/tx compression handlers: holds the CephContext
// used for logging and a shared reference to the negotiated codec.
class Handler {
 public:
  Handler(CephContext* const cct, CompressorRef compressor)
      : m_cct(cct), m_compressor(compressor) {}

 protected:
  CephContext* const m_cct;
  CompressorRef m_compressor;
};
// Receive-side compression handler: decompresses incoming frame segments.
class RxHandler final : private Handler {
 public:
  RxHandler(CephContext* const cct, CompressorRef compressor)
      : Handler(cct, compressor) {}
  ~RxHandler() {};

  /**
   * Decompresses a bufferlist
   *
   * @param input compressed bufferlist
   *
   * @returns the decompressed bufferlist on success (an empty input is
   *          returned as-is), or std::nullopt if the codec failed
   */
  std::optional<ceph::bufferlist> decompress(const ceph::bufferlist &input);
};
// Transmit-side compression handler for msgr2 frames. Tracks the
// original (logical) and resulting (on-wire) sizes so the achieved
// compression ratio can be reported.
class TxHandler final : private Handler {
public:
  TxHandler(CephContext* const cct, CompressorRef compressor, int mode, std::uint64_t min_size)
    : Handler(cct, compressor),
      m_min_size(min_size),
      m_mode(static_cast<Compressor::CompressionMode>(mode))
  {}
  ~TxHandler() {}
  // Must be called before compressing the segments of a new frame.
  // NOTE(review): num_segments is currently unused by this implementation.
  void reset_handler(int num_segments, uint64_t size) {
    m_init_onwire_size = size;
    m_compress_potential = size;
    m_onwire_size = 0;
  }
  // Called after all segments of a frame were compressed successfully;
  // logs the compression statistics.
  void done();
  /**
   * Compresses a bufferlist
   *
   * @param input bufferlist to compress
   * @param out compressed bufferlist
   *
   * @returns compressed bufferlist on success, std::nullopt on failure
   */
  std::optional<ceph::bufferlist> compress(const ceph::bufferlist &input);
  // Ratio of initial (logical) size to final (on-wire) size.
  double get_ratio() const {
    return get_initial_size() / (double) get_final_size();
  }
  uint64_t get_initial_size() const {
    return m_init_onwire_size;
  }
  uint64_t get_final_size() const {
    return m_onwire_size;
  }
private:
  uint64_t m_min_size;
  Compressor::CompressionMode m_mode;
  uint64_t m_init_onwire_size;   // logical frame size before compression
  uint64_t m_onwire_size;        // accumulated size after compression
  uint64_t m_compress_potential; // bytes still eligible for compression
};
// Pair of compression handlers for one connection; either side may be
// null when compression is not negotiated for that direction.
struct rxtx_t {
  std::unique_ptr<RxHandler> rx;
  std::unique_ptr<TxHandler> tx;
  // Builds the rx/tx pair from the negotiated compression metadata.
  static rxtx_t create_handler_pair(
    CephContext* ctx,
    const CompConnectionMeta& comp_meta,
    std::uint64_t compress_min_size);
};
}
#endif // CEPH_COMPRESSION_ONWIRE_H
| 2,561 | 23.169811 | 97 |
h
|
null |
ceph-main/src/msg/async/crypto_onwire.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <array>
#include <openssl/evp.h>
#include "crypto_onwire.h"
#include "common/debug.h"
#include "common/ceph_crypto.h"
#include "include/types.h"
#define dout_subsys ceph_subsys_ms
namespace ceph::crypto::onwire {
// AES-128-GCM parameter sizes (bytes).
static constexpr const std::size_t AESGCM_KEY_LEN{16};
static constexpr const std::size_t AESGCM_IV_LEN{12};
static constexpr const std::size_t AESGCM_TAG_LEN{16};
static constexpr const std::size_t AESGCM_BLOCK_LEN{16};
// 96-bit GCM nonce laid out as a 32-bit fixed part and a 64-bit counter.
// Compared bytewise because it is used as raw IV material.
struct nonce_t {
  ceph_le32 fixed;
  ceph_le64 counter;
  bool operator==(const nonce_t& rhs) const {
    return !memcmp(this, &rhs, sizeof(*this));
  }
} __attribute__((packed));
static_assert(sizeof(nonce_t) == AESGCM_IV_LEN);
using key_t = std::array<std::uint8_t, AESGCM_KEY_LEN>;
// http://www.mindspring.com/~dmcgrew/gcm-nist-6.pdf
// https://www.openssl.org/docs/man1.0.2/crypto/EVP_aes_128_gcm.html#GCM-mode
// https://wiki.openssl.org/index.php/EVP_Authenticated_Encryption_and_Decryption
// https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38d.pdf
// Transmit-side AES-128-GCM handler. The EVP context is initialized
// with cipher and key once in the constructor; per-frame IVs are set
// in reset_tx_handler(). Nonces are wiped on destruction.
class AES128GCM_OnWireTxHandler : public ceph::crypto::onwire::TxHandler {
  CephContext* const cct;
  std::unique_ptr<EVP_CIPHER_CTX, decltype(&::EVP_CIPHER_CTX_free)> ectx;
  ceph::bufferlist buffer;
  nonce_t nonce, initial_nonce;
  bool used_initial_nonce;
  bool new_nonce_format;  // 64-bit counter?
  static_assert(sizeof(nonce) == AESGCM_IV_LEN);
public:
  AES128GCM_OnWireTxHandler(CephContext* const cct,
			    const key_t& key,
			    const nonce_t& nonce,
			    bool new_nonce_format)
    : cct(cct),
      ectx(EVP_CIPHER_CTX_new(), EVP_CIPHER_CTX_free),
      nonce(nonce), initial_nonce(nonce), used_initial_nonce(false),
      new_nonce_format(new_nonce_format) {
    ceph_assert_always(ectx);
    ceph_assert_always(key.size() * CHAR_BIT == 128);
    // first init selects the cipher, second supplies the key; the IV is
    // provided later, per frame, in reset_tx_handler()
    if (1 != EVP_EncryptInit_ex(ectx.get(), EVP_aes_128_gcm(),
				nullptr, nullptr, nullptr)) {
      throw std::runtime_error("EVP_EncryptInit_ex failed");
    }
    if(1 != EVP_EncryptInit_ex(ectx.get(), nullptr, nullptr,
        key.data(), nullptr)) {
      throw std::runtime_error("EVP_EncryptInit_ex failed");
    }
  }
  ~AES128GCM_OnWireTxHandler() override {
    // scrub key-derived nonce material from memory
    ::TOPNSPC::crypto::zeroize_for_security(&nonce, sizeof(nonce));
    ::TOPNSPC::crypto::zeroize_for_security(&initial_nonce, sizeof(initial_nonce));
  }
  void reset_tx_handler(const uint32_t* first, const uint32_t* last) override;
  void authenticated_encrypt_update(const ceph::bufferlist& plaintext) override;
  ceph::bufferlist authenticated_encrypt_final() override;
};
// Starts a new encryption round: installs the current nonce as IV,
// reserves output space for the update sizes in [first, last) plus the
// auth tag, and advances the nonce. Reusing a nonce with the same key
// would break GCM, so wrapping back to the initial nonce is fatal.
void AES128GCM_OnWireTxHandler::reset_tx_handler(const uint32_t* first,
						 const uint32_t* last)
{
  if (nonce == initial_nonce) {
    if (used_initial_nonce) {
      // the counter wrapped around to the very first nonce -- refuse to
      // ever encrypt with a repeated (key, nonce) pair
      throw ceph::crypto::onwire::TxHandlerError("out of nonces");
    }
    used_initial_nonce = true;
  }
  if(1 != EVP_EncryptInit_ex(ectx.get(), nullptr, nullptr, nullptr,
      reinterpret_cast<const unsigned char*>(&nonce))) {
    throw std::runtime_error("EVP_EncryptInit_ex failed");
  }
  ceph_assert(buffer.get_append_buffer_unused_tail_length() == 0);
  buffer.reserve(std::accumulate(first, last, AESGCM_TAG_LEN));
  if (!new_nonce_format) {
    // msgr2.0: 32-bit counter followed by 64-bit fixed field,
    // susceptible to overflow!
    nonce.fixed = nonce.fixed + 1;
  } else {
    nonce.counter = nonce.counter + 1;
  }
}
// Encrypts one plaintext bufferlist into the pre-reserved output
// buffer. GCM is a stream mode, so each buffer encrypts to exactly its
// own length -- asserted below.
void AES128GCM_OnWireTxHandler::authenticated_encrypt_update(
  const ceph::bufferlist& plaintext)
{
  // space was reserved up front by reset_tx_handler()
  ceph_assert(buffer.get_append_buffer_unused_tail_length() >=
	      plaintext.length());
  auto filler = buffer.append_hole(plaintext.length());
  for (const auto& plainbuf : plaintext.buffers()) {
    int update_len = 0;
    if(1 != EVP_EncryptUpdate(ectx.get(),
	reinterpret_cast<unsigned char*>(filler.c_str()),
	&update_len,
	reinterpret_cast<const unsigned char*>(plainbuf.c_str()),
	plainbuf.length())) {
      throw std::runtime_error("EVP_EncryptUpdate failed");
    }
    ceph_assert_always(update_len >= 0);
    ceph_assert(static_cast<unsigned>(update_len) == plainbuf.length());
    filler.advance(update_len);
  }
  ldout(cct, 15) << __func__
		 << " plaintext.length()=" << plaintext.length()
		 << " buffer.length()=" << buffer.length()
		 << dendl;
}
// Finalizes the round: EVP_EncryptFinal_ex emits no extra ciphertext
// for GCM (asserted), then the 16-byte auth tag is appended in the
// space reserved for it. Returns (moves out) the assembled ciphertext.
ceph::bufferlist AES128GCM_OnWireTxHandler::authenticated_encrypt_final()
{
  int final_len = 0;
  ceph_assert(buffer.get_append_buffer_unused_tail_length() ==
	      AESGCM_BLOCK_LEN);
  auto filler = buffer.append_hole(AESGCM_BLOCK_LEN);
  if(1 != EVP_EncryptFinal_ex(ectx.get(),
	reinterpret_cast<unsigned char*>(filler.c_str()),
	&final_len)) {
    throw std::runtime_error("EVP_EncryptFinal_ex failed");
  }
  ceph_assert_always(final_len == 0);
  // the tag overwrites the block reserved above; both are 16 bytes
  static_assert(AESGCM_BLOCK_LEN == AESGCM_TAG_LEN);
  if(1 != EVP_CIPHER_CTX_ctrl(ectx.get(),
	EVP_CTRL_GCM_GET_TAG, AESGCM_TAG_LEN,
	filler.c_str())) {
    throw std::runtime_error("EVP_CIPHER_CTX_ctrl failed");
  }
  ldout(cct, 15) << __func__
		 << " buffer.length()=" << buffer.length()
		 << " final_len=" << final_len
		 << dendl;
  return std::move(buffer);
}
// RX PART
// Receive-side AES-128-GCM handler, mirror of the tx handler: cipher
// and key are installed once, the per-frame IV in reset_rx_handler().
class AES128GCM_OnWireRxHandler : public ceph::crypto::onwire::RxHandler {
  std::unique_ptr<EVP_CIPHER_CTX, decltype(&::EVP_CIPHER_CTX_free)> ectx;
  nonce_t nonce;
  bool new_nonce_format;  // 64-bit counter?
  static_assert(sizeof(nonce) == AESGCM_IV_LEN);
public:
  AES128GCM_OnWireRxHandler(CephContext* const cct,
			    const key_t& key,
			    const nonce_t& nonce,
			    bool new_nonce_format)
    : ectx(EVP_CIPHER_CTX_new(), EVP_CIPHER_CTX_free),
      nonce(nonce), new_nonce_format(new_nonce_format) {
    ceph_assert_always(ectx);
    ceph_assert_always(key.size() * CHAR_BIT == 128);
    if (1 != EVP_DecryptInit_ex(ectx.get(), EVP_aes_128_gcm(),
				nullptr, nullptr, nullptr)) {
      throw std::runtime_error("EVP_DecryptInit_ex failed");
    }
    if(1 != EVP_DecryptInit_ex(ectx.get(), nullptr, nullptr,
        key.data(), nullptr)) {
      throw std::runtime_error("EVP_DecryptInit_ex failed");
    }
  }
  ~AES128GCM_OnWireRxHandler() override {
    // scrub nonce material from memory
    ::TOPNSPC::crypto::zeroize_for_security(&nonce, sizeof(nonce));
  }
  std::uint32_t get_extra_size_at_final() override {
    return AESGCM_TAG_LEN;
  }
  void reset_rx_handler() override;
  void authenticated_decrypt_update(ceph::bufferlist& bl) override;
  void authenticated_decrypt_update_final(ceph::bufferlist& bl) override;
};
// Starts a new decryption round: installs the current nonce as IV and
// advances it, using the same increment scheme as the tx side so both
// peers stay in step.
void AES128GCM_OnWireRxHandler::reset_rx_handler()
{
  if(1 != EVP_DecryptInit_ex(ectx.get(), nullptr, nullptr, nullptr,
      reinterpret_cast<const unsigned char*>(&nonce))) {
    throw std::runtime_error("EVP_DecryptInit_ex failed");
  }
  if (!new_nonce_format) {
    // msgr2.0: 32-bit counter followed by 64-bit fixed field,
    // susceptible to overflow!
    nonce.fixed = nonce.fixed + 1;
  } else {
    nonce.counter = nonce.counter + 1;
  }
}
// Decrypts bl in place, buffer by buffer. GCM is a stream mode, so the
// output length equals the input length (asserted).
void AES128GCM_OnWireRxHandler::authenticated_decrypt_update(
  ceph::bufferlist& bl)
{
  // discard cached crcs as we will be writing through c_str()
  bl.invalidate_crc();
  for (auto& buf : bl.buffers()) {
    // in-place decrypt: source and destination are the same buffer
    auto p = reinterpret_cast<unsigned char*>(const_cast<char*>(buf.c_str()));
    int update_len = 0;
    if (1 != EVP_DecryptUpdate(ectx.get(), p, &update_len, p, buf.length())) {
      throw std::runtime_error("EVP_DecryptUpdate failed");
    }
    ceph_assert_always(update_len >= 0);
    ceph_assert(static_cast<unsigned>(update_len) == buf.length());
  }
}
// Decrypts the final portion of ciphertext and verifies the GCM auth
// tag carried in the last AESGCM_TAG_LEN bytes of bl. On return bl
// holds only the plaintext; a bad tag raises MsgAuthError.
void AES128GCM_OnWireRxHandler::authenticated_decrypt_update_final(
  ceph::bufferlist& bl)
{
  unsigned orig_len = bl.length();
  ceph_assert(orig_len >= AESGCM_TAG_LEN);
  // decrypt optional data. Caller is obliged to provide only signature but it
  // may supply ciphertext as well. Combining the update + final is reflected
  // combined together.
  ceph::bufferlist auth_tag;
  bl.splice(orig_len - AESGCM_TAG_LEN, AESGCM_TAG_LEN, &auth_tag);
  if (bl.length() > 0) {
    authenticated_decrypt_update(bl);
  }
  // we need to ensure the tag is stored in continuous memory.
  if (1 != EVP_CIPHER_CTX_ctrl(ectx.get(), EVP_CTRL_GCM_SET_TAG,
	AESGCM_TAG_LEN, auth_tag.c_str())) {
    throw std::runtime_error("EVP_CIPHER_CTX_ctrl failed");
  }
  // I expect that 0 bytes will be appended. The call is supposed solely to
  // authenticate the message.
  {
    int final_len = 0;
    if (0 >= EVP_DecryptFinal_ex(ectx.get(), nullptr, &final_len)) {
      // tag mismatch -- the message failed authentication
      throw MsgAuthError();
    }
    ceph_assert_always(final_len == 0);
    ceph_assert(bl.length() + AESGCM_TAG_LEN == orig_len);
  }
}
// Builds the rx/tx crypto handler pair from the negotiated connection
// secret: a 16-byte key followed by two 12-byte nonces. `crossed`
// swaps the rx/tx nonces so the two peers use opposite directions.
// Returns {nullptr, nullptr} when the connection is not in secure mode.
ceph::crypto::onwire::rxtx_t ceph::crypto::onwire::rxtx_t::create_handler_pair(
  CephContext* cct,
  const AuthConnectionMeta& auth_meta,
  bool new_nonce_format,
  bool crossed)
{
  if (auth_meta.is_mode_secure()) {
    // secret layout: key || rx_nonce || tx_nonce
    ceph_assert_always(auth_meta.connection_secret.length() >= \
      sizeof(key_t) + 2 * sizeof(nonce_t));
    const char* secbuf = auth_meta.connection_secret.c_str();
    key_t key;
    {
      ::memcpy(key.data(), secbuf, sizeof(key));
      secbuf += sizeof(key);
    }
    nonce_t rx_nonce;
    {
      ::memcpy(&rx_nonce, secbuf, sizeof(rx_nonce));
      secbuf += sizeof(rx_nonce);
    }
    nonce_t tx_nonce;
    {
      ::memcpy(&tx_nonce, secbuf, sizeof(tx_nonce));
      secbuf += sizeof(tx_nonce);
    }
    return {
      std::make_unique<AES128GCM_OnWireRxHandler>(
	cct, key, crossed ? tx_nonce : rx_nonce, new_nonce_format),
      std::make_unique<AES128GCM_OnWireTxHandler>(
	cct, key, crossed ? rx_nonce : tx_nonce, new_nonce_format)
    };
  } else {
    return { nullptr, nullptr };
  }
}
} // namespace ceph::crypto::onwire
| 9,742 | 30.429032 | 83 |
cc
|
null |
ceph-main/src/msg/async/crypto_onwire.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_CRYPTO_ONWIRE_H
#define CEPH_CRYPTO_ONWIRE_H
#include <cstdint>
#include <memory>
#include "auth/Auth.h"
#include "include/buffer.h"
namespace ceph::math {
// TODO
// Wrapper intended to carry alignment guarantees for T (see TODO above).
// NOTE(review): the constructor is currently private (no public:
// section), so this type is not constructible from outside -- confirm
// whether that is intentional before using it.
template <typename T>
class always_aligned_t {
  T val;
  template <class... Args>
  always_aligned_t(Args&&... args)
    : val(std::forward<Args>(args)...) {
  }
};
} // namespace ceph::math
namespace ceph::crypto::onwire {
// Thrown when an incoming message fails authentication (e.g. a GCM tag
// mismatch during decryption).
struct MsgAuthError : public std::runtime_error {
  MsgAuthError()
    : runtime_error("message signature mismatch") {
  }
};
// Thrown for unrecoverable transmit-side crypto errors (e.g. nonce
// exhaustion).
struct TxHandlerError : public std::runtime_error {
  TxHandlerError(const char* what)
    : std::runtime_error(std::string("tx handler error: ") + what) {}
};
// Abstract transmit-side encryption handler. The expected call
// sequence is reset -> update... -> final, repeated per frame.
struct TxHandler {
  virtual ~TxHandler() = default;
  // Instance of TxHandler must be reset before doing any encrypt-update
  // step. This applies also to situation when encrypt-final was already
  // called and another round of update-...-update-final will take place.
  //
  // The input parameter informs implementation how the -update sequence
  // is fragmented and allows to make concious decision about allocation
  // or reusage of provided memory. One implementation could do in-place
  // encryption while other might prefer one huge output buffer.
  //
  // It's undefined what will happen if client doesn't follow the order.
  //
  // TODO: switch to always_aligned_t
  virtual void reset_tx_handler(const uint32_t* first,
				const uint32_t* last) = 0;
  // Convenience overload taking the fragment sizes as a braced list.
  void reset_tx_handler(std::initializer_list<uint32_t> update_size_sequence) {
    if (update_size_sequence.size() > 0) {
      const uint32_t* first = &*update_size_sequence.begin();
      reset_tx_handler(first, first + update_size_sequence.size());
    } else {
      reset_tx_handler(nullptr, nullptr);
    }
  }
  // Perform encryption. Client gives full ownership right to provided
  // bufferlist. The method MUST NOT be called after _final() if there
  // was no call to _reset().
  virtual void authenticated_encrypt_update(
    const ceph::bufferlist& plaintext) = 0;
  // Generates authentication signature and returns bufferlist crafted
  // basing on plaintext from preceding call to _update().
  virtual ceph::bufferlist authenticated_encrypt_final() = 0;
};
// Abstract receive-side decryption handler; mirror of TxHandler with
// the same reset -> update... -> final calling convention.
class RxHandler {
public:
  virtual ~RxHandler() = default;
  // Transmitter can append extra bytes of ciphertext at the -final step.
  // This method return how much was added, and thus let client translate
  // plaintext size into ciphertext size to grab from wire.
  virtual std::uint32_t get_extra_size_at_final() = 0;
  // Instance of RxHandler must be reset before doing any decrypt-update
  // step. This applies also to situation when decrypt-final was already
  // called and another round of update-...-update-final will take place.
  virtual void reset_rx_handler() = 0;
  // Perform decryption ciphertext must be ALWAYS aligned to 16 bytes.
  virtual void authenticated_decrypt_update(ceph::bufferlist& bl) = 0;
  // Perform decryption of last cipertext's portion and verify signature
  // for overall decryption sequence.
  // Throws on integrity/authenticity checks
  virtual void authenticated_decrypt_update_final(ceph::bufferlist& bl) = 0;
};
// Pair of crypto handlers for one connection; both are null when the
// connection is not in secure mode.
struct rxtx_t {
  //rxtx_t(rxtx_t&& r) : rx(std::move(rx)), tx(std::move(tx)) {}
  // Each peer can use different handlers.
  // Hmm, isn't that too much flexbility?
  std::unique_ptr<RxHandler> rx;
  std::unique_ptr<TxHandler> tx;
  // Builds the pair from the negotiated auth metadata; `crossed` swaps
  // the rx/tx nonce roles between the two peers.
  static rxtx_t create_handler_pair(
    CephContext* ctx,
    const class AuthConnectionMeta& auth_meta,
    bool new_nonce_format,
    bool crossed);
};
} // namespace ceph::crypto::onwire
#endif // CEPH_CRYPTO_ONWIRE_H
| 4,123 | 30.480916 | 79 |
h
|
null |
ceph-main/src/msg/async/frames_v2.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "frames_v2.h"
#include <ostream>
#include <fmt/format.h>
namespace ceph::msgr::v2 {
// Unpads bufferlist to unpadded_len.
// Strips trailing padding from bl, shrinking it back to unpadded_len.
static void unpad_zero(bufferlist& bl, uint32_t unpadded_len) {
  const uint32_t padded_len = bl.length();
  ceph_assert(padded_len >= unpadded_len);
  if (const uint32_t excess = padded_len - unpadded_len; excess > 0) {
    bl.splice(unpadded_len, excess);
  }
}
// Discards trailing empty segments, unless there is just one segment.
// A frame always has at least one (possibly empty) segment.
// Drops trailing empty segments; a frame always keeps at least one
// (possibly empty) segment.
static size_t calc_num_segments(const bufferlist segment_bls[],
                                size_t segment_count) {
  ceph_assert(segment_count > 0 && segment_count <= MAX_NUM_SEGMENTS);
  size_t num = segment_count;
  while (num > 1 && segment_bls[num - 1].length() == 0) {
    --num;
  }
  return num;
}
static void check_segment_crc(const bufferlist& segment_bl,
uint32_t expected_crc) {
uint32_t crc = segment_bl.crc32c(-1);
if (crc != expected_crc) {
throw FrameError(fmt::format(
"bad segment crc calculated={} expected={}", crc, expected_crc));
}
}
// Returns true if the frame is ready for dispatching, or false if
// it was aborted by the sender and must be dropped.
// Returns true if the frame is ready for dispatching (sender marked it
// complete), false if the sender aborted it; any other status value is
// a protocol error.
static bool check_epilogue_late_status(__u8 late_status) {
  const __u8 aborted = late_status & FRAME_LATE_STATUS_ABORTED_MASK;
  if (aborted == FRAME_LATE_STATUS_COMPLETE) {
    return true;
  }
  if (aborted == FRAME_LATE_STATUS_ABORTED) {
    return false;
  }
  throw FrameError(fmt::format("bad late_status"));
}
// Populates the on-wire preamble from the current segment descriptors
// and flags, finishing with a crc over everything but the crc field.
void FrameAssembler::fill_preamble(Tag tag,
                                   preamble_block_t& preamble) const {
  // FIPS zeroization audit 20191115: this memset is not security related.
  ::memset(&preamble, 0, sizeof(preamble));
  preamble.tag = static_cast<__u8>(tag);
  for (size_t i = 0; i < m_descs.size(); i++) {
    preamble.segments[i].length = m_descs[i].logical_len;
    preamble.segments[i].alignment = m_descs[i].align;
  }
  preamble.num_segments = m_descs.size();
  preamble.flags = m_flags;
  // crc covers the preamble except its own trailing crc field
  preamble.crc = ceph_crc32c(
      0, reinterpret_cast<const unsigned char*>(&preamble),
      sizeof(preamble) - sizeof(preamble.crc));
}
// Sum of the logical (pre-padding, pre-crypto) lengths of all segments.
uint64_t FrameAssembler::get_frame_logical_len() const {
  ceph_assert(!m_descs.empty());
  uint64_t logical_len = 0;
  for (const auto& desc : m_descs) {
    logical_len += desc.logical_len;
  }
  return logical_len;
}
// Total bytes the frame occupies on the wire:
// preamble + each segment (padded/tagged) + epilogue.
uint64_t FrameAssembler::get_frame_onwire_len() const {
  ceph_assert(!m_descs.empty());
  uint64_t onwire_len =
      get_preamble_onwire_len() + get_epilogue_onwire_len();
  for (size_t i = 0; i < m_descs.size(); i++) {
    onwire_len += get_segment_onwire_len(i);
  }
  return onwire_len;
}
// Assembles a rev0 (msgr2.0) plain/crc frame:
// preamble | segments | epilogue with one crc per segment.
bufferlist FrameAssembler::asm_crc_rev0(const preamble_block_t& preamble,
                                        bufferlist segment_bls[]) const {
  epilogue_crc_rev0_block_t epilogue;
  // FIPS zeroization audit 20191115: this memset is not security related.
  ::memset(&epilogue, 0, sizeof(epilogue));
  bufferlist frame_bl(sizeof(preamble) + sizeof(epilogue));
  frame_bl.append(reinterpret_cast<const char*>(&preamble), sizeof(preamble));
  for (size_t i = 0; i < m_descs.size(); i++) {
    ceph_assert(segment_bls[i].length() == m_descs[i].logical_len);
    // crc computation may be disabled (e.g. crc-less transports)
    epilogue.crc_values[i] = m_with_data_crc ? segment_bls[i].crc32c(-1) : 0;
    if (segment_bls[i].length() > 0) {
      frame_bl.claim_append(segment_bls[i]);
    }
  }
  frame_bl.append(reinterpret_cast<const char*>(&epilogue), sizeof(epilogue));
  return frame_bl;
}
// Assembles a rev0 secure frame: preamble, padded segments and epilogue
// are encrypted as a single authenticated unit (one auth tag at the end).
bufferlist FrameAssembler::asm_secure_rev0(const preamble_block_t& preamble,
                                           bufferlist segment_bls[]) const {
  bufferlist preamble_bl(sizeof(preamble));
  preamble_bl.append(reinterpret_cast<const char*>(&preamble),
                     sizeof(preamble));
  epilogue_secure_rev0_block_t epilogue;
  // FIPS zeroization audit 20191115: this memset is not security related.
  ::memset(&epilogue, 0, sizeof(epilogue));
  bufferlist epilogue_bl(sizeof(epilogue));
  epilogue_bl.append(reinterpret_cast<const char*>(&epilogue),
                     sizeof(epilogue));
  // preamble + MAX_NUM_SEGMENTS + epilogue
  uint32_t onwire_lens[MAX_NUM_SEGMENTS + 2];
  onwire_lens[0] = preamble_bl.length();
  for (size_t i = 0; i < m_descs.size(); i++) {
    onwire_lens[i + 1] = segment_bls[i].length();  // already padded
  }
  onwire_lens[m_descs.size() + 1] = epilogue_bl.length();
  // one encryption round covering everything; sizes let the tx handler
  // pre-reserve the output buffer
  m_crypto->tx->reset_tx_handler(onwire_lens,
                                 onwire_lens + m_descs.size() + 2);
  m_crypto->tx->authenticated_encrypt_update(preamble_bl);
  for (size_t i = 0; i < m_descs.size(); i++) {
    if (segment_bls[i].length() > 0) {
      m_crypto->tx->authenticated_encrypt_update(segment_bls[i]);
    }
  }
  m_crypto->tx->authenticated_encrypt_update(epilogue_bl);
  return m_crypto->tx->authenticated_encrypt_final();
}
// Assembles a rev1 (msgr2.1) plain/crc frame. The first segment's crc
// immediately follows the segment; remaining segment crcs live in the
// epilogue, which is omitted entirely for single-segment frames.
bufferlist FrameAssembler::asm_crc_rev1(const preamble_block_t& preamble,
                                        bufferlist segment_bls[]) const {
  epilogue_crc_rev1_block_t epilogue;
  // FIPS zeroization audit 20191115: this memset is not security related.
  ::memset(&epilogue, 0, sizeof(epilogue));
  epilogue.late_status |= FRAME_LATE_STATUS_COMPLETE;
  bufferlist frame_bl(sizeof(preamble) + FRAME_CRC_SIZE + sizeof(epilogue));
  frame_bl.append(reinterpret_cast<const char*>(&preamble), sizeof(preamble));
  ceph_assert(segment_bls[0].length() == m_descs[0].logical_len);
  if (segment_bls[0].length() > 0) {
    uint32_t crc = m_with_data_crc ? segment_bls[0].crc32c(-1) : 0;
    frame_bl.claim_append(segment_bls[0]);
    encode(crc, frame_bl);
  }
  if (m_descs.size() == 1) {
    return frame_bl;  // no epilogue if only one segment
  }
  for (size_t i = 1; i < m_descs.size(); i++) {
    ceph_assert(segment_bls[i].length() == m_descs[i].logical_len);
    epilogue.crc_values[i - 1] =
        m_with_data_crc ? segment_bls[i].crc32c(-1) : 0;
    if (segment_bls[i].length() > 0) {
      frame_bl.claim_append(segment_bls[i]);
    }
  }
  frame_bl.append(reinterpret_cast<const char*>(&epilogue), sizeof(epilogue));
  return frame_bl;
}
// Assembles a rev1 secure frame. The preamble together with (up to)
// FRAME_PREAMBLE_INLINE_SIZE bytes of the first segment forms one
// encryption unit; the rest of the first segment is a second unit;
// remaining segments plus the epilogue are a third (epilogue omitted
// for single-segment frames). Each unit carries its own auth tag.
bufferlist FrameAssembler::asm_secure_rev1(const preamble_block_t& preamble,
                                           bufferlist segment_bls[]) const {
  bufferlist preamble_bl;
  if (segment_bls[0].length() > FRAME_PREAMBLE_INLINE_SIZE) {
    // first segment is partially inlined, inline buffer is full
    preamble_bl.reserve(sizeof(preamble));
    preamble_bl.append(reinterpret_cast<const char*>(&preamble),
                       sizeof(preamble));
    segment_bls[0].splice(0, FRAME_PREAMBLE_INLINE_SIZE, &preamble_bl);
  } else {
    // first segment is fully inlined, inline buffer may need padding
    uint32_t pad_len = FRAME_PREAMBLE_INLINE_SIZE - segment_bls[0].length();
    preamble_bl.reserve(sizeof(preamble) + pad_len);
    preamble_bl.append(reinterpret_cast<const char*>(&preamble),
                       sizeof(preamble));
    preamble_bl.claim_append(segment_bls[0]);
    if (pad_len > 0) {
      preamble_bl.append_zero(pad_len);
    }
  }
  m_crypto->tx->reset_tx_handler({preamble_bl.length()});
  m_crypto->tx->authenticated_encrypt_update(preamble_bl);
  auto frame_bl = m_crypto->tx->authenticated_encrypt_final();
  if (segment_bls[0].length() > 0) {
    // remainder of the first segment: its own encryption unit
    m_crypto->tx->reset_tx_handler({segment_bls[0].length()});
    m_crypto->tx->authenticated_encrypt_update(segment_bls[0]);
    frame_bl.claim_append(m_crypto->tx->authenticated_encrypt_final());
  }
  if (m_descs.size() == 1) {
    return frame_bl;  // no epilogue if only one segment
  }
  epilogue_secure_rev1_block_t epilogue;
  // FIPS zeroization audit 20191115: this memset is not security related.
  ::memset(&epilogue, 0, sizeof(epilogue));
  epilogue.late_status |= FRAME_LATE_STATUS_COMPLETE;
  bufferlist epilogue_bl(sizeof(epilogue));
  epilogue_bl.append(reinterpret_cast<const char*>(&epilogue),
                     sizeof(epilogue));
  // MAX_NUM_SEGMENTS - 1 + epilogue
  uint32_t onwire_lens[MAX_NUM_SEGMENTS];
  for (size_t i = 1; i < m_descs.size(); i++) {
    onwire_lens[i - 1] = segment_bls[i].length();  // already padded
  }
  onwire_lens[m_descs.size() - 1] = epilogue_bl.length();
  m_crypto->tx->reset_tx_handler(onwire_lens, onwire_lens + m_descs.size());
  for (size_t i = 1; i < m_descs.size(); i++) {
    if (segment_bls[i].length() > 0) {
      m_crypto->tx->authenticated_encrypt_update(segment_bls[i]);
    }
  }
  m_crypto->tx->authenticated_encrypt_update(epilogue_bl);
  frame_bl.claim_append(m_crypto->tx->authenticated_encrypt_final());
  return frame_bl;
}
// Assembles a complete on-wire frame from the given segments:
// optionally compresses, builds the preamble, pads segments for the
// secure path, then dispatches to the rev0/rev1 x plain/secure variant.
bufferlist FrameAssembler::assemble_frame(Tag tag, bufferlist segment_bls[],
                                          const uint16_t segment_aligns[],
                                          size_t segment_count) {
  m_flags = 0;
  // trailing empty segments are dropped from the descriptor list
  m_descs.resize(calc_num_segments(segment_bls, segment_count));
  for (size_t i = 0; i < m_descs.size(); i++) {
    m_descs[i].logical_len = segment_bls[i].length();
    m_descs[i].align = segment_aligns[i];
  }
  if (m_compression->tx) {
    asm_compress(segment_bls);
  }
  preamble_block_t preamble;
  fill_preamble(tag, preamble);
  // rx/tx crypto handlers are set together; rx != null means the
  // connection runs in secure mode
  if (m_crypto->rx) {
    for (size_t i = 0; i < m_descs.size(); i++) {
      ceph_assert(segment_bls[i].length() == m_descs[i].logical_len);
      // We're padding segments to biggest cipher's block size. Although
      // AES-GCM can live without that as it's a stream cipher, we don't
      // want to be fixed to stream ciphers only.
      uint32_t padded_len = get_segment_padded_len(i);
      if (padded_len > segment_bls[i].length()) {
        uint32_t pad_len = padded_len - segment_bls[i].length();
        segment_bls[i].reserve(pad_len);
        segment_bls[i].append_zero(pad_len);
      }
    }
    if (m_is_rev1) {
      return asm_secure_rev1(preamble, segment_bls);
    }
    return asm_secure_rev0(preamble, segment_bls);
  }
  if (m_is_rev1) {
    return asm_crc_rev1(preamble, segment_bls);
  }
  return asm_crc_rev0(preamble, segment_bls);
}
// Decrypts (if secure) and validates the received preamble, loading
// the segment descriptors and flags for the frame. Returns the frame's
// tag. Throws FrameError on any malformed field.
Tag FrameAssembler::disassemble_preamble(bufferlist& preamble_bl) {
  if (m_crypto->rx) {
    m_crypto->rx->reset_rx_handler();
    if (m_is_rev1) {
      // rev1 secure preamble carries the inline buffer plus an auth tag
      ceph_assert(preamble_bl.length() == FRAME_PREAMBLE_WITH_INLINE_SIZE +
                                          get_auth_tag_len());
      m_crypto->rx->authenticated_decrypt_update_final(preamble_bl);
    } else {
      ceph_assert(preamble_bl.length() == sizeof(preamble_block_t));
      m_crypto->rx->authenticated_decrypt_update(preamble_bl);
    }
  } else {
    ceph_assert(preamble_bl.length() == sizeof(preamble_block_t));
  }
  // I expect ceph_le32 will make the endian conversion for me. Passing
  // everything through ::Decode is unnecessary.
  auto preamble = reinterpret_cast<const preamble_block_t*>(
      preamble_bl.c_str());
  // check preamble crc before any further processing
  uint32_t crc = ceph_crc32c(
      0, reinterpret_cast<const unsigned char*>(preamble),
      sizeof(*preamble) - sizeof(preamble->crc));
  if (crc != preamble->crc) {
    throw FrameError(fmt::format(
        "bad preamble crc calculated={} expected={}", crc, preamble->crc));
  }
  // see calc_num_segments()
  if (preamble->num_segments < 1 ||
      preamble->num_segments > MAX_NUM_SEGMENTS) {
    throw FrameError(fmt::format(
        "bad number of segments num_segments={}", preamble->num_segments));
  }
  if (preamble->num_segments > 1 &&
      preamble->segments[preamble->num_segments - 1].length == 0) {
    throw FrameError("last segment empty");
  }
  m_descs.resize(preamble->num_segments);
  for (size_t i = 0; i < m_descs.size(); i++) {
    m_descs[i].logical_len = preamble->segments[i].length;
    m_descs[i].align = preamble->segments[i].alignment;
  }
  m_flags = preamble->flags;
  // If frame has been compressed,
  // we need to make sure the compression handler has been setup
  ceph_assert_always(!is_compressed() || m_compression->rx);
  return static_cast<Tag>(preamble->tag);
}
// rev0 plain path: verifies every segment's crc against the epilogue.
// Returns false if the sender marked the frame aborted.
bool FrameAssembler::disasm_all_crc_rev0(bufferlist segment_bls[],
                                         bufferlist& epilogue_bl) const {
  ceph_assert(epilogue_bl.length() == sizeof(epilogue_crc_rev0_block_t));
  auto epilogue = reinterpret_cast<const epilogue_crc_rev0_block_t*>(
      epilogue_bl.c_str());
  for (size_t i = 0; i < m_descs.size(); i++) {
    ceph_assert(segment_bls[i].length() == m_descs[i].logical_len);
    if (m_with_data_crc) {
      check_segment_crc(segment_bls[i], epilogue->crc_values[i]);
    }
  }
  return !(epilogue->late_flags & FRAME_LATE_FLAG_ABORTED);
}
// rev0 secure path: decrypts all segments in place, strips padding,
// then decrypts+authenticates the epilogue (which carries the auth
// tag). Returns false if the sender marked the frame aborted.
bool FrameAssembler::disasm_all_secure_rev0(bufferlist segment_bls[],
                                            bufferlist& epilogue_bl) const {
  for (size_t i = 0; i < m_descs.size(); i++) {
    ceph_assert(segment_bls[i].length() == get_segment_padded_len(i));
    if (segment_bls[i].length() > 0) {
      m_crypto->rx->authenticated_decrypt_update(segment_bls[i]);
      unpad_zero(segment_bls[i], m_descs[i].logical_len);
    }
  }
  ceph_assert(epilogue_bl.length() == sizeof(epilogue_secure_rev0_block_t) +
                                      get_auth_tag_len());
  m_crypto->rx->authenticated_decrypt_update_final(epilogue_bl);
  auto epilogue = reinterpret_cast<const epilogue_secure_rev0_block_t*>(
      epilogue_bl.c_str());
  return !(epilogue->late_flags & FRAME_LATE_FLAG_ABORTED);
}
// rev1 plain path, first segment: the segment's crc trails the segment
// on the wire -- decode it, strip it, and verify.
void FrameAssembler::disasm_first_crc_rev1(bufferlist& preamble_bl,
                                           bufferlist& segment_bl) const {
  ceph_assert(preamble_bl.length() == sizeof(preamble_block_t));
  if (m_descs[0].logical_len > 0) {
    ceph_assert(segment_bl.length() == m_descs[0].logical_len +
                                       FRAME_CRC_SIZE);
    bufferlist::const_iterator it(&segment_bl, m_descs[0].logical_len);
    uint32_t expected_crc;
    decode(expected_crc, it);
    // drop the trailing crc bytes, leaving only the segment payload
    segment_bl.splice(m_descs[0].logical_len, FRAME_CRC_SIZE);
    if (m_with_data_crc) {
      check_segment_crc(segment_bl, expected_crc);
    }
  } else {
    ceph_assert(segment_bl.length() == 0);
  }
}
// rev1 plain path, segments 1..n: crcs for these live in the epilogue
// (offset by one, since the first segment's crc was inline). Returns
// whether the frame completed (vs aborted by the sender).
bool FrameAssembler::disasm_remaining_crc_rev1(bufferlist segment_bls[],
                                               bufferlist& epilogue_bl) const {
  ceph_assert(epilogue_bl.length() == sizeof(epilogue_crc_rev1_block_t));
  auto epilogue = reinterpret_cast<const epilogue_crc_rev1_block_t*>(
      epilogue_bl.c_str());
  for (size_t i = 1; i < m_descs.size(); i++) {
    ceph_assert(segment_bls[i].length() == m_descs[i].logical_len);
    if (m_with_data_crc) {
      check_segment_crc(segment_bls[i], epilogue->crc_values[i - 1]);
    }
  }
  return check_epilogue_late_status(epilogue->late_status);
}
// rev1 secure path, first segment: the leading bytes of the segment
// travelled inline with the (already decrypted) preamble. Decrypt the
// remainder (its own auth-tagged unit), stitch the inline bytes back
// in front, and strip padding.
void FrameAssembler::disasm_first_secure_rev1(bufferlist& preamble_bl,
                                              bufferlist& segment_bl) const {
  ceph_assert(preamble_bl.length() == FRAME_PREAMBLE_WITH_INLINE_SIZE);
  uint32_t padded_len = get_segment_padded_len(0);
  if (padded_len > FRAME_PREAMBLE_INLINE_SIZE) {
    ceph_assert(segment_bl.length() == padded_len + get_auth_tag_len() -
                                       FRAME_PREAMBLE_INLINE_SIZE);
    m_crypto->rx->reset_rx_handler();
    m_crypto->rx->authenticated_decrypt_update_final(segment_bl);
    // prepend the inline buffer (already decrypted) to segment_bl
    bufferlist tmp;
    segment_bl.swap(tmp);
    preamble_bl.splice(sizeof(preamble_block_t), FRAME_PREAMBLE_INLINE_SIZE,
                       &segment_bl);
    segment_bl.claim_append(std::move(tmp));
  } else {
    // segment fit entirely in the inline buffer
    ceph_assert(segment_bl.length() == 0);
    preamble_bl.splice(sizeof(preamble_block_t), FRAME_PREAMBLE_INLINE_SIZE,
                       &segment_bl);
  }
  unpad_zero(segment_bl, m_descs[0].logical_len);
  ceph_assert(segment_bl.length() == m_descs[0].logical_len);
}
// rev1 secure path, segments 1..n plus epilogue: one decryption round
// covering all of them, with the auth tag attached to the epilogue.
// Returns whether the frame completed (vs aborted by the sender).
bool FrameAssembler::disasm_remaining_secure_rev1(
    bufferlist segment_bls[], bufferlist& epilogue_bl) const {
  m_crypto->rx->reset_rx_handler();
  for (size_t i = 1; i < m_descs.size(); i++) {
    ceph_assert(segment_bls[i].length() == get_segment_padded_len(i));
    if (segment_bls[i].length() > 0) {
      m_crypto->rx->authenticated_decrypt_update(segment_bls[i]);
      unpad_zero(segment_bls[i], m_descs[i].logical_len);
    }
  }
  ceph_assert(epilogue_bl.length() == sizeof(epilogue_secure_rev1_block_t) +
                                      get_auth_tag_len());
  m_crypto->rx->authenticated_decrypt_update_final(epilogue_bl);
  auto epilogue = reinterpret_cast<const epilogue_secure_rev1_block_t*>(
      epilogue_bl.c_str());
  return check_epilogue_late_status(epilogue->late_status);
}
// Disassembles a received frame: first segment, then the rest, then
// (if the frame was compressed) decompression. Returns false when the
// sender aborted the frame and it must be dropped.
bool FrameAssembler::disassemble_segments(bufferlist& preamble_bl,
    bufferlist segments_bls[], bufferlist& epilogue_bl) const {
  disassemble_first_segment(preamble_bl, segments_bls[0]);
  if (!disassemble_remaining_segments(segments_bls, epilogue_bl)) {
    return false;  // aborted by the sender
  }
  if (is_compressed()) {
    disassemble_decompress(segments_bls);
  }
  return true;
}
// Disassembles the first segment of a rev1 frame; for rev0 frames this
// is a no-op since everything is handled in
// disassemble_remaining_segments().
void FrameAssembler::disassemble_first_segment(bufferlist& preamble_bl,
                                               bufferlist& segment_bl) const {
  ceph_assert(!m_descs.empty());
  if (!m_is_rev1) {
    return;
  }
  if (m_crypto->rx) {
    disasm_first_secure_rev1(preamble_bl, segment_bl);
  } else {
    disasm_first_crc_rev1(preamble_bl, segment_bl);
  }
}
// Disassembles everything past the first segment, dispatching on
// protocol revision and security mode. Returns false if the sender
// aborted the frame.
bool FrameAssembler::disassemble_remaining_segments(
    bufferlist segment_bls[], bufferlist& epilogue_bl) const {
  ceph_assert(!m_descs.empty());
  if (!m_is_rev1) {
    // rev0 handles all segments plus the epilogue in one go
    return m_crypto->rx ? disasm_all_secure_rev0(segment_bls, epilogue_bl)
                        : disasm_all_crc_rev0(segment_bls, epilogue_bl);
  }
  if (m_descs.size() == 1) {
    // single-segment rev1 frames carry no epilogue
    ceph_assert(epilogue_bl.length() == 0);
    return true;
  }
  return m_crypto->rx
             ? disasm_remaining_secure_rev1(segment_bls, epilogue_bl)
             : disasm_remaining_crc_rev1(segment_bls, epilogue_bl);
}
// Debug dump: per-segment on-wire/logical sizes followed by the
// assembler's revision, crypto and compression state.
std::ostream& operator<<(std::ostream& os, const FrameAssembler& frame_asm) {
  if (!frame_asm.m_descs.empty()) {
    os << frame_asm.get_preamble_onwire_len();
    for (size_t i = 0; i < frame_asm.m_descs.size(); i++) {
      os << " + " << frame_asm.get_segment_onwire_len(i)
         << " (logical " << frame_asm.m_descs[i].logical_len
         << "/" << frame_asm.m_descs[i].align << ")";
    }
    os << " + " << frame_asm.get_epilogue_onwire_len() << " ";
  }
  os << "rev1=" << frame_asm.m_is_rev1
     << " rx=" << frame_asm.m_crypto->rx.get()
     << " tx=" << frame_asm.m_crypto->tx.get()
     << " comp rx=" << frame_asm.m_compression->rx.get()
     << " comp tx=" << frame_asm.m_compression->tx.get()
     << " compressed=" << frame_asm.is_compressed();
  return os;
}
// Attempts to compress every segment. All-or-nothing: if any segment
// fails to compress, the frame is sent uncompressed; otherwise the
// segments and their descriptors are replaced with the compressed data
// and the COMPRESSED flag is set.
void FrameAssembler::asm_compress(bufferlist segment_bls[]) {
  std::array<bufferlist, MAX_NUM_SEGMENTS> compressed;
  m_compression->tx->reset_handler(m_descs.size(), get_frame_logical_len());
  bool abort = false;
  for (size_t i = 0; (i < m_descs.size()) && !abort; i++) {
    auto out = m_compression->tx->compress(segment_bls[i]);
    if (!out) {
      abort = true;
    } else {
      compressed[i] = std::move(*out);
    }
  }
  if (!abort) {
    m_compression->tx->done();
    for (size_t i = 0; i < m_descs.size(); i++) {
      segment_bls[i].swap(compressed[i]);
      // descriptors must reflect the new (compressed) lengths
      m_descs[i].logical_len = segment_bls[i].length();
    }
    m_flags |= FRAME_EARLY_DATA_COMPRESSED;
  }
}
// Inflate every segment in place; throws FrameError if the rx handler
// rejects any segment.
void FrameAssembler::disassemble_decompress(bufferlist segment_bls[]) const {
  for (size_t i = 0; i < m_descs.size(); ++i) {
    if (auto plain_bl = m_compression->rx->decompress(segment_bls[i])) {
      segment_bls[i] = std::move(*plain_bl);
    } else {
      throw FrameError("Segment decompression failed");
    }
  }
}
} // namespace ceph::msgr::v2
| 20,453 | 36.121597 | 79 |
cc
|
null |
ceph-main/src/msg/async/frames_v2.h
|
#ifndef _MSG_ASYNC_FRAMES_V2_
#define _MSG_ASYNC_FRAMES_V2_
#include "include/types.h"
#include "common/Clock.h"
#include "crypto_onwire.h"
#include "compression_onwire.h"
#include <array>
#include <iosfwd>
#include <utility>
#include <boost/container/static_vector.hpp>
/**
* Protocol V2 Frame Structures
*
* Documentation in: doc/dev/msgr2.rst
**/
namespace ceph::msgr::v2 {
// We require these features from any peer, period, in order to encode
// a entity_addrvec_t.
const uint64_t msgr2_required = CEPH_FEATUREMASK_MSG_ADDR2;

// We additionally assume the peer has the below features *purely for
// the purpose of encoding the frames themselves*.  The only complex
// types in the frames are entity_addr_t and entity_addrvec_t, and we
// specifically want the peer to understand the (new in nautilus)
// TYPE_ANY.  We narrow this assumption to frames because we
// expect there may be future clients (the kernel) that understand
// msgr v2 and understand this encoding but don't necessarily have
// everything else that SERVER_NAUTILUS implies.  Yes, a fresh feature
// bit would be a cleaner approach, but those are scarce these days.
const uint64_t msgr2_frame_assumed =
    msgr2_required |
    CEPH_FEATUREMASK_SERVER_NAUTILUS;
/// On-wire frame type identifier, carried in preamble_block_t::tag.
/// Values are wire-visible and must not be renumbered.
enum class Tag : __u8 {
  HELLO = 1,
  AUTH_REQUEST,
  AUTH_BAD_METHOD,
  AUTH_REPLY_MORE,
  AUTH_REQUEST_MORE,
  AUTH_DONE,
  AUTH_SIGNATURE,
  CLIENT_IDENT,
  SERVER_IDENT,
  IDENT_MISSING_FEATURES,
  SESSION_RECONNECT,
  SESSION_RESET,
  SESSION_RETRY,
  SESSION_RETRY_GLOBAL,
  SESSION_RECONNECT_OK,
  WAIT,
  MESSAGE,
  KEEPALIVE2,
  KEEPALIVE2_ACK,
  ACK,
  COMPRESSION_REQUEST,
  COMPRESSION_DONE
};
/// On-wire descriptor for one frame segment: its length in bytes and
/// the alignment the receiver should use when allocating for it.
struct segment_t {
  // TODO: this will be dropped with support for `allocation policies`.
  // We need them because of the rx_buffers zero-copy optimization.
  static constexpr __u16 PAGE_SIZE_ALIGNMENT = 4096;

  static constexpr __u16 DEFAULT_ALIGNMENT = sizeof(void *);

  ceph_le32 length;
  ceph_le16 alignment;
} __attribute__((packed));
/// Symbolic indexes into a frame's segment array: message frames use
/// four segments (header/front/middle/data), control frames use one.
struct SegmentIndex {
  struct Msg {
    static constexpr std::size_t HEADER = 0;
    static constexpr std::size_t FRONT = 1;
    static constexpr std::size_t MIDDLE = 2;
    static constexpr std::size_t DATA = 3;
  };

  struct Control {
    static constexpr std::size_t PAYLOAD = 0;
  };
};
// Cipher block granularity: secure-mode payloads are padded to this.
static constexpr uint8_t CRYPTO_BLOCK_SIZE { 16 };
static constexpr std::size_t MAX_NUM_SEGMENTS = 4;

// V2 preamble consists of one or more preamble blocks depending on
// the number of segments a particular frame needs. Each block holds
// up to MAX_NUM_SEGMENTS segments and has its own CRC.
//
// XXX: currently the multi-segment facility is NOT implemented.
struct preamble_block_t {
  // Tag. For multi-segmented frames the value is the same
  // between subsequent preamble blocks.
  __u8 tag;

  // Number of segments to go in entire frame. First preamble block has
  // set this to just #segments, second #segments - MAX_NUM_SEGMENTS,
  // third to #segments - MAX_NUM_SEGMENTS and so on.
  __u8 num_segments;

  segment_t segments[MAX_NUM_SEGMENTS];
  __u8 flags;
  __u8 _reserved;

  // CRC32 for this single preamble block.
  ceph_le32 crc;
} __attribute__((packed));
static_assert(sizeof(preamble_block_t) % CRYPTO_BLOCK_SIZE == 0);
static_assert(std::is_standard_layout<preamble_block_t>::value);
/// msgr2.0 crc-mode epilogue: one CRC per segment plus the abort flag.
struct epilogue_crc_rev0_block_t {
  __u8 late_flags;  // FRAME_LATE_FLAG_ABORTED
  ceph_le32 crc_values[MAX_NUM_SEGMENTS];
} __attribute__((packed));
static_assert(std::is_standard_layout_v<epilogue_crc_rev0_block_t>);
/// msgr2.1 crc-mode epilogue: CRCs for segments 2..4 only (the first
/// segment carries its own CRC) plus the late-status nibble code.
struct epilogue_crc_rev1_block_t {
  __u8 late_status;  // FRAME_LATE_STATUS_*
  ceph_le32 crc_values[MAX_NUM_SEGMENTS - 1];
} __attribute__((packed));
static_assert(std::is_standard_layout_v<epilogue_crc_rev1_block_t>);
/// msgr2.0 secure-mode epilogue: abort flag padded to a cipher block.
struct epilogue_secure_rev0_block_t {
  __u8 late_flags;  // FRAME_LATE_FLAG_ABORTED
  __u8 padding[CRYPTO_BLOCK_SIZE - sizeof(late_flags)];
} __attribute__((packed));
static_assert(sizeof(epilogue_secure_rev0_block_t) % CRYPTO_BLOCK_SIZE == 0);
static_assert(std::is_standard_layout_v<epilogue_secure_rev0_block_t>);
// epilogue_secure_rev0_block_t with late_flags changed to late_status
struct epilogue_secure_rev1_block_t {
  __u8 late_status;  // FRAME_LATE_STATUS_*
  __u8 padding[CRYPTO_BLOCK_SIZE - sizeof(late_status)];
} __attribute__((packed));
static_assert(sizeof(epilogue_secure_rev1_block_t) % CRYPTO_BLOCK_SIZE == 0);
static_assert(std::is_standard_layout_v<epilogue_secure_rev1_block_t>);
static constexpr uint32_t FRAME_CRC_SIZE = 4;
static constexpr uint32_t FRAME_PREAMBLE_INLINE_SIZE = 48;
static_assert(FRAME_PREAMBLE_INLINE_SIZE % CRYPTO_BLOCK_SIZE == 0);
// just for performance, nothing should break otherwise
static_assert(sizeof(ceph_msg_header2) <= FRAME_PREAMBLE_INLINE_SIZE);
static constexpr uint32_t FRAME_PREAMBLE_WITH_INLINE_SIZE =
    sizeof(preamble_block_t) + FRAME_PREAMBLE_INLINE_SIZE;

// A frame can be aborted by the sender after transmitting the
// preamble and the first segment.  The remainder of the frame
// is filled with zeros, up until the epilogue.
//
// This flag is for msgr2.0.  Note that in crc mode, late_flags
// is not covered by any crc -- a single bit flip can result in
// a completed frame being dropped or in an aborted frame with
// garbage segment payloads being dispatched.
#define FRAME_LATE_FLAG_ABORTED           (1<<0)

// For msgr2.1, FRAME_LATE_STATUS_ABORTED has the same meaning
// as FRAME_LATE_FLAG_ABORTED and late_status replaces late_flags.
// Bit error detection in crc mode is achieved by using a 4-bit
// nibble per flag with two code words that are far apart in terms
// of Hamming Distance (HD=4, same as provided by CRC32-C for
// input lengths over ~5K).
#define FRAME_LATE_STATUS_ABORTED         0x1
#define FRAME_LATE_STATUS_COMPLETE        0xe
#define FRAME_LATE_STATUS_ABORTED_MASK    0xf

#define FRAME_LATE_STATUS_RESERVED_TRUE   0x10
#define FRAME_LATE_STATUS_RESERVED_FALSE  0xe0
#define FRAME_LATE_STATUS_RESERVED_MASK   0xf0

// For msgr 2.1, FRAME_EARLY_X flags are sent as part of epilogue.
//
// This flag indicates whether frame segments have been compressed by
// the sender, and is used in the segments' disassembling phase.
// (0x1 spelled lowercase for consistency with the other hex literals
// in this file; the value is unchanged.)
#define FRAME_EARLY_DATA_COMPRESSED       0x1
/// Thrown by frame disassembly on malformed, corrupt, or
/// undecompressible input.
struct FrameError : std::runtime_error {
  using runtime_error::runtime_error;
};
/// Assembles and disassembles msgr2 frames for both protocol revisions
/// (msgr2.0 and msgr2.1), in crc and secure modes, with optional
/// compression.  m_descs caches the per-segment layout of the frame
/// currently being processed.
class FrameAssembler {
public:
  // crypto must be non-null
  FrameAssembler(const ceph::crypto::onwire::rxtx_t* crypto, bool is_rev1,
                 bool with_data_crc, const ceph::compression::onwire::rxtx_t* compression)
      : m_crypto(crypto), m_is_rev1(is_rev1), m_with_data_crc(with_data_crc),
        m_compression(compression) {}

  // Switch protocol revision; drops any cached per-frame state.
  void set_is_rev1(bool is_rev1) {
    m_descs.clear();
    m_flags = 0;
    m_is_rev1 = is_rev1;
  }

  bool get_is_rev1() {
    return m_is_rev1;
  }

  size_t get_num_segments() const {
    ceph_assert(!m_descs.empty());
    return m_descs.size();
  }

  uint32_t get_segment_logical_len(size_t seg_idx) const {
    ceph_assert(seg_idx < m_descs.size());
    return m_descs[seg_idx].logical_len;
  }

  uint16_t get_segment_align(size_t seg_idx) const {
    ceph_assert(seg_idx < m_descs.size());
    return m_descs[seg_idx].align;
  }

  // Preamble:
  //
  //   preamble_block_t
  //   [preamble inline buffer + auth tag -- only in msgr2.1 secure mode]
  //
  // The preamble is generated unconditionally.
  //
  // In msgr2.1 secure mode, the first segment is inlined into the
  // preamble inline buffer, either fully or partially.
  uint32_t get_preamble_onwire_len() const {
    if (m_is_rev1 && m_crypto->rx) {
      return FRAME_PREAMBLE_WITH_INLINE_SIZE + get_auth_tag_len();
    }
    return sizeof(preamble_block_t);
  }

  // Segment:
  //
  //   segment payload
  //   [zero padding -- only in secure mode]
  //   [crc or auth tag -- only in msgr2.1, only for the first segment]
  //
  // For an empty segment, nothing is generated.  In msgr2.1 secure
  // mode, if the first segment gets fully inlined into the preamble
  // inline buffer, it is considered empty.
  uint32_t get_segment_onwire_len(size_t seg_idx) const {
    ceph_assert(seg_idx < m_descs.size());
    if (m_crypto->rx) {
      uint32_t padded_len = get_segment_padded_len(seg_idx);
      if (m_is_rev1 && seg_idx == 0) {
        if (padded_len > FRAME_PREAMBLE_INLINE_SIZE) {
          return padded_len + get_auth_tag_len() - FRAME_PREAMBLE_INLINE_SIZE;
        }
        return 0;
      }
      return padded_len;
    }
    if (m_is_rev1 && seg_idx == 0 && m_descs[0].logical_len > 0) {
      return m_descs[0].logical_len + FRAME_CRC_SIZE;
    }
    return m_descs[seg_idx].logical_len;
  }

  // Epilogue:
  //
  //   epilogue_*_block_t
  //   [auth tag -- only in secure mode]
  //
  // For msgr2.0, the epilogue is generated unconditionally.  In
  // crc mode, it stores crcs for all segments; the preamble is
  // covered by its own crc.  In secure mode, the epilogue auth tag
  // covers the whole frame.
  //
  // For msgr2.1, the epilogue is generated only if the frame has
  // more than one segment (i.e. at least one of second to fourth
  // segments is not empty).  In crc mode, it stores crcs for
  // second to fourth segments; the preamble and the first segment
  // are covered by their own crcs.  In secure mode, the epilogue
  // auth tag covers second to fourth segments; the preamble and the
  // first segment (if not fully inlined into the preamble inline
  // buffer) are covered by their own auth tags.
  //
  // Note that the auth tag format is an implementation detail of a
  // particular cipher.  FrameAssembler is concerned only with where
  // the auth tag is placed (at the end of the ciphertext) and how
  // long it is (RxHandler::get_extra_size_at_final()).  This is to
  // provide room for other encryption algorithms: currently we use
  // AES-128-GCM with 16-byte tags, but it is possible to switch to
  // e.g. AES-128-CBC + HMAC-SHA512 without affecting the protocol
  // (except for the cipher negotiation, of course).
  //
  // Additionally, each variant of the epilogue contains either
  // late_flags or late_status field that directs handling of frames
  // with more than one segment.
  uint32_t get_epilogue_onwire_len() const {
    ceph_assert(!m_descs.empty());
    if (m_is_rev1 && m_descs.size() == 1) {
      return 0;
    }
    if (m_crypto->rx) {
      return (m_is_rev1 ? sizeof(epilogue_secure_rev1_block_t) :
              sizeof(epilogue_secure_rev0_block_t)) + get_auth_tag_len();
    }
    return m_is_rev1 ? sizeof(epilogue_crc_rev1_block_t) :
        sizeof(epilogue_crc_rev0_block_t);
  }

  uint64_t get_frame_logical_len() const;
  uint64_t get_frame_onwire_len() const;

  bufferlist assemble_frame(Tag tag, bufferlist segment_bls[],
                            const uint16_t segment_aligns[],
                            size_t segment_count);

  Tag disassemble_preamble(bufferlist& preamble_bl);

  bool disassemble_segments(bufferlist& preamble_bl,
                            bufferlist segments_bls[],
                            bufferlist& epilogue_bl) const;

private:
  // Cached layout of one segment of the in-flight frame.
  struct segment_desc_t {
    uint32_t logical_len;
    uint16_t align;
  };

  // Length after padding up to the cipher block size (secure mode).
  uint32_t get_segment_padded_len(size_t seg_idx) const {
    return p2roundup<uint32_t>(m_descs[seg_idx].logical_len,
                               CRYPTO_BLOCK_SIZE);
  }

  uint32_t get_auth_tag_len() const {
    return m_crypto->rx->get_extra_size_at_final();
  }

  bool is_compressed() const {
    return m_flags & FRAME_EARLY_DATA_COMPRESSED;
  }

  void asm_compress(bufferlist segment_bls[]);

  bufferlist asm_crc_rev0(const preamble_block_t& preamble,
                          bufferlist segment_bls[]) const;
  bufferlist asm_secure_rev0(const preamble_block_t& preamble,
                             bufferlist segment_bls[]) const;
  bufferlist asm_crc_rev1(const preamble_block_t& preamble,
                          bufferlist segment_bls[]) const;
  bufferlist asm_secure_rev1(const preamble_block_t& preamble,
                             bufferlist segment_bls[]) const;

  // Like msgr1, and unlike msgr2.0, msgr2.1 allows interpreting the
  // first segment before reading in the rest of the frame.
  //
  // For msgr2.1 (set_is_rev1(true)), you may:
  //
  //   - read in the first segment
  //   - call disassemble_first_segment()
  //   - use the contents of the first segment, for example to
  //     look up user-provided buffers based on ceph_msg_header2::tid
  //   - read in the remaining segments, possibly directly into
  //     user-provided buffers
  //   - read in epilogue
  //   - call disassemble_remaining_segments()
  //   - call disasm_all_decompress()
  //
  // For msgr2.0 (set_is_rev1(false)), disassemble_first_segment() is
  // a noop.  To accommodate, disassemble_remaining_segments() always
  // takes all segments and skips over the first segment in msgr2.1
  // case.  You must:
  //
  //   - read in all segments
  //   - read in epilogue
  //   - call disassemble_remaining_segments()
  //   - call disasm_all_decompress()
  //
  // disassemble_remaining_segments() returns true if the frame is
  // ready for dispatching, or false if it was aborted by the sender
  // and must be dropped.
  void disassemble_first_segment(bufferlist& preamble_bl,
                                 bufferlist& segment_bl) const;
  bool disassemble_remaining_segments(bufferlist segment_bls[],
                                      bufferlist& epilogue_bl) const;
  void disassemble_decompress(bufferlist segment_bls[]) const;

  bool disasm_all_crc_rev0(bufferlist segment_bls[],
                           bufferlist& epilogue_bl) const;
  bool disasm_all_secure_rev0(bufferlist segment_bls[],
                              bufferlist& epilogue_bl) const;
  void disasm_first_crc_rev1(bufferlist& preamble_bl,
                             bufferlist& segment_bl) const;
  bool disasm_remaining_crc_rev1(bufferlist segment_bls[],
                                 bufferlist& epilogue_bl) const;
  void disasm_first_secure_rev1(bufferlist& preamble_bl,
                                bufferlist& segment_bl) const;
  bool disasm_remaining_secure_rev1(bufferlist segment_bls[],
                                    bufferlist& epilogue_bl) const;

  void fill_preamble(Tag tag, preamble_block_t& preamble) const;

  friend std::ostream& operator<<(std::ostream& os,
                                  const FrameAssembler& frame_asm);

  boost::container::static_vector<segment_desc_t, MAX_NUM_SEGMENTS> m_descs;
  // FRAME_EARLY_* flags for the in-flight frame.  Default-initialized
  // to 0: previously this was left uninitialized until set_is_rev1()
  // was called, yet is_compressed() and asm_compress() read/modify it.
  __u8 m_flags = 0;
  const ceph::crypto::onwire::rxtx_t* m_crypto;
  bool m_is_rev1;  // msgr2.1?
  bool m_with_data_crc;
  const ceph::compression::onwire::rxtx_t* m_compression;
};
/// CRTP base for all frame types: holds one bufferlist per segment and
/// hands them to the FrameAssembler for on-wire encoding.  The number
/// of segments and their alignments are fixed at compile time by the
/// SegmentAlignmentVs pack.
template <class T, uint16_t... SegmentAlignmentVs>
struct Frame {
  static constexpr size_t SegmentsNumV = sizeof...(SegmentAlignmentVs);
  static_assert(SegmentsNumV > 0 && SegmentsNumV <= MAX_NUM_SEGMENTS);
protected:
  std::array<ceph::bufferlist, SegmentsNumV> segments;

private:
  static constexpr std::array<uint16_t, SegmentsNumV> alignments {
    SegmentAlignmentVs...
  };

public:
  /// Assemble this frame into a single on-wire bufferlist; the length
  /// is asserted to match the assembler's computed on-wire length.
  ceph::bufferlist get_buffer(FrameAssembler& tx_frame_asm) {
    auto bl = tx_frame_asm.assemble_frame(T::tag, segments.data(),
                                          alignments.data(), SegmentsNumV);
    ceph_assert(bl.length() == tx_frame_asm.get_frame_onwire_len());
    return bl;
  }
};
// ControlFrames are used to manage transceiver state (like connections) and
// orchestrate transfers of MessageFrames. They use only single segment with
// marshalling facilities -- derived classes specify frame structure through
// Args pack while ControlFrame provides common encode/decode machinery.
template <class C, typename... Args>
class ControlFrame : public Frame<C, segment_t::DEFAULT_ALIGNMENT /* single segment */> {
protected:
  ceph::bufferlist &get_payload_segment() {
    return this->segments[SegmentIndex::Control::PAYLOAD];
  }

  // this tuple is only used when decoding values from a payload segment
  std::tuple<Args...> _values;

  // FIXME: for now, we assume specific features for the purposes of encoding
  // the frames themselves (*not* messages in message frames!).
  uint64_t features = msgr2_frame_assumed;

  // Encode one value into the payload segment.  std::vector<uint32_t>
  // gets a hand-rolled length-prefixed encoding; everything else uses
  // the generic encode().
  template <typename T>
  inline void _encode_payload_each(T &t) {
    if constexpr (std::is_same<T, std::vector<uint32_t> const>()) {
      encode((uint32_t)t.size(), this->get_payload_segment(), features);
      for (const auto &elem : t) {
        encode(elem, this->get_payload_segment(), features);
      }
    } else {
      encode(t, this->get_payload_segment(), features);
    }
  }

  // Decode one value from the payload iterator, mirroring
  // _encode_payload_each().
  template <typename T>
  inline void _decode_payload_each(T &t, bufferlist::const_iterator &ti) const {
    if constexpr (std::is_same<T, std::vector<uint32_t>>()) {
      uint32_t size;
      decode(size, ti);
      t.resize(size);
      for (uint32_t i = 0; i < size; ++i) {
        decode(t[i], ti);
      }
    } else {
      decode(t, ti);
    }
  }

  template <std::size_t... Is>
  inline void _decode_payload(bufferlist::const_iterator &ti,
                              std::index_sequence<Is...>) const {
    (_decode_payload_each((Args &)std::get<Is>(_values), ti), ...);
  }

  // Accessor used by derived classes' named getters.
  template <std::size_t N>
  inline decltype(auto) get_val() {
    return std::get<N>(_values);
  }

  ControlFrame()
    : Frame<C, segment_t::DEFAULT_ALIGNMENT /* single segment */>() {
  }

  void _encode(const Args &... args) {
    (_encode_payload_each(args), ...);
  }

  void _decode(const ceph::bufferlist &bl) {
    auto ti = bl.cbegin();
    _decode_payload(ti, std::index_sequence_for<Args...>());
  }

public:
  /// Build a frame from field values.
  static C Encode(const Args &... args) {
    C c;
    c._encode(args...);
    return c;
  }

  /// Parse a frame from a received payload segment.
  static C Decode(const ceph::bufferlist &payload) {
    C c;
    c._decode(payload);
    return c;
  }
};
/// Tag::HELLO control frame; payload: entity type + peer address.
struct HelloFrame : public ControlFrame<HelloFrame,
                                        uint8_t,          // entity type
                                        entity_addr_t> {  // peer address
  static const Tag tag = Tag::HELLO;
  using ControlFrame::Encode;
  using ControlFrame::Decode;

  inline uint8_t &entity_type() { return get_val<0>(); }
  inline entity_addr_t &peer_addr() { return get_val<1>(); }

protected:
  using ControlFrame::ControlFrame;
};
/// Tag::AUTH_REQUEST control frame; payload: auth method, preferred
/// connection modes, opaque auth payload.
struct AuthRequestFrame : public ControlFrame<AuthRequestFrame,
                                              uint32_t,                // auth method
                                              std::vector<uint32_t>,   // preferred modes
                                              bufferlist> {            // auth payload
  static const Tag tag = Tag::AUTH_REQUEST;
  using ControlFrame::Encode;
  using ControlFrame::Decode;

  inline uint32_t &method() { return get_val<0>(); }
  inline std::vector<uint32_t> &preferred_modes() { return get_val<1>(); }
  inline bufferlist &auth_payload() { return get_val<2>(); }

protected:
  using ControlFrame::ControlFrame;
};
/// Tag::AUTH_BAD_METHOD control frame; payload: rejected method, result
/// code, and the methods/modes the server allows.
struct AuthBadMethodFrame : public ControlFrame<AuthBadMethodFrame,
                                                uint32_t,                // method
                                                int32_t,                 // result
                                                std::vector<uint32_t>,   // allowed methods
                                                std::vector<uint32_t>> { // allowed modes
  static const Tag tag = Tag::AUTH_BAD_METHOD;
  using ControlFrame::Encode;
  using ControlFrame::Decode;

  inline uint32_t &method() { return get_val<0>(); }
  inline int32_t &result() { return get_val<1>(); }
  inline std::vector<uint32_t> &allowed_methods() { return get_val<2>(); }
  inline std::vector<uint32_t> &allowed_modes() { return get_val<3>(); }

protected:
  using ControlFrame::ControlFrame;
};
/// Tag::AUTH_REPLY_MORE control frame; payload: opaque auth payload.
struct AuthReplyMoreFrame : public ControlFrame<AuthReplyMoreFrame,
                                                bufferlist> {  // auth payload
  static const Tag tag = Tag::AUTH_REPLY_MORE;
  using ControlFrame::Encode;
  using ControlFrame::Decode;

  inline bufferlist &auth_payload() { return get_val<0>(); }

protected:
  using ControlFrame::ControlFrame;
};
/// Tag::AUTH_REQUEST_MORE control frame; payload: opaque auth payload.
struct AuthRequestMoreFrame : public ControlFrame<AuthRequestMoreFrame,
                                                  bufferlist> {  // auth payload
  static const Tag tag = Tag::AUTH_REQUEST_MORE;
  using ControlFrame::Encode;
  using ControlFrame::Decode;

  inline bufferlist &auth_payload() { return get_val<0>(); }

protected:
  using ControlFrame::ControlFrame;
};
/// Tag::AUTH_DONE control frame; payload: global id, negotiated
/// connection mode, auth-method-specific payload.
struct AuthDoneFrame : public ControlFrame<AuthDoneFrame,
                                           uint64_t,     // global id
                                           uint32_t,     // connection mode
                                           bufferlist> { // auth method payload
  static const Tag tag = Tag::AUTH_DONE;
  using ControlFrame::Encode;
  using ControlFrame::Decode;

  inline uint64_t &global_id() { return get_val<0>(); }
  inline uint32_t &con_mode() { return get_val<1>(); }
  inline bufferlist &auth_payload() { return get_val<2>(); }

protected:
  using ControlFrame::ControlFrame;
};
/// Tag::AUTH_SIGNATURE control frame; payload: a SHA-256 digest.
struct AuthSignatureFrame
    : public ControlFrame<AuthSignatureFrame,
                          sha256_digest_t> {
  static const Tag tag = Tag::AUTH_SIGNATURE;
  using ControlFrame::Encode;
  using ControlFrame::Decode;

  inline sha256_digest_t &signature() { return get_val<0>(); }

protected:
  using ControlFrame::ControlFrame;
};
/// Tag::CLIENT_IDENT control frame; payload: client addresses, target
/// address, global id/seq, feature masks, flags and client cookie.
struct ClientIdentFrame
    : public ControlFrame<ClientIdentFrame,
                          entity_addrvec_t,  // my addresses
                          entity_addr_t,     // target address
                          int64_t,           // global_id
                          uint64_t,          // global seq
                          uint64_t,          // supported features
                          uint64_t,          // required features
                          uint64_t,          // flags
                          uint64_t> {        // client cookie
  static const Tag tag = Tag::CLIENT_IDENT;
  using ControlFrame::Encode;
  using ControlFrame::Decode;

  inline entity_addrvec_t &addrs() { return get_val<0>(); }
  inline entity_addr_t &target_addr() { return get_val<1>(); }
  inline int64_t &gid() { return get_val<2>(); }
  inline uint64_t &global_seq() { return get_val<3>(); }
  inline uint64_t &supported_features() { return get_val<4>(); }
  inline uint64_t &required_features() { return get_val<5>(); }
  inline uint64_t &flags() { return get_val<6>(); }
  inline uint64_t &cookie() { return get_val<7>(); }

protected:
  using ControlFrame::ControlFrame;
};
/// Tag::SERVER_IDENT control frame; like ClientIdentFrame but without
/// a target address, and carrying the server cookie.
struct ServerIdentFrame
    : public ControlFrame<ServerIdentFrame,
                          entity_addrvec_t,  // my addresses
                          int64_t,           // global_id
                          uint64_t,          // global seq
                          uint64_t,          // supported features
                          uint64_t,          // required features
                          uint64_t,          // flags
                          uint64_t> {        // server cookie
  static const Tag tag = Tag::SERVER_IDENT;
  using ControlFrame::Encode;
  using ControlFrame::Decode;

  inline entity_addrvec_t &addrs() { return get_val<0>(); }
  inline int64_t &gid() { return get_val<1>(); }
  inline uint64_t &global_seq() { return get_val<2>(); }
  inline uint64_t &supported_features() { return get_val<3>(); }
  inline uint64_t &required_features() { return get_val<4>(); }
  inline uint64_t &flags() { return get_val<5>(); }
  inline uint64_t &cookie() { return get_val<6>(); }

protected:
  using ControlFrame::ControlFrame;
};
/// Tag::SESSION_RECONNECT control frame; payload: addresses, both
/// session cookies, and the global/connect/message sequence numbers.
struct ReconnectFrame
    : public ControlFrame<ReconnectFrame,
                          entity_addrvec_t,  // my addresses
                          uint64_t,          // client cookie
                          uint64_t,          // server cookie
                          uint64_t,          // global sequence
                          uint64_t,          // connect sequence
                          uint64_t> {        // message sequence
  static const Tag tag = Tag::SESSION_RECONNECT;
  using ControlFrame::Encode;
  using ControlFrame::Decode;

  inline entity_addrvec_t &addrs() { return get_val<0>(); }
  inline uint64_t &client_cookie() { return get_val<1>(); }
  inline uint64_t &server_cookie() { return get_val<2>(); }
  inline uint64_t &global_seq() { return get_val<3>(); }
  inline uint64_t &connect_seq() { return get_val<4>(); }
  inline uint64_t &msg_seq() { return get_val<5>(); }

protected:
  using ControlFrame::ControlFrame;
};
/// Tag::SESSION_RESET control frame; payload: full-reset flag.
struct ResetFrame : public ControlFrame<ResetFrame,
                                        bool> {  // full reset
  static const Tag tag = Tag::SESSION_RESET;
  using ControlFrame::Encode;
  using ControlFrame::Decode;

  inline bool &full() { return get_val<0>(); }

protected:
  using ControlFrame::ControlFrame;
};
/// Tag::SESSION_RETRY control frame; payload: connection sequence.
struct RetryFrame : public ControlFrame<RetryFrame,
                                        uint64_t> {  // connection seq
  static const Tag tag = Tag::SESSION_RETRY;
  using ControlFrame::Encode;
  using ControlFrame::Decode;

  inline uint64_t &connect_seq() { return get_val<0>(); }

protected:
  using ControlFrame::ControlFrame;
};
/// Tag::SESSION_RETRY_GLOBAL control frame; payload: global sequence.
struct RetryGlobalFrame : public ControlFrame<RetryGlobalFrame,
                                              uint64_t> {  // global seq
  static const Tag tag = Tag::SESSION_RETRY_GLOBAL;
  using ControlFrame::Encode;
  using ControlFrame::Decode;

  inline uint64_t &global_seq() { return get_val<0>(); }

protected:
  using ControlFrame::ControlFrame;
};
/// Tag::WAIT control frame; carries no payload.
struct WaitFrame : public ControlFrame<WaitFrame> {
  static const Tag tag = Tag::WAIT;
  using ControlFrame::Encode;
  using ControlFrame::Decode;

protected:
  using ControlFrame::ControlFrame;
};
/// Tag::SESSION_RECONNECT_OK control frame; payload: message sequence.
struct ReconnectOkFrame : public ControlFrame<ReconnectOkFrame,
                                              uint64_t> {  // message seq
  static const Tag tag = Tag::SESSION_RECONNECT_OK;
  using ControlFrame::Encode;
  using ControlFrame::Decode;

  inline uint64_t &msg_seq() { return get_val<0>(); }

protected:
  using ControlFrame::ControlFrame;
};
/// Tag::IDENT_MISSING_FEATURES control frame; payload: mask of the
/// features the peer lacks.
struct IdentMissingFeaturesFrame
    : public ControlFrame<IdentMissingFeaturesFrame,
                          uint64_t> {  // missing features mask
  static const Tag tag = Tag::IDENT_MISSING_FEATURES;
  using ControlFrame::Encode;
  using ControlFrame::Decode;

  inline uint64_t &features() { return get_val<0>(); }

protected:
  using ControlFrame::ControlFrame;
};
/// Tag::KEEPALIVE2 control frame; payload: a timestamp.  The
/// zero-argument Encode() overload stamps the current wall-clock time.
struct KeepAliveFrame : public ControlFrame<KeepAliveFrame,
                                            utime_t> {  // timestamp
  static const Tag tag = Tag::KEEPALIVE2;
  using ControlFrame::Encode;
  using ControlFrame::Decode;

  static KeepAliveFrame Encode() {
    return KeepAliveFrame::Encode(ceph_clock_now());
  }

  inline utime_t &timestamp() { return get_val<0>(); }

protected:
  using ControlFrame::ControlFrame;
};
/// Tag::KEEPALIVE2_ACK control frame; payload: the echoed timestamp.
struct KeepAliveFrameAck : public ControlFrame<KeepAliveFrameAck,
                                               utime_t> {  // ack timestamp
  static const Tag tag = Tag::KEEPALIVE2_ACK;
  using ControlFrame::Encode;
  using ControlFrame::Decode;

  inline utime_t &timestamp() { return get_val<0>(); }

protected:
  using ControlFrame::ControlFrame;
};
/// Tag::ACK control frame; payload: acknowledged message sequence.
struct AckFrame : public ControlFrame<AckFrame,
                                      uint64_t> {  // message sequence
  static const Tag tag = Tag::ACK;
  using ControlFrame::Encode;
  using ControlFrame::Decode;

  inline uint64_t &seq() { return get_val<0>(); }

protected:
  using ControlFrame::ControlFrame;
};
/// Fixed-capacity container of received segment payloads, one
/// bufferlist per segment.
using segment_bls_t =
    boost::container::static_vector<bufferlist, MAX_NUM_SEGMENTS>;
// This class is used for encoding/decoding header of the message frame.
// Body is processed almost independently with the sole junction point
// being the `extra_payload_len` passed to get_buffer().
struct MessageFrame : public Frame<MessageFrame,
                                   /* four segments */
                                   segment_t::DEFAULT_ALIGNMENT,
                                   segment_t::DEFAULT_ALIGNMENT,
                                   segment_t::DEFAULT_ALIGNMENT,
                                   segment_t::PAGE_SIZE_ALIGNMENT> {
  static const Tag tag = Tag::MESSAGE;

  /// Build a message frame from the raw header and the three payload
  /// bufferlists.  The header bytes are appended verbatim.
  static MessageFrame Encode(const ceph_msg_header2 &msg_header,
                             const ceph::bufferlist &front,
                             const ceph::bufferlist &middle,
                             const ceph::bufferlist &data) {
    MessageFrame f;
    f.segments[SegmentIndex::Msg::HEADER].append(
        reinterpret_cast<const char*>(&msg_header), sizeof(msg_header));
    f.segments[SegmentIndex::Msg::FRONT] = front;
    f.segments[SegmentIndex::Msg::MIDDLE] = middle;
    f.segments[SegmentIndex::Msg::DATA] = data;
    return f;
  }

  static MessageFrame Decode(segment_bls_t& recv_segments) {
    MessageFrame f;
    // transfer segments' bufferlists. If a MessageFrame contains fewer
    // than SegmentsNumV segments, the missing ones will be seen as zeroed.
    for (__u8 idx = 0; idx < std::size(recv_segments); idx++) {
      f.segments[idx] = std::move(recv_segments[idx]);
    }
    return f;
  }

  /// View the header segment's bytes as a ceph_msg_header2.
  inline const ceph_msg_header2 &header() {
    auto& hdrbl = segments[SegmentIndex::Msg::HEADER];
    return reinterpret_cast<const ceph_msg_header2&>(*hdrbl.c_str());
  }

  ceph::bufferlist &front() {
    return segments[SegmentIndex::Msg::FRONT];
  }

  ceph::bufferlist &middle() {
    return segments[SegmentIndex::Msg::MIDDLE];
  }

  ceph::bufferlist &data() {
    return segments[SegmentIndex::Msg::DATA];
  }

  uint32_t front_len() const {
    return segments[SegmentIndex::Msg::FRONT].length();
  }

  uint32_t middle_len() const {
    return segments[SegmentIndex::Msg::MIDDLE].length();
  }

  uint32_t data_len() const {
    return segments[SegmentIndex::Msg::DATA].length();
  }

protected:
  using Frame::Frame;
};
/// Tag::COMPRESSION_REQUEST control frame; payload: whether to compress
/// plus the list of preferred compression methods.
struct CompressionRequestFrame : public ControlFrame<CompressionRequestFrame,
                                                     bool,                    // is compress
                                                     std::vector<uint32_t>> { // preferred methods
  static const Tag tag = Tag::COMPRESSION_REQUEST;
  using ControlFrame::Encode;
  using ControlFrame::Decode;

  inline bool &is_compress() { return get_val<0>(); }
  inline std::vector<uint32_t> &preferred_methods() { return get_val<1>(); }

protected:
  using ControlFrame::ControlFrame;
};
/// Tag::COMPRESSION_DONE control frame; payload: whether compression was
/// accepted and the selected method.
struct CompressionDoneFrame : public ControlFrame<CompressionDoneFrame,
                                                  bool,       // is compress
                                                  uint32_t> { // method
  static const Tag tag = Tag::COMPRESSION_DONE;
  using ControlFrame::Encode;
  using ControlFrame::Decode;

  inline bool &is_compress() { return get_val<0>(); }
  inline uint32_t &method() { return get_val<1>(); }

protected:
  using ControlFrame::ControlFrame;
};
} // namespace ceph::msgr::v2
#endif // _MSG_ASYNC_FRAMES_V2_
| 30,923 | 33.321865 | 91 |
h
|
null |
ceph-main/src/msg/async/net_handler.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 UnitedStack <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <arpa/inet.h>
#include "net_handler.h"
#include "common/debug.h"
#include "common/errno.h"
#include "include/compat.h"
#include "include/sock_compat.h"
#define dout_subsys ceph_subsys_ms
#undef dout_prefix
#define dout_prefix *_dout << "NetHandler "
namespace ceph{
// Create a close-on-exec TCP stream socket for the given address
// family, optionally with SO_REUSEADDR set.
// Returns the socket fd on success, or -errno on failure.
int NetHandler::create_socket(int domain, bool reuse_addr)
{
  int s;
  int r = 0;

  if ((s = socket_cloexec(domain, SOCK_STREAM, 0)) == -1) {
    r = ceph_sock_errno();
    lderr(cct) << __func__ << " couldn't create socket " << cpp_strerror(r) << dendl;
    return -r;
  }

#if !defined(__FreeBSD__)
  /* Make sure connection-intensive things like the benchmark
   * will be able to close/open sockets a zillion of times */
  if (reuse_addr) {
    int on = 1;
    if (::setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (SOCKOPT_VAL_TYPE)&on, sizeof(on)) == -1) {
      r = ceph_sock_errno();
      // use cpp_strerror() (thread-safe), consistent with every other
      // error path in this file; strerror() is not thread-safe
      lderr(cct) << __func__ << " setsockopt SO_REUSEADDR failed: "
                 << cpp_strerror(r) << dendl;
      compat_closesocket(s);
      return -r;
    }
  }
#endif

  return s;
}
// Put socket `sd` into non-blocking mode (ioctlsocket on Windows,
// fcntl O_NONBLOCK elsewhere).  Returns 0 on success, negative on error.
int NetHandler::set_nonblock(int sd)
{
  int r = 0;
#ifdef _WIN32
  ULONG mode = 1;
  r = ioctlsocket(sd, FIONBIO, &mode);
  if (r) {
    lderr(cct) << __func__ << " ioctlsocket(FIONBIO) failed: " << r
               << " " << WSAGetLastError() << dendl;
    return -r;
  }
#else
  int flags;

  /* Set the socket nonblocking.
   * Note that fcntl(2) for F_GETFL and F_SETFL can't be
   * interrupted by a signal. */
  if ((flags = fcntl(sd, F_GETFL)) < 0 ) {
    r = ceph_sock_errno();
    lderr(cct) << __func__ << " fcntl(F_GETFL) failed: " << cpp_strerror(r) << dendl;
    return -r;
  }
  // preserve the existing flags; only add O_NONBLOCK
  if (fcntl(sd, F_SETFL, flags | O_NONBLOCK) < 0) {
    r = ceph_sock_errno();
    lderr(cct) << __func__ << " fcntl(F_SETFL,O_NONBLOCK): " << cpp_strerror(r) << dendl;
    return -r;
  }
#endif

  return 0;
}
// Apply TCP_NODELAY (if `nodelay`), SO_RCVBUF (if `size` > 0) and,
// where available, SO_NOSIGPIPE to socket `sd`.  Failures are logged
// but non-fatal; returns the negated errno of the last failed option
// (0 if the last attempted option succeeded).
int NetHandler::set_socket_options(int sd, bool nodelay, int size)
{
  int r = 0;
  // disable Nagle algorithm?
  if (nodelay) {
    int flag = 1;
    r = ::setsockopt(sd, IPPROTO_TCP, TCP_NODELAY, (SOCKOPT_VAL_TYPE)&flag, sizeof(flag));
    if (r < 0) {
      r = ceph_sock_errno();
      ldout(cct, 0) << "couldn't set TCP_NODELAY: " << cpp_strerror(r) << dendl;
    }
  }
  if (size) {
    r = ::setsockopt(sd, SOL_SOCKET, SO_RCVBUF, (SOCKOPT_VAL_TYPE)&size, sizeof(size));
    if (r < 0)  {
      r = ceph_sock_errno();
      ldout(cct, 0) << "couldn't set SO_RCVBUF to " << size << ": " << cpp_strerror(r) << dendl;
    }
  }

  // block ESIGPIPE
#ifdef CEPH_USE_SO_NOSIGPIPE
  int val = 1;
  r = ::setsockopt(sd, SOL_SOCKET, SO_NOSIGPIPE, (SOCKOPT_VAL_TYPE)&val, sizeof(val));
  if (r) {
    r = ceph_sock_errno();
    ldout(cct,0) << "couldn't set SO_NOSIGPIPE: " << cpp_strerror(r) << dendl;
  }
#endif
  return -r;
}
// Best-effort: mark socket `sd` with DSCP class CS6 (IP_TOS /
// IPV6_TCLASS, per address family) and then SO_PRIORITY `prio`.
// No-op when prio < 0 or the platform lacks SO_PRIORITY; failures
// are logged and ignored.
void NetHandler::set_priority(int sd, int prio, int domain)
{
#ifdef SO_PRIORITY
  if (prio < 0) {
    return;
  }
  int r = -1;
#ifdef IPTOS_CLASS_CS6
  int iptos = IPTOS_CLASS_CS6;
  switch (domain) {
  case AF_INET:
    r = ::setsockopt(sd, IPPROTO_IP, IP_TOS, (SOCKOPT_VAL_TYPE)&iptos, sizeof(iptos));
    break;
  case AF_INET6:
    r = ::setsockopt(sd, IPPROTO_IPV6, IPV6_TCLASS, (SOCKOPT_VAL_TYPE)&iptos, sizeof(iptos));
    break;
  default:
    lderr(cct) << "couldn't set ToS of unknown family (" << domain << ")"
               << " to " << iptos << dendl;
    return;
  }
  if (r < 0) {
    r = ceph_sock_errno();
    ldout(cct,0) << "couldn't set TOS to " << iptos
                 << ": " << cpp_strerror(r) << dendl;
  }
#endif // IPTOS_CLASS_CS6
  // setsockopt(IPTOS_CLASS_CS6) sets the priority of the socket as 0.
  // See http://goo.gl/QWhvsD and http://goo.gl/laTbjT
  // We need to call setsockopt(SO_PRIORITY) after it.
  r = ::setsockopt(sd, SOL_SOCKET, SO_PRIORITY, (SOCKOPT_VAL_TYPE)&prio, sizeof(prio));
  if (r < 0) {
    r = ceph_sock_errno();
    ldout(cct, 0) << __func__ << " couldn't set SO_PRIORITY to " << prio
                  << ": " << cpp_strerror(r) << dendl;
  }
#else
  return;
#endif // SO_PRIORITY
}
// Create a socket and connect it to `addr`, optionally non-blocking
// and optionally bound to `bind_addr` first (port 0, i.e. source
// address only).  Returns the connected (or in-progress, if nonblock)
// socket fd on success, or -errno on failure.
int NetHandler::generic_connect(const entity_addr_t& addr, const entity_addr_t &bind_addr, bool nonblock)
{
  int ret;
  int s = create_socket(addr.get_family());
  if (s < 0)
    return s;

  if (nonblock) {
    ret = set_nonblock(s);
    if (ret < 0) {
      compat_closesocket(s);
      return ret;
    }
  }

  // best-effort; option failures are logged inside and ignored here
  set_socket_options(s, cct->_conf->ms_tcp_nodelay, cct->_conf->ms_tcp_rcvbuf);

  {
    // NOTE: this inner `addr` deliberately shadows the outer parameter;
    // it is a mutable copy of bind_addr used only for the local bind
    entity_addr_t addr = bind_addr;
    if (cct->_conf->ms_bind_before_connect && (!addr.is_blank_ip())) {
      addr.set_port(0);
      ret = ::bind(s, addr.get_sockaddr(), addr.get_sockaddr_len());
      if (ret < 0) {
        ret = ceph_sock_errno();
        ldout(cct, 2) << __func__ << " client bind error " << ", " << cpp_strerror(ret) << dendl;
        compat_closesocket(s);
        return -ret;
      }
    }
  }

  ret = ::connect(s, addr.get_sockaddr(), addr.get_sockaddr_len());
  if (ret < 0) {
    ret = ceph_sock_errno();
    // Windows can return WSAEWOULDBLOCK (converted to EAGAIN).
    if ((ret == EINPROGRESS || ret == EAGAIN) && nonblock)
      return s;

    ldout(cct, 10) << __func__ << " connect: " << cpp_strerror(ret) << dendl;
    compat_closesocket(s);
    return -ret;
  }
  return s;
}
int NetHandler::reconnect(const entity_addr_t &addr, int sd)
{
int r = 0;
int ret = ::connect(sd, addr.get_sockaddr(), addr.get_sockaddr_len());
if (ret < 0 && ceph_sock_errno() != EISCONN) {
r = ceph_sock_errno();
ldout(cct, 10) << __func__ << " reconnect: " << r
<< " " << strerror(r) << dendl;
if (r == EINPROGRESS || r == EALREADY || r == EAGAIN)
return 1;
return -r;
}
return 0;
}
// Blocking connect: convenience wrapper around generic_connect(nonblock=false).
int NetHandler::connect(const entity_addr_t &addr, const entity_addr_t& bind_addr)
{
  return generic_connect(addr, bind_addr, false);
}
// Non-blocking connect: convenience wrapper around generic_connect(nonblock=true).
int NetHandler::nonblock_connect(const entity_addr_t &addr, const entity_addr_t& bind_addr)
{
  return generic_connect(addr, bind_addr, true);
}
}
| 6,550 | 25.630081 | 105 |
cc
|
null |
ceph-main/src/msg/async/net_handler.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 UnitedStack <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMMON_NET_UTILS_H
#define CEPH_COMMON_NET_UTILS_H
#include "common/config.h"
namespace ceph {
  // Small helper owning no state beyond a CephContext pointer; wraps raw
  // BSD-socket setup (creation, options, connect) used by the async msgr.
  class NetHandler {
    int generic_connect(const entity_addr_t& addr, const entity_addr_t& bind_addr, bool nonblock);
    CephContext *cct;
   public:
    // Create a socket of the given address family; returns fd or -errno.
    int create_socket(int domain, bool reuse_addr=false);
    explicit NetHandler(CephContext *c): cct(c) {}
    // Switch fd to non-blocking mode; returns 0 or -errno.
    int set_nonblock(int sd);
    // Apply TCP_NODELAY / receive-buffer-size options per configuration.
    int set_socket_options(int sd, bool nodelay, int size);
    // Blocking connect; returns connected fd or -errno.
    int connect(const entity_addr_t &addr, const entity_addr_t& bind_addr);

    /**
     * Try to reconnect the socket.
     *
     * @return    0         success
     *            > 0       just break, and wait for event
     *            < 0       need to goto fail
     */
    int reconnect(const entity_addr_t &addr, int sd);
    // Non-blocking connect; may return an fd whose connect is in progress.
    int nonblock_connect(const entity_addr_t &addr, const entity_addr_t& bind_addr);
    // Best-effort SO_PRIORITY / ToS marking; see net_handler.cc.
    void set_priority(int sd, int priority, int domain);
  };
}
#endif
| 1,432 | 29.489362 | 98 |
h
|
null |
ceph-main/src/msg/async/dpdk/ARP.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
/*
* This file is open source software, licensed to you under the terms
* of the Apache License, Version 2.0 (the "License"). See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. You may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (C) 2014 Cloudius Systems, Ltd.
*/
#include "ARP.h"
// Register this per-protocol handler with the shared arp dispatcher so
// incoming ARP packets with `proto_num` are routed to it.
arp_for_protocol::arp_for_protocol(arp& a, uint16_t proto_num)
  : _arp(a), _proto_num(proto_num)
{
  _arp.add(proto_num, this);
}
// Unregister from the dispatcher; `_arp` must outlive this handler.
arp_for_protocol::~arp_for_protocol()
{
  _arp.del(_proto_num);
}
// Bind the ARP ethertype on `netif`: supply outgoing packets from our
// queue (get_packet) and subscribe to incoming ARP frames, forwarding
// both the receive and hash-forwarding paths to this object.
arp::arp(interface* netif):
    _netif(netif),
    _proto(netif, eth_protocol_num::arp, [this] { return get_packet(); }),
    _rx_packets(
        _proto.receive(
            [this] (Packet p, ethernet_address ea) {
              return process_packet(std::move(p), ea);
            },
            [this](forward_hash& out_hash_data, Packet& p, size_t off) {
              return forward(out_hash_data, p, off);
            }
        )
    )
{}
std::optional<l3_protocol::l3packet> arp::get_packet()
{
std::optional<l3_protocol::l3packet> p;
if (!_packetq.empty()) {
p = std::move(_packetq.front());
_packetq.pop_front();
}
return p;
}
// Delegate hash-forwarding to the handler registered for the packet's
// protocol type; false when no handler claims it.
bool arp::forward(forward_hash& out_hash_data, Packet& p, size_t off)
{
  auto hdr = p.get_header<arp_hdr>(off);
  auto handler = _arp_for_protocol.find(ntoh(hdr->ptype));
  if (handler == _arp_for_protocol.end()) {
    return false;
  }
  return handler->second->forward(out_hash_data, p, off);
}
// Register (or replace) the handler for `proto_num`; `afp` is not owned.
void arp::add(uint16_t proto_num, arp_for_protocol* afp)
{
  _arp_for_protocol[proto_num] = afp;
}
// Remove the handler for `proto_num`; no-op if none is registered.
void arp::del(uint16_t proto_num)
{
  _arp_for_protocol.erase(proto_num);
}
// Dispatch an incoming ARP frame to the per-protocol handler keyed by its
// (host-order) ptype.  Packets with no registered handler are silently
// dropped.  Always returns 0, matching the original behavior.
int arp::process_packet(Packet p, ethernet_address from)
{
  auto hdr = p.get_header<arp_hdr>()->ntoh();
  auto handler = _arp_for_protocol.find(hdr.ptype);
  if (handler == _arp_for_protocol.end()) {
    return 0;
  }
  handler->second->received(std::move(p));
  return 0;
}
| 2,433 | 26.044444 | 79 |
cc
|
null |
ceph-main/src/msg/async/dpdk/ARP.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
/*
* This file is open source software, licensed to you under the terms
* of the Apache License, Version 2.0 (the "License"). See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. You may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (C) 2014 Cloudius Systems, Ltd.
*
*/
#ifndef CEPH_MSG_ARP_H_
#define CEPH_MSG_ARP_H_
#include <errno.h>
#include <unordered_map>
#include <functional>
#include "msg/async/Event.h"
#include "ethernet.h"
#include "circular_buffer.h"
#include "ip_types.h"
#include "net.h"
#include "Packet.h"
class arp;
template <typename L3>
class arp_for;
// Base for a per-L3-protocol ARP handler.  Registers itself with the
// shared `arp` dispatcher on construction and unregisters on destruction;
// the dispatcher must outlive instances of this class.
class arp_for_protocol {
 protected:
  arp& _arp;
  uint16_t _proto_num;  // ethertype of the protocol this handler serves
 public:
  arp_for_protocol(arp& a, uint16_t proto_num);
  virtual ~arp_for_protocol();
  // Handle an incoming ARP packet for this protocol.
  virtual int received(Packet p) = 0;
  // Optional hash-forwarding hook; default claims nothing.
  virtual bool forward(forward_hash& out_hash_data, Packet& p, size_t off) { return false; }
};
class interface;
// Protocol-agnostic ARP dispatcher for one network interface.  Owns the
// outbound packet queue and routes inbound ARP frames to per-protocol
// handlers (arp_for_protocol) keyed by ptype.
class arp {
  interface* _netif;
  l3_protocol _proto;
  subscription<Packet, ethernet_address> _rx_packets;
  std::unordered_map<uint16_t, arp_for_protocol*> _arp_for_protocol;  // not owned
  circular_buffer<l3_protocol::l3packet> _packetq;  // pending outbound packets
private:
  // Minimal ARP header prefix: just enough (htype/ptype) to dispatch.
  struct arp_hdr {
    uint16_t htype;
    uint16_t ptype;
    arp_hdr ntoh() {
      arp_hdr hdr = *this;
      hdr.htype = ::ntoh(htype);
      hdr.ptype = ::ntoh(ptype);
      return hdr;
    }
    arp_hdr hton() {
      arp_hdr hdr = *this;
      hdr.htype = ::hton(htype);
      hdr.ptype = ::hton(ptype);
      return hdr;
    }
  };
public:
  explicit arp(interface* netif);
  // Register/unregister a per-protocol handler (handler is not owned).
  void add(uint16_t proto_num, arp_for_protocol* afp);
  void del(uint16_t proto_num);
private:
  ethernet_address l2self() { return _netif->hw_address(); }
  int process_packet(Packet p, ethernet_address from);
  bool forward(forward_hash& out_hash_data, Packet& p, size_t off);
  std::optional<l3_protocol::l3packet> get_packet();
  template <class l3_proto>
  friend class arp_for;
};
// ARP resolver for one L3 protocol (e.g. IPv4): maintains the
// address-resolution table, tracks in-flight queries with per-address
// waiter lists, and retries unanswered queries on a 1s timer.
template <typename L3>
class arp_for : public arp_for_protocol {
 public:
  using l2addr = ethernet_address;
  using l3addr = typename L3::address_type;
 private:
  // Cap on callers parked on a single unresolved address; beyond this,
  // wait() fails the caller immediately with -EBUSY.
  static constexpr auto max_waiters = 512;
  enum oper {
    op_request = 1,
    op_reply = 2,
  };
  // Full on-wire ARP header for this L2/L3 pair, with whole-struct
  // byte-order conversion helpers.
  struct arp_hdr {
    uint16_t htype;
    uint16_t ptype;
    uint8_t hlen;
    uint8_t plen;
    uint16_t oper;
    l2addr sender_hwaddr;
    l3addr sender_paddr;
    l2addr target_hwaddr;
    l3addr target_paddr;
    arp_hdr ntoh() {
      arp_hdr hdr = *this;
      hdr.htype = ::ntoh(htype);
      hdr.ptype = ::ntoh(ptype);
      hdr.oper = ::ntoh(oper);
      hdr.sender_hwaddr = sender_hwaddr.ntoh();
      hdr.sender_paddr = sender_paddr.ntoh();
      hdr.target_hwaddr = target_hwaddr.ntoh();
      hdr.target_paddr = target_paddr.ntoh();
      return hdr;
    }
    arp_hdr hton() {
      arp_hdr hdr = *this;
      hdr.htype = ::hton(htype);
      hdr.ptype = ::hton(ptype);
      hdr.oper = ::hton(oper);
      hdr.sender_hwaddr = sender_hwaddr.hton();
      hdr.sender_paddr = sender_paddr.hton();
      hdr.target_hwaddr = target_hwaddr.hton();
      hdr.target_paddr = target_paddr.hton();
      return hdr;
    }
  };
  // One in-flight resolution: parked (callback, packet) pairs plus the
  // retry-timer handle.
  struct resolution {
    std::vector<std::pair<resolution_cb, Packet>> _waiters;
    uint64_t timeout_fd;
  };
  // Timer callback: on timeout, re-send the query, fail current waiters
  // with -ETIMEDOUT, and re-arm itself for another second.
  class C_handle_arp_timeout : public EventCallback {
    arp_for *arp;
    l3addr paddr;
    bool first_request;
   public:
    C_handle_arp_timeout(arp_for *a, l3addr addr, bool first):
        arp(a), paddr(addr), first_request(first) {}

    void do_request(uint64_t r) {
      arp->send_query(paddr);
      auto &res = arp->_in_progress[paddr];

      for (auto& p : res._waiters) {
        p.first(ethernet_address(), std::move(p.second), -ETIMEDOUT);
      }
      res._waiters.clear();
      res.timeout_fd = arp->center->create_time_event(
          1*1000*1000, this);
    }
  };
  friend class C_handle_arp_timeout;

 private:
  CephContext *cct;
  EventCenter *center;
  l3addr _l3self = L3::broadcast_address();  // our L3 address once set_self_addr() is called
  std::unordered_map<l3addr, l2addr> _table;  // resolved L3 -> L2 mappings
  std::unordered_map<l3addr, resolution> _in_progress;  // outstanding queries
 private:
  Packet make_query_packet(l3addr paddr);
  virtual int received(Packet p) override;
  int handle_request(arp_hdr* ah);
  l2addr l2self() { return _arp.l2self(); }
  void send(l2addr to, Packet &&p);
 public:
  void send_query(const l3addr& paddr);
  explicit arp_for(CephContext *c, arp& a, EventCenter *cen)
      : arp_for_protocol(a, L3::arp_protocol_type()), cct(c), center(cen) {
    _table[L3::broadcast_address()] = ethernet::broadcast_address();
  }
  ~arp_for() {
    // Cancel outstanding retry timers for queries still in flight.
    for (auto && p : _in_progress)
      center->delete_time_event(p.second.timeout_fd);
  }
  void wait(const l3addr& addr, Packet p, resolution_cb cb);
  void learn(l2addr l2, l3addr l3);
  void run();
  // Install our own L3 address (replacing the broadcast placeholder) so we
  // answer requests targeted at it.
  void set_self_addr(l3addr addr) {
    _table.erase(_l3self);
    _table[addr] = l2self();
    _l3self = addr;
  }
  friend class arp;
};
// Queue an ARP frame addressed to `to` on the dispatcher's outbound queue.
template <typename L3>
void arp_for<L3>::send(l2addr to, Packet &&p) {
  _arp._packetq.push_back(l3_protocol::l3packet{eth_protocol_num::arp, to, std::move(p)});
}
// Build a who-has request for `paddr` in network byte order.
// NOTE(review): the Packet is constructed from a pointer to the local
// `hdr` — presumably this Packet constructor copies the bytes; verify,
// since `hdr` goes out of scope on return.
template <typename L3>
Packet arp_for<L3>::make_query_packet(l3addr paddr) {
  arp_hdr hdr;
  hdr.htype = ethernet::arp_hardware_type();
  hdr.ptype = L3::arp_protocol_type();
  hdr.hlen = sizeof(l2addr);
  hdr.plen = sizeof(l3addr);
  hdr.oper = op_request;
  hdr.sender_hwaddr = l2self();
  hdr.sender_paddr = _l3self;
  hdr.target_hwaddr = ethernet::broadcast_address();
  hdr.target_paddr = paddr;
  hdr = hdr.hton();
  return Packet(reinterpret_cast<char*>(&hdr), sizeof(hdr));
}
// Broadcast a who-has query for `paddr`.
template <typename L3>
void arp_for<L3>::send_query(const l3addr& paddr) {
  send(ethernet::broadcast_address(), make_query_packet(paddr));
}
// Record (or refresh) an L3 -> L2 mapping.  If a query for `paddr` is in
// flight, cancel its retry timer, deliver the address to every parked
// waiter, and drop the in-progress entry.
template <typename L3>
void arp_for<L3>::learn(l2addr hwaddr, l3addr paddr) {
  _table[paddr] = hwaddr;
  auto pending = _in_progress.find(paddr);
  if (pending == _in_progress.end()) {
    return;
  }
  auto& res = pending->second;
  center->delete_time_event(res.timeout_fd);
  for (auto& waiter : res._waiters) {
    waiter.first(hwaddr, std::move(waiter.second), 0);
  }
  _in_progress.erase(pending);
}
// Resolve `paddr` for packet `p`: invoke `cb` immediately if the mapping
// is cached; otherwise park (cb, p) on the in-progress entry, sending the
// first query and arming the 1s retry timer when this is the first waiter.
// Waiters beyond max_waiters are rejected with -EBUSY.
template <typename L3>
void arp_for<L3>::wait(const l3addr& paddr, Packet p, resolution_cb cb) {
  auto i = _table.find(paddr);
  if (i != _table.end()) {
    cb(i->second, std::move(p), 0);
    return ;
  }

  auto j = _in_progress.find(paddr);
  auto first_request = j == _in_progress.end();
  // note: operator[] inserts the (empty) resolution entry on first request
  auto& res = first_request ? _in_progress[paddr] : j->second;
  if (first_request) {
    res.timeout_fd = center->create_time_event(
        1*1000*1000, new C_handle_arp_timeout(this, paddr, first_request));
    send_query(paddr);
  }

  if (res._waiters.size() >= max_waiters) {
    cb(ethernet_address(), std::move(p), -EBUSY);
    return ;
  }

  res._waiters.emplace_back(cb, std::move(p));
  return ;
}
// Validate and dispatch one inbound ARP packet: answer requests aimed at
// us, learn sender mappings from replies, and ignore everything else.
template <typename L3>
int arp_for<L3>::received(Packet p) {
  auto raw = p.get_header<arp_hdr>();
  if (!raw) {
    return 0;
  }
  auto hdr = raw->ntoh();
  // Drop packets whose address sizes don't match our L2/L3 types.
  if (hdr.hlen != sizeof(l2addr) || hdr.plen != sizeof(l3addr)) {
    return 0;
  }
  if (hdr.oper == op_request) {
    return handle_request(&hdr);
  }
  if (hdr.oper == op_reply) {
    _arp._netif->arp_learn(hdr.sender_hwaddr, hdr.sender_paddr);
  }
  return 0;
}
// Answer an ARP request addressed to our own L3 address by rewriting the
// header in place into a reply and sending it back to the requester.
// NOTE(review): ah->target_hwaddr is read *after* `*ah = ah->hton()`; this
// is only correct if l2addr's hton() is a byte-order no-op (MAC addresses
// are byte arrays) — confirm for any L2 type where that doesn't hold.
template <typename L3>
int arp_for<L3>::handle_request(arp_hdr* ah) {
  if (ah->target_paddr == _l3self
      && _l3self != L3::broadcast_address()) {
    ah->oper = op_reply;
    ah->target_hwaddr = ah->sender_hwaddr;
    ah->target_paddr = ah->sender_paddr;
    ah->sender_hwaddr = l2self();
    ah->sender_paddr = _l3self;
    *ah = ah->hton();
    send(ah->target_hwaddr, Packet(reinterpret_cast<char*>(ah), sizeof(*ah)));
  }
  return 0;
}
#endif /* CEPH_MSG_ARP_H_ */
| 8,248 | 26.31457 | 92 |
h
|
null |
ceph-main/src/msg/async/dpdk/DPDK.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
/*
* This file is open source software, licensed to you under the terms
* of the Apache License, Version 2.0 (the "License"). See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. You may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (C) 2014 Cloudius Systems, Ltd.
*/
#include <atomic>
#include <vector>
#include <queue>
#include <rte_config.h>
#include <rte_common.h>
#include <rte_eal.h>
#include <rte_pci.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_cycles.h>
#include <rte_memzone.h>
#include "include/page.h"
#include "align.h"
#include "IP.h"
#include "const.h"
#include "dpdk_rte.h"
#include "DPDK.h"
#include "toeplitz.h"
#include "common/Cycles.h"
#include "common/dout.h"
#include "common/errno.h"
#include "include/ceph_assert.h"
#define dout_subsys ceph_subsys_dpdk
#undef dout_prefix
#define dout_prefix *_dout << "dpdk "
// Adapt a pktmbuf pool private struct to the opaque cookie pointer that
// rte_mempool_create() passes to its init callbacks.
// (Also drops the stray ';' that followed the function body.)
void* as_cookie(struct rte_pktmbuf_pool_private& p) {
  return &p;
}
/******************* Net device related constatns *****************************/
static constexpr uint16_t default_ring_size = 512;
//
// We need 2 times the ring size of buffers because of the way PMDs
// refill the ring.
//
static constexpr uint16_t mbufs_per_queue_rx = 2 * default_ring_size;
static constexpr uint16_t rx_gc_thresh = 64;
//
// No need to keep more descriptors in the air than can be sent in a single
// rte_eth_tx_burst() call.
//
static constexpr uint16_t mbufs_per_queue_tx = 2 * default_ring_size;
static constexpr uint16_t mbuf_cache_size = 512;
//
// Size of the data buffer in the non-inline case.
//
// We may want to change (increase) this value in future, while the
// inline_mbuf_data_size value will unlikely change due to reasons described
// above.
//
static constexpr size_t mbuf_data_size = 4096;
static constexpr uint16_t mbuf_overhead =
sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
//
// We'll allocate 2K data buffers for an inline case because this would require
// a single page per mbuf. If we used 4K data buffers here it would require 2
// pages for a single buffer (due to "mbuf_overhead") and this is a much more
// demanding memory constraint.
//
static constexpr size_t inline_mbuf_data_size = 2048;
// (INLINE_MBUF_DATA_SIZE(2K)*32 = 64K = Max TSO/LRO size) + 1 mbuf for headers
static constexpr uint8_t max_frags = 32 + 1;
//
// Intel's 40G NIC HW limit for a number of fragments in an xmit segment.
//
// See Chapter 8.4.1 "Transmit Packet in System Memory" of the xl710 devices
// spec. for more details.
//
static constexpr uint8_t i40e_max_xmit_segment_frags = 8;
//
// VMWare's virtual NIC limit for a number of fragments in an xmit segment.
//
// see drivers/net/vmxnet3/base/vmxnet3_defs.h VMXNET3_MAX_TXD_PER_PKT
//
static constexpr uint8_t vmxnet3_max_xmit_segment_frags = 16;
static constexpr uint16_t inline_mbuf_size = inline_mbuf_data_size + mbuf_overhead;
static size_t huge_page_size = 512 * CEPH_PAGE_SIZE;
// Compute the per-queue-pair mempool footprint: the Rx and Tx mempool
// object sizes (plus pool-private data), each rounded up to the huge page
// size because DPDK allocates physically contiguous memory per pool.
uint32_t qp_mempool_obj_size()
{
  uint32_t mp_size = 0;
  struct rte_mempool_objsz mp_obj_sz = {};

  //
  // We will align each size to huge page size because DPDK allocates
  // physically contiguous memory region for each pool object.
  //

  // Rx
  mp_size += align_up(rte_mempool_calc_obj_size(mbuf_overhead, 0, &mp_obj_sz)+
                      sizeof(struct rte_pktmbuf_pool_private),
                      huge_page_size);

  //Tx
  std::memset(&mp_obj_sz, 0, sizeof(mp_obj_sz));
  mp_size += align_up(rte_mempool_calc_obj_size(inline_mbuf_size, 0,
                                                &mp_obj_sz)+
                      sizeof(struct rte_pktmbuf_pool_private),
                      huge_page_size);
  return mp_size;
}
static constexpr const char* pktmbuf_pool_name = "dpdk_net_pktmbuf_pool";
/*
* When doing reads from the NIC queues, use this batch size
*/
static constexpr uint8_t packet_read_size = 32;
/******************************************************************************/
// First stage of port bring-up: query device capabilities, apply
// driver-specific quirks, size the queue set, configure RSS, offloads and
// the redirection table, then call rte_eth_dev_configure().
// Returns 0 on success or a negative error from rte_eth_dev_configure().
// The port is not started here; see init_port_fini().
int DPDKDevice::init_port_start()
{
  ceph_assert(_port_idx < rte_eth_dev_count_avail());

  rte_eth_dev_info_get(_port_idx, &_dev_info);

  //
  // This is a workaround for a missing handling of a HW limitation in the
  // DPDK i40e driver. This and all related to _is_i40e_device code should be
  // removed once this handling is added.
  //
  if (std::string("rte_i40evf_pmd") == _dev_info.driver_name ||
      std::string("rte_i40e_pmd") == _dev_info.driver_name) {
    ldout(cct, 1) << __func__ << " Device is an Intel's 40G NIC. Enabling 8 fragments hack!" << dendl;
    _is_i40e_device = true;
  }

  if (std::string("rte_vmxnet3_pmd") == _dev_info.driver_name) {
    ldout(cct, 1) << __func__ << " Device is a VMWare Virtual NIC. Enabling 16 fragments hack!" << dendl;
    _is_vmxnet3_device = true;
  }

  //
  // Another workaround: this time for a lack of number of RSS bits.
  // ixgbe PF NICs support up to 16 RSS queues.
  // ixgbe VF NICs support up to 4 RSS queues.
  // i40e PF NICs support up to 64 RSS queues.
  // i40e VF NICs support up to 16 RSS queues.
  //
  if (std::string("rte_ixgbe_pmd") == _dev_info.driver_name) {
    _dev_info.max_rx_queues = std::min(_dev_info.max_rx_queues, (uint16_t)16);
  } else if (std::string("rte_ixgbevf_pmd") == _dev_info.driver_name) {
    _dev_info.max_rx_queues = std::min(_dev_info.max_rx_queues, (uint16_t)4);
  } else if (std::string("rte_i40e_pmd") == _dev_info.driver_name) {
    _dev_info.max_rx_queues = std::min(_dev_info.max_rx_queues, (uint16_t)64);
  } else if (std::string("rte_i40evf_pmd") == _dev_info.driver_name) {
    _dev_info.max_rx_queues = std::min(_dev_info.max_rx_queues, (uint16_t)16);
  }

  // Hardware offload capabilities
  // https://github.com/DPDK/dpdk/blob/v19.05/lib/librte_ethdev/rte_ethdev.h#L993-L1074
  // We want to support all available offload features
  // TODO: below features are implemented in 17.05, should support new ones
  const uint64_t tx_offloads_wanted =
    DEV_TX_OFFLOAD_VLAN_INSERT      |
    DEV_TX_OFFLOAD_IPV4_CKSUM       |
    DEV_TX_OFFLOAD_UDP_CKSUM        |
    DEV_TX_OFFLOAD_TCP_CKSUM        |
    DEV_TX_OFFLOAD_SCTP_CKSUM       |
    DEV_TX_OFFLOAD_TCP_TSO          |
    DEV_TX_OFFLOAD_UDP_TSO          |
    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
    DEV_TX_OFFLOAD_QINQ_INSERT      |
    DEV_TX_OFFLOAD_VXLAN_TNL_TSO    |
    DEV_TX_OFFLOAD_GRE_TNL_TSO      |
    DEV_TX_OFFLOAD_IPIP_TNL_TSO     |
    DEV_TX_OFFLOAD_GENEVE_TNL_TSO   |
    DEV_TX_OFFLOAD_MACSEC_INSERT;

  _dev_info.default_txconf.offloads =
    _dev_info.tx_offload_capa & tx_offloads_wanted;

  /* for port configuration all features are off by default */
  rte_eth_conf port_conf = { 0 };

  /* setting tx offloads for port */
  port_conf.txmode.offloads = _dev_info.default_txconf.offloads;

  ldout(cct, 5) << __func__ << " Port " << int(_port_idx) << ": max_rx_queues "
                << _dev_info.max_rx_queues << "  max_tx_queues "
                << _dev_info.max_tx_queues << dendl;

  _num_queues = std::min({_num_queues, _dev_info.max_rx_queues, _dev_info.max_tx_queues});

  ldout(cct, 5) << __func__ << " Port " << int(_port_idx) << ": using "
                << _num_queues << " queues" << dendl;

  // Set RSS mode: enable RSS if seastar is configured with more than 1 CPU.
  // Even if port has a single queue we still want the RSS feature to be
  // available in order to make HW calculate RSS hash for us.
  if (_num_queues > 1) {
    if (_dev_info.hash_key_size == 40) {
      _rss_key = default_rsskey_40bytes;
    } else if (_dev_info.hash_key_size == 52) {
      _rss_key = default_rsskey_52bytes;
    } else if (_dev_info.hash_key_size != 0) {
      lderr(cct) << "Port " << int(_port_idx)
                 << ": We support only 40 or 52 bytes RSS hash keys, "
                 << int(_dev_info.hash_key_size) << " bytes key requested"
                 << dendl;
      return -EINVAL;
    } else {
      // zero hash_key_size: the PMD did not report one, assume 40 bytes
      _rss_key = default_rsskey_40bytes;
      _dev_info.hash_key_size = 40;
    }

    port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
    /* enable all supported rss offloads */
    port_conf.rx_adv_conf.rss_conf.rss_hf = _dev_info.flow_type_rss_offloads;
    if (_dev_info.hash_key_size) {
      port_conf.rx_adv_conf.rss_conf.rss_key = const_cast<uint8_t *>(_rss_key.data());
      port_conf.rx_adv_conf.rss_conf.rss_key_len = _dev_info.hash_key_size;
    }
  } else {
    port_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
  }

  if (_num_queues > 1) {
    if (_dev_info.reta_size) {
      // RETA size should be a power of 2
      ceph_assert((_dev_info.reta_size & (_dev_info.reta_size - 1)) == 0);

      // Set the RSS table to the correct size
      _redir_table.resize(_dev_info.reta_size);
      _rss_table_bits = std::lround(std::log2(_dev_info.reta_size));
      ldout(cct, 5) << __func__ << " Port " << int(_port_idx)
                    << ": RSS table size is " << _dev_info.reta_size << dendl;
    } else {
      // FIXME: same with sw_reta
      _redir_table.resize(128);
      _rss_table_bits = std::lround(std::log2(128));
    }
  } else {
    _redir_table.push_back(0);
  }

  // Set Rx VLAN stripping
  if (_dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
    port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
  }

#ifdef RTE_ETHDEV_HAS_LRO_SUPPORT
  // Enable LRO
  if (_use_lro && (_dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)) {
    ldout(cct, 1) << __func__ << " LRO is on" << dendl;
    port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TCP_LRO;
    _hw_features.rx_lro = true;
  } else
#endif
    ldout(cct, 1) << __func__ << " LRO is off" << dendl;

  // Check that all CSUM features are either all set all together or not set
  // all together. If this assumption breaks we need to rework the below logic
  // by splitting the csum offload feature bit into separate bits for IPv4,
  // TCP.
  ceph_assert(((_dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) &&
               (_dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM)) ||
              (!(_dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) &&
               !(_dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM)));

  // Set Rx checksum checking
  if ((_dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) &&
      (_dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM)) {
    ldout(cct, 1) << __func__ << " RX checksum offload supported" << dendl;
    port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
    _hw_features.rx_csum_offload = 1;
  }

  if ((_dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)) {
    ldout(cct, 1) << __func__ << " TX ip checksum offload supported" << dendl;
    _hw_features.tx_csum_ip_offload = 1;
  }

  // TSO is supported starting from DPDK v1.8
  // TSO is abnormal in some DPDK versions (eg.dpdk-20.11-3.e18.aarch64), try
  // disable TSO by ms_dpdk_enable_tso=false
  if ((_dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) &&
      cct->_conf.get_val<bool>("ms_dpdk_enable_tso")) {
    ldout(cct, 1) << __func__ << " TSO is supported" << dendl;
    _hw_features.tx_tso = 1;
  }

  // Check that Tx TCP CSUM features are either all set all together
  // or not set all together. If this assumption breaks we need to rework the
  // below logic by splitting the csum offload feature bit into separate bits
  // for TCP.
  ceph_assert((_dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) ||
              !(_dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM));

  if (_dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) {
    ldout(cct, 1) << __func__ << " TX TCP checksum offload supported" << dendl;
    _hw_features.tx_csum_l4_offload = 1;
  }

  int retval;

  ldout(cct, 1) << __func__ << " Port " << int(_port_idx) << " init ... " << dendl;

  /*
   * Standard DPDK port initialisation - config port, then set up
   * rx and tx rings.
   */
  if ((retval = rte_eth_dev_configure(_port_idx, _num_queues, _num_queues,
                                      &port_conf)) != 0) {
    lderr(cct) << __func__ << " failed to configure port " << (int)_port_idx
               << " rx/tx queues " << _num_queues << " error " << cpp_strerror(retval) << dendl;
    return retval;
  }

  //rte_eth_promiscuous_enable(port_num);
  ldout(cct, 1) << __func__ << " done." << dendl;

  return 0;
}
// Apply the configured hardware flow-control mode (_enable_fc) to the
// port.  -ENOTSUP from the PMD is tolerated and merely logged; any other
// get/set failure aborts, since continuing with unknown FC state is unsafe.
void DPDKDevice::set_hw_flow_control()
{
  // Read the port's current/default flow control settings
  struct rte_eth_fc_conf fc_conf;
  auto ret = rte_eth_dev_flow_ctrl_get(_port_idx, &fc_conf);
  if (ret == -ENOTSUP) {
    ldout(cct, 1) << __func__ << " port " << int(_port_idx)
                  << ": not support to get hardware flow control settings: " << ret << dendl;
    goto not_supported;
  }

  if (ret < 0) {
    lderr(cct) << __func__ << " port " << int(_port_idx)
               << ": failed to get hardware flow control settings: " << ret << dendl;
    ceph_abort();
  }

  if (_enable_fc) {
    fc_conf.mode = RTE_FC_FULL;
  } else {
    fc_conf.mode = RTE_FC_NONE;
  }

  ret = rte_eth_dev_flow_ctrl_set(_port_idx, &fc_conf);
  if (ret == -ENOTSUP) {
    ldout(cct, 1) << __func__ << " port " << int(_port_idx)
                  << ": not support to set hardware flow control settings: " << ret << dendl;
    goto not_supported;
  }

  if (ret < 0) {
    lderr(cct) << __func__ << " port " << int(_port_idx)
               << ": failed to set hardware flow control settings: " << ret << dendl;
    ceph_abort();
  }

  ldout(cct, 1) << __func__ << " port " << int(_port_idx) << ": HW FC " << _enable_fc << dendl;
  return;

not_supported:
  ldout(cct, 1) << __func__ << " port " << int(_port_idx) << ": changing HW FC settings is not supported" << dendl;
}
// Admin-socket hook exposing PMD statistics: dispatches the
// "show_pmd_stats" / "show_pmd_xstats" commands to the owning DPDKDevice.
class XstatSocketHook : public AdminSocketHook {
  DPDKDevice *dev;  // not owned; outlives this hook
 public:
  explicit XstatSocketHook(DPDKDevice *dev) : dev(dev) {}
  int call(std::string_view prefix, const cmdmap_t& cmdmap,
           Formatter *f,
           std::ostream& ss,
           bufferlist& out) override {
    if (prefix == "show_pmd_stats") {
      dev->nic_stats_dump(f);
    } else if (prefix == "show_pmd_xstats") {
      dev->nic_xstats_dump(f);
    }
    return 0;
  }
};
// Second stage of port bring-up: set flow control (requires HW reset, so
// done before start), start the port, program the RSS redirection table,
// wait for link-up, and register the admin-socket stats commands.
// Returns 0 on success, -1 on start/link failure.
int DPDKDevice::init_port_fini()
{
  // Changing FC requires HW reset, so set it before the port is initialized.
  set_hw_flow_control();

  if (rte_eth_dev_start(_port_idx) != 0) {
    lderr(cct) << __func__ << " can't start port " << _port_idx << dendl;
    return -1;
  }

  if (_num_queues > 1)
    set_rss_table();

  // Wait for a link
  if (check_port_link_status() < 0) {
    lderr(cct) << __func__ << " port link up failed " << _port_idx << dendl;
    return -1;
  }

  ldout(cct, 5) << __func__ << " created DPDK device" << dendl;
  AdminSocket *admin_socket = cct->get_admin_socket();
  dfx_hook = std::make_unique<XstatSocketHook>(this);
  int r = admin_socket->register_command("show_pmd_stats", dfx_hook.get(),
                                         "show pmd stats statistics");
  ceph_assert(r == 0);
  r = admin_socket->register_command("show_pmd_xstats", dfx_hook.get(),
                                     "show pmd xstats statistics");
  ceph_assert(r == 0);
  return 0;
}
// Program RSS via the rte_flow API: fill the software redirection table
// round-robin over the queues, then install a catch-all ingress rule that
// spreads IPv4/TCP traffic with the Toeplitz hash.  If the PMD rejects the
// rule, the failure is only logged (the port keeps its default RSS config).
void DPDKDevice::set_rss_table()
{
  struct rte_flow_attr attr;
  struct rte_flow_item pattern[1];
  struct rte_flow_action action[2];
  struct rte_flow_action_rss rss_conf;

  /*
   * set the rule attribute.
   * in this case only ingress packets will be checked.
   */
  memset(&attr, 0, sizeof(struct rte_flow_attr));
  attr.ingress = 1;

  /* the final level must be always type end */
  pattern[0].type = RTE_FLOW_ITEM_TYPE_END;

  /*
   * create the action sequence.
   * one action only,  set rss hash func to toeplitz.
   */
  uint16_t i = 0;
  // round-robin queue assignment across the redirection table
  for (auto& r : _redir_table) {
    r = i++ % _num_queues;
  }
  rss_conf.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
  rss_conf.types = ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP;
  rss_conf.queue_num = _num_queues;
  rss_conf.queue = const_cast<uint16_t *>(_redir_table.data());
  rss_conf.key_len = _dev_info.hash_key_size;
  rss_conf.key = const_cast<uint8_t *>(_rss_key.data());
  rss_conf.level = 0;
  action[0].type = RTE_FLOW_ACTION_TYPE_RSS;
  action[0].conf = &rss_conf;
  action[1].type = RTE_FLOW_ACTION_TYPE_END;

  if (rte_flow_validate(_port_idx, &attr, pattern, action, nullptr) == 0)
    _flow = rte_flow_create(_port_idx, &attr, pattern, action, nullptr);
  else
    ldout(cct, 0) << __func__ << " Port " << _port_idx
                  << ": flow rss func configuration is unsupported"
                  << dendl;
}
// Set up cross-CPU packet proxying for this queue: register a packet
// provider draining _proxy_packetq and build the software RETA from the
// given CPU weights.  Skipped entirely when this queue only sends to
// itself (single entry equal to our own qid).
void DPDKQueuePair::configure_proxies(const std::map<unsigned, float>& cpu_weights) {
  ceph_assert(!cpu_weights.empty());
  if (cpu_weights.size() == 1 && cpu_weights.begin()->first == _qid) {
    // special case queue sending to self only, to avoid requiring a hash value
    return;
  }
  register_packet_provider([this] {
    std::optional<Packet> p;
    if (!_proxy_packetq.empty()) {
      p = std::move(_proxy_packetq.front());
      _proxy_packetq.pop_front();
    }
    return p;
  });
  build_sw_reta(cpu_weights);
}
// Build the 128-entry software redirection table, giving each CPU a run of
// slots proportional to its weight.  The -0.5 keeps slot boundaries at the
// nearest fraction of the table rather than always rounding up.
void DPDKQueuePair::build_sw_reta(const std::map<unsigned, float>& cpu_weights) {
  float total_weight = 0;
  for (auto&& x : cpu_weights) {
    total_weight += x.second;
  }
  float accum = 0;
  unsigned idx = 0;
  std::array<uint8_t, 128> reta;
  for (auto&& entry : cpu_weights) {
    auto cpu = entry.first;
    auto weight = entry.second;
    accum += weight;
    while (idx < (accum / total_weight * reta.size() - 0.5)) {
      reta[idx++] = cpu;
    }
  }
  _sw_reta = reta;
}
// Create (or look up, if it already exists) this queue's Rx mbuf pool,
// pre-allocate the extra data buffers backing received frames from a
// dedicated memzone, and set up the HW Rx queue.  Returns true on success.
// NOTE(review): the rx-queue setup runs only when the pool was freshly
// created here; a pool found via lookup skips it — presumably because the
// queue was already set up by whoever created the pool; confirm.
bool DPDKQueuePair::init_rx_mbuf_pool()
{
  std::string name = std::string(pktmbuf_pool_name) + std::to_string(_qid) + "_rx";

  // reserve the memory for Rx buffers containers
  _rx_free_pkts.reserve(mbufs_per_queue_rx);
  _rx_free_bufs.reserve(mbufs_per_queue_rx);

  _pktmbuf_pool_rx = rte_mempool_lookup(name.c_str());
  if (!_pktmbuf_pool_rx) {
    ldout(cct, 1) << __func__ << " Creating Rx mbuf pool '" << name.c_str()
                  << "' [" << mbufs_per_queue_rx << " mbufs] ..."<< dendl;

    //
    // Don't pass single-producer/single-consumer flags to mbuf create as it
    // seems faster to use a cache instead.
    //
    struct rte_pktmbuf_pool_private roomsz = {};
    roomsz.mbuf_data_room_size = mbuf_data_size + RTE_PKTMBUF_HEADROOM;
    _pktmbuf_pool_rx = rte_mempool_create(
        name.c_str(),
        mbufs_per_queue_rx, mbuf_overhead + mbuf_data_size,
        mbuf_cache_size,
        sizeof(struct rte_pktmbuf_pool_private),
        rte_pktmbuf_pool_init, as_cookie(roomsz),
        rte_pktmbuf_init, nullptr,
        rte_socket_id(), 0);
    if (!_pktmbuf_pool_rx) {
      lderr(cct) << __func__ << " Failed to create mempool for rx" << dendl;
      return false;
    }

    //
    // allocate more data buffer
    //
    int bufs_count = cct->_conf->ms_dpdk_rx_buffer_count_per_core - mbufs_per_queue_rx;
    int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
    std::string mz_name = "rx_buffer_data" + std::to_string(_qid);
    const struct rte_memzone *mz = rte_memzone_reserve_aligned(mz_name.c_str(),
          mbuf_data_size*bufs_count, _pktmbuf_pool_rx->socket_id, mz_flags, mbuf_data_size);
    ceph_assert(mz);
    // fixed: step through the memzone with a char* — arithmetic on void*
    // is a non-standard GNU extension
    char* m = static_cast<char*>(mz->addr);
    for (int i = 0; i < bufs_count; i++) {
      ceph_assert(m);
      _alloc_bufs.push_back(m);
      m += mbuf_data_size;
    }

    if (rte_eth_rx_queue_setup(_dev_port_idx, _qid, default_ring_size,
                               rte_eth_dev_socket_id(_dev_port_idx),
                               _dev->def_rx_conf(), _pktmbuf_pool_rx) < 0) {
      lderr(cct) << __func__ << " cannot initialize rx queue" << dendl;
      return false;
    }
  }

  return _pktmbuf_pool_rx != nullptr;
}
// Poll the port's link status every 100ms until it comes up, giving up
// after 90 polls (~9s).  Returns 0 when the link is up, -1 on timeout.
// Fixes: removed a pointless `if (true)` wrapper and a stray "\n" inside
// the half-duplex log string (dendl already terminates the line).
int DPDKDevice::check_port_link_status()
{
  int count = 0;

  ldout(cct, 20) << __func__ << dendl;
  const int sleep_time = 100 * 1000;
  const int max_check_time = 90;  /* 9s (90 * 100ms) in total */
  while (true) {
    struct rte_eth_link link;
    memset(&link, 0, sizeof(link));
    rte_eth_link_get_nowait(_port_idx, &link);

    if (link.link_status) {
      ldout(cct, 5) << __func__ << " done port "
                    << static_cast<unsigned>(_port_idx)
                    << " link Up - speed " << link.link_speed
                    << " Mbps - "
                    << ((link.link_duplex == ETH_LINK_FULL_DUPLEX) ? ("full-duplex") : ("half-duplex"))
                    << dendl;
      break;
    } else if (count++ < max_check_time) {
      ldout(cct, 20) << __func__ << " not ready, continue to wait." << dendl;
      usleep(sleep_time);
    } else {
      lderr(cct) << __func__ << " done port " << _port_idx << " link down" << dendl;
      return -1;
    }
  }
  return 0;
}
// Event-center callback that periodically triggers per-queue-pair stats
// collection via DPDKQueuePair::handle_stats().
class C_handle_dev_stats : public EventCallback {
  DPDKQueuePair *_qp;  // not owned; outlives this callback
 public:
  C_handle_dev_stats(DPDKQueuePair *qp): _qp(qp) { }
  void do_request(uint64_t id) {
    _qp->handle_stats();
  }
};
/**
 * Construct one rx/tx queue pair bound to hardware queue `qid` of `dev`.
 * Builds the per-queue rx mbuf pool, validates tx_buf layout invariants,
 * registers per-queue perf counters, and (on queue 0 only) arms the
 * periodic device-stats timer.
 */
DPDKQueuePair::DPDKQueuePair(CephContext *c, EventCenter *cen, DPDKDevice* dev, uint8_t qid)
  : cct(c), _dev(dev), _dev_port_idx(dev->port_idx()), center(cen), _qid(qid),
    _tx_poller(this), _rx_gc_poller(this), _tx_buf_factory(c, dev, qid),
    _tx_gc_poller(this)
{
  // The rx mbuf pool is mandatory; nothing works without it.
  if (!init_rx_mbuf_pool()) {
    lderr(cct) << __func__ << " cannot initialize mbuf pools" << dendl;
    ceph_abort();
  }

  // tx_buf overlays an rte_mbuf: its private fields must fit into the
  // mbuf headroom, _mbuf must be the first member, and the inline data
  // size must be a power of two (from_packet_copy() relies on it).
  static_assert(offsetof(tx_buf, private_end) -
                offsetof(tx_buf, private_start) <= RTE_PKTMBUF_HEADROOM,
                "RTE_PKTMBUF_HEADROOM is less than DPDKQueuePair::tx_buf size! "
                "Increase the headroom size in the DPDK configuration");
  static_assert(offsetof(tx_buf, _mbuf) == 0,
                "There is a pad at the beginning of the tx_buf before _mbuf "
                "field!");
  static_assert((inline_mbuf_data_size & (inline_mbuf_data_size - 1)) == 0,
                "inline_mbuf_data_size has to be a power of two!");

  // Per-queue perf counters, registered under "queue<qid>".
  // (Fixed "sendd" -> "send" typos in several counter descriptions.)
  std::string name(std::string("queue") + std::to_string(qid));
  PerfCountersBuilder plb(cct, name, l_dpdk_qp_first, l_dpdk_qp_last);

  plb.add_u64_counter(l_dpdk_qp_rx_packets, "dpdk_receive_packets", "DPDK received packets");
  plb.add_u64_counter(l_dpdk_qp_tx_packets, "dpdk_send_packets", "DPDK send packets");
  plb.add_u64_counter(l_dpdk_qp_rx_bad_checksum_errors, "dpdk_receive_bad_checksum_errors", "DPDK received bad checksum packets");
  plb.add_u64_counter(l_dpdk_qp_rx_no_memory_errors, "dpdk_receive_no_memory_errors", "DPDK received no memory packets");
  plb.add_u64_counter(l_dpdk_qp_rx_bytes, "dpdk_receive_bytes", "DPDK received bytes", NULL, 0, unit_t(UNIT_BYTES));
  plb.add_u64_counter(l_dpdk_qp_tx_bytes, "dpdk_send_bytes", "DPDK send bytes", NULL, 0, unit_t(UNIT_BYTES));
  plb.add_u64_counter(l_dpdk_qp_rx_last_bunch, "dpdk_receive_last_bunch", "DPDK last received bunch");
  plb.add_u64_counter(l_dpdk_qp_tx_last_bunch, "dpdk_send_last_bunch", "DPDK last send bunch");
  plb.add_u64_counter(l_dpdk_qp_rx_fragments, "dpdk_receive_fragments", "DPDK received total fragments");
  plb.add_u64_counter(l_dpdk_qp_tx_fragments, "dpdk_send_fragments", "DPDK send total fragments");
  plb.add_u64_counter(l_dpdk_qp_rx_copy_ops, "dpdk_receive_copy_ops", "DPDK received copy operations");
  plb.add_u64_counter(l_dpdk_qp_tx_copy_ops, "dpdk_send_copy_ops", "DPDK send copy operations");
  plb.add_u64_counter(l_dpdk_qp_rx_copy_bytes, "dpdk_receive_copy_bytes", "DPDK received copy bytes", NULL, 0, unit_t(UNIT_BYTES));
  plb.add_u64_counter(l_dpdk_qp_tx_copy_bytes, "dpdk_send_copy_bytes", "DPDK send copy bytes", NULL, 0, unit_t(UNIT_BYTES));
  plb.add_u64_counter(l_dpdk_qp_rx_linearize_ops, "dpdk_receive_linearize_ops", "DPDK received linearize operations");
  plb.add_u64_counter(l_dpdk_qp_tx_linearize_ops, "dpdk_send_linearize_ops", "DPDK send linearize operations");
  plb.add_u64_counter(l_dpdk_qp_tx_queue_length, "dpdk_send_queue_length", "DPDK send queue length");

  perf_logger = plb.create_perf_counters();
  cct->get_perfcounters_collection()->add(perf_logger);

  // Only queue 0 drives the periodic (1s) device-stats poller.
  if (!_qid)
    device_stat_time_fd = center->create_time_event(1000*1000, new C_handle_dev_stats(this));
}
// Dump basic NIC counters plus derived packet rates to the formatter.
// The prev_* arrays persist across calls so a per-call delta (and thus a
// rate) can be computed from the free-running hardware counters.
void DPDKDevice::nic_stats_dump(Formatter *f)
{
  static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
  static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
  static uint64_t prev_cycles[RTE_MAX_ETHPORTS];
  // Aggregate software-side counters over all queue pairs of this device.
  size_t tx_fragments = 0;
  size_t rx_fragments = 0;
  size_t tx_free_cnt = 0;
  size_t rx_free_cnt = 0;
  for (auto &qp: _queues) {
    tx_fragments += qp->perf_logger->get(l_dpdk_qp_tx_fragments);
    rx_fragments += qp->perf_logger->get(l_dpdk_qp_rx_fragments);
    tx_free_cnt += qp->_tx_buf_factory.ring_size();
    rx_free_cnt += rte_mempool_avail_count(qp->_pktmbuf_pool_rx);
  }
  struct rte_eth_stats stats;
  rte_eth_stats_get(_port_idx, &stats);
  f->open_object_section("RX");
  f->dump_unsigned("in_packets", stats.ipackets);
  f->dump_unsigned("recv_packets", rx_fragments);
  f->dump_unsigned("in_bytes", stats.ibytes);
  f->dump_unsigned("missed", stats.imissed);
  f->dump_unsigned("errors", stats.ierrors);
  f->close_section();
  f->open_object_section("TX");
  f->dump_unsigned("out_packets", stats.opackets);
  f->dump_unsigned("send_packets", tx_fragments);
  f->dump_unsigned("out_bytes", stats.obytes);
  f->dump_unsigned("errors", stats.oerrors);
  f->close_section();
  f->open_object_section("stats");
  f->dump_unsigned("RX_nombuf", stats.rx_nombuf);
  f->dump_unsigned("RX_avail_mbufs", rx_free_cnt);
  f->dump_unsigned("TX_avail_mbufs", tx_free_cnt);
  // TSC cycle delta since the previous dump (0 on the first call, which
  // makes the rates below report 0 instead of dividing by garbage).
  uint64_t diff_cycles = prev_cycles[_port_idx];
  prev_cycles[_port_idx] = rte_rdtsc();
  if (diff_cycles > 0) {
    diff_cycles = prev_cycles[_port_idx] - diff_cycles;
  }
  // Packet deltas since the previous dump, clamped at 0 on counter reset.
  uint64_t diff_pkts_rx = (stats.ipackets > prev_pkts_rx[_port_idx]) ?
    (stats.ipackets - prev_pkts_rx[_port_idx]) : 0;
  uint64_t diff_pkts_tx = (stats.opackets > prev_pkts_tx[_port_idx]) ?
    (stats.opackets - prev_pkts_tx[_port_idx]) : 0;
  prev_pkts_rx[_port_idx] = stats.ipackets;
  prev_pkts_tx[_port_idx] = stats.opackets;
  // NOTE(review): despite the "mpps" names these are plain packets-per-
  // second values (pkt_delta * tsc_hz / cycle_delta) — matching the
  // "..._pps" keys they are dumped under.
  uint64_t mpps_rx = diff_cycles > 0 ? diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0;
  uint64_t mpps_tx = diff_cycles > 0 ? diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0;
  f->dump_unsigned("Rx_pps", mpps_rx);
  f->dump_unsigned("Tx_pps", mpps_tx);
  f->close_section();
}
// Dump the PMD's extended statistics (xstats) as a flat name -> value map.
void DPDKDevice::nic_xstats_dump(Formatter *f)
{
  // Ask the PMD how many extended stats it exposes.
  const int n = rte_eth_xstats_get_names(_port_idx, NULL, 0);
  if (n < 0) {
    ldout(cct, 1) << "Error: Cannot get count of xstats" << dendl;
    return;
  }

  // Fetch the id -> name lookup table.
  std::vector<struct rte_eth_xstat_name> names(n);
  if (rte_eth_xstats_get_names(_port_idx, names.data(), n) != n) {
    ldout(cct, 1) << "Error: Cannot get xstats lookup" << dendl;
    return;
  }

  // Fetch the values themselves.
  std::vector<struct rte_eth_xstat> values(n);
  if (rte_eth_xstats_get(_port_idx, values.data(), n) != n) {
    ldout(cct, 1) << "Error: Unable to get xstats" << dendl;
    return;
  }

  f->open_object_section("xstats");
  for (int i = 0; i < n; ++i) {
    f->dump_unsigned(names[i].name, values[i].value);
  }
  f->close_section();
}
/**
 * Periodic (1s) device-statistics poller; only queue 0 arms it (see the
 * constructor). Reads port-level stats and mirrors them into the device
 * perf counters, then re-arms itself.
 */
void DPDKQueuePair::handle_stats()
{
  ldout(cct, 20) << __func__ << " started." << dendl;
  rte_eth_stats rte_stats = {};
  int rc = rte_eth_stats_get(_dev_port_idx, &rte_stats);
  if (rc) {
    ldout(cct, 0) << __func__ << " failed to get port statistics: " << cpp_strerror(rc) << dendl;
    // Re-arm the timer even on a (possibly transient) failure; the timer
    // is one-shot, so returning without this would silently stop stats
    // collection for good.
    device_stat_time_fd = center->create_time_event(1000*1000, new C_handle_dev_stats(this));
    return;
  }

#if RTE_VERSION < RTE_VERSION_NUM(16,7,0,0)
  // Counters removed from rte_eth_stats in DPDK 16.07.
  _dev->perf_logger->set(l_dpdk_dev_rx_mcast, rte_stats.imcasts);
  _dev->perf_logger->set(l_dpdk_dev_rx_badcrc_errors, rte_stats.ibadcrc);
#endif
  _dev->perf_logger->set(l_dpdk_dev_rx_dropped_errors, rte_stats.imissed);
  _dev->perf_logger->set(l_dpdk_dev_rx_nombuf_errors, rte_stats.rx_nombuf);
  _dev->perf_logger->set(l_dpdk_dev_rx_total_errors, rte_stats.ierrors);
  _dev->perf_logger->set(l_dpdk_dev_tx_total_errors, rte_stats.oerrors);
  device_stat_time_fd = center->create_time_event(1000*1000, new C_handle_dev_stats(this));
}
// Pull packets from the registered providers into _tx_packetq and push
// them to the NIC. Returns true if anything was queued and sent.
bool DPDKQueuePair::poll_tx() {
  bool nonloopback = !cct->_conf->ms_dpdk_debug_allow_loopback;
#ifdef CEPH_PERF_DEV
  uint64_t start = Cycles::rdtsc();
#endif
  uint32_t total_work = 0;
  // Only refill when the backlog is small; otherwise just drain it.
  if (_tx_packetq.size() < 16) {
    // refill send queue from upper layers
    uint32_t work;
    do {
      work = 0;
      for (auto&& pr : _pkt_providers) {
        auto p = pr();
        if (p) {
          work++;
          if (likely(nonloopback)) {
            // ldout(cct, 0) << __func__ << " len: " << p->len() << " frags: " << p->nr_frags() << dendl;
            _tx_packetq.push_back(std::move(*p));
          } else {
            // Loopback debug mode: packets addressed to our own MAC are
            // delivered locally instead of hitting the wire.
            auto th = p->get_header<eth_hdr>(0);
            if (th->dst_mac == th->src_mac) {
              _dev->l2receive(_qid, std::move(*p));
            } else {
              _tx_packetq.push_back(std::move(*p));
            }
          }
          // Cap the queue depth at 128 packets.
          if (_tx_packetq.size() == 128) {
            break;
          }
        }
      }
      total_work += work;
      // Keep pulling while providers produce, bounded (256 packets max per
      // call) so this poller cannot starve the others.
    } while (work && total_work < 256 && _tx_packetq.size() < 128);
  }
  if (!_tx_packetq.empty()) {
    uint64_t c = send(_tx_packetq);
    perf_logger->inc(l_dpdk_qp_tx_packets, c);
    perf_logger->set(l_dpdk_qp_tx_last_bunch, c);
#ifdef CEPH_PERF_DEV
    tx_count += total_work;
    tx_cycles += Cycles::rdtsc() - start;
#endif
    return true;
  }
  return false;
}
// Build a multi-fragment Packet from a chained (LRO) mbuf cluster.
// _frags/_bufs are member scratch vectors reused across calls to avoid
// per-packet allocations.
inline std::optional<Packet> DPDKQueuePair::from_mbuf_lro(rte_mbuf* m)
{
  _frags.clear();
  _bufs.clear();

  for (; m != nullptr; m = m->next) {
    char* data = rte_pktmbuf_mtod(m, char*);

    _frags.emplace_back(fragment{data, rte_pktmbuf_data_len(m)});
    _bufs.push_back(data);
  }

  // The deleter returns every data buffer to the rx allocation pool once
  // the Packet dies. _bufs is moved into the bound argument, leaving the
  // member vector empty for the next call.
  auto del = std::bind(
    [this](std::vector<char*> &bufs) {
      for (auto&& b : bufs) { _alloc_bufs.push_back(b); }
    }, std::move(_bufs));
  return Packet(
    _frags.begin(), _frags.end(), make_deleter(std::move(del)));
}
// Wrap a received mbuf (possibly a chain) in a Packet. The mbuf is also
// recorded on _rx_free_pkts so rx_gc() can later refill it and return it
// to the mempool.
inline std::optional<Packet> DPDKQueuePair::from_mbuf(rte_mbuf* m)
{
  _rx_free_pkts.push_back(m);
  _num_rx_free_segs += m->nb_segs;

  const bool single_frag =
    !_dev->hw_features_ref().rx_lro || rte_pktmbuf_is_contiguous(m);
  if (!single_frag) {
    // Multi-segment (LRO) chain: build a multi-fragment Packet.
    return from_mbuf_lro(m);
  }

  // Common case: one contiguous buffer becomes a single-fragment Packet
  // whose deleter recycles the data buffer into the allocation pool.
  char* data = rte_pktmbuf_mtod(m, char*);
  return Packet(fragment{data, rte_pktmbuf_data_len(m)},
                make_deleter([this, data] { _alloc_bufs.push_back(data); }));
}
// Re-arm every segment of a received mbuf chain with a fresh data buffer
// from _alloc_bufs, queueing the re-armed segments on _rx_free_bufs for a
// bulk return to the mempool. Returns false when the buffer pool runs dry.
inline bool DPDKQueuePair::refill_one_cluster(rte_mbuf* head)
{
  for (; head != nullptr; head = head->next) {
    if (!refill_rx_mbuf(head, mbuf_data_size, _alloc_bufs)) {
      //
      // If we failed to allocate a new buffer - push the rest of the
      // cluster back to the free_packets list for a later retry.
      //
      _rx_free_pkts.push_back(head);
      return false;
    }
    _rx_free_bufs.push_back(head);
  }

  return true;
}
// Garbage-collect consumed rx mbuf chains: refill each with fresh data
// buffers and return them to the rx mempool in bulk. Runs once the number
// of free segments crosses rx_gc_thresh, or unconditionally when `force`.
// Returns true if the threshold is still exceeded afterwards (i.e. more
// GC work remains).
bool DPDKQueuePair::rx_gc(bool force)
{
  if (_num_rx_free_segs >= rx_gc_thresh || force) {
    ldout(cct, 10) << __func__ << " free segs " << _num_rx_free_segs
                   << " thresh " << rx_gc_thresh
                   << " free pkts " << _rx_free_pkts.size()
                   << dendl;

    while (!_rx_free_pkts.empty()) {
      //
      // Use back() + pop_back() semantics to avoid an extra
      // _rx_free_pkts.clear() at the end of the function - clear() has a
      // linear complexity.
      //
      auto m = _rx_free_pkts.back();
      _rx_free_pkts.pop_back();

      if (!refill_one_cluster(m)) {
        ldout(cct, 1) << __func__ << " get new mbuf failed " << dendl;
        break;
      }
    }
    // Drop each segment's refcount/metadata before the bulk mempool put.
    for (auto&& m : _rx_free_bufs) {
      rte_pktmbuf_prefree_seg(m);
    }

    if (_rx_free_bufs.size()) {
      rte_mempool_put_bulk(_pktmbuf_pool_rx,
                           (void **)_rx_free_bufs.data(),
                           _rx_free_bufs.size());

      // TODO: ceph_assert() in a fast path! Remove me ASAP!
      ceph_assert(_num_rx_free_segs >= _rx_free_bufs.size());

      _num_rx_free_segs -= _rx_free_bufs.size();
      _rx_free_bufs.clear();

      // Invariant: the pending-packet list and the free-segment count are
      // either both empty or both non-empty.
      // TODO: ceph_assert() in a fast path! Remove me ASAP!
      ceph_assert((_rx_free_pkts.empty() && !_num_rx_free_segs) ||
                  (!_rx_free_pkts.empty() && _num_rx_free_segs));
    }
  }

  return _num_rx_free_segs >= rx_gc_thresh;
}
// Convert a burst of received mbufs into Packets and hand them to the
// device's L2 receive path, updating rx perf counters along the way.
void DPDKQueuePair::process_packets(
  struct rte_mbuf **bufs, uint16_t count)
{
  uint64_t nr_frags = 0, bytes = 0;

  for (uint16_t i = 0; i < count; i++) {
    struct rte_mbuf *m = bufs[i];
    offload_info oi;

    std::optional<Packet> p = from_mbuf(m);

    // Drop the packet if translation above has failed
    if (!p) {
      perf_logger->inc(l_dpdk_qp_rx_no_memory_errors);
      continue;
    }
    // ldout(cct, 0) << __func__ << " len " << p->len() << " " << dendl;

    nr_frags += m->nb_segs;
    bytes += m->pkt_len;

    // Set stripped VLAN value if available
    if ((_dev->_dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) &&
        (m->ol_flags & PKT_RX_VLAN_STRIPPED)) {
      oi.vlan_tci = m->vlan_tci;
    }

    if (_dev->get_hw_features().rx_csum_offload) {
      if (m->ol_flags & (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD)) {
        // Packet with bad checksum, just drop it.
        perf_logger->inc(l_dpdk_qp_rx_bad_checksum_errors);
        continue;
      }
      // Note that when _hw_features.rx_csum_offload is on, the receive
      // code for ip, tcp and udp will assume they don't need to check
      // the checksum again, because we did this here.
    }

    p->set_offload_info(oi);
    if (m->ol_flags & PKT_RX_RSS_HASH) {
      p->set_rss_hash(m->hash.rss);
    }

    _dev->l2receive(_qid, std::move(*p));
  }

  perf_logger->inc(l_dpdk_qp_rx_packets, count);
  perf_logger->set(l_dpdk_qp_rx_last_bunch, count);
  perf_logger->inc(l_dpdk_qp_rx_fragments, nr_frags);
  perf_logger->inc(l_dpdk_qp_rx_bytes, bytes);
}
// Read one burst of packets (up to packet_read_size) from the NIC rx
// queue and process it. Returns true if any packets were received.
bool DPDKQueuePair::poll_rx_once()
{
  struct rte_mbuf *buf[packet_read_size];

  /* read a port */
#ifdef CEPH_PERF_DEV
  uint64_t start = Cycles::rdtsc();
#endif
  uint16_t count = rte_eth_rx_burst(_dev_port_idx, _qid,
                                    buf, packet_read_size);

  /* Now process the NIC packets read */
  if (likely(count > 0)) {
    process_packets(buf, count);
#ifdef CEPH_PERF_DEV
    rx_cycles = Cycles::rdtsc() - start;
    rx_count += count;
#endif
  }
#ifdef CEPH_PERF_DEV
  else {
    // Idle burst: periodically log average per-packet rx/tx cost and
    // reset the accumulators.
    if (rx_count > 10000 && tx_count) {
      ldout(cct, 0) << __func__ << " rx count=" << rx_count << " avg rx=" << Cycles::to_nanoseconds(rx_cycles)/rx_count << "ns "
                    << " tx count=" << tx_count << " avg tx=" << Cycles::to_nanoseconds(tx_cycles)/tx_count << "ns"
                    << dendl;
      rx_count = rx_cycles = tx_count = tx_cycles = 0;
    }
  }
#endif
  return count;
}
// Build the per-queue tx buffer factory: create (or reuse) the queue's tx
// mempool, configure the hardware tx queue, and pre-load the factory ring
// with every buffer from the pool.
DPDKQueuePair::tx_buf_factory::tx_buf_factory(CephContext *c,
                                              DPDKDevice *dev, uint8_t qid): cct(c)
{
  // One tx mempool per queue, named "<pool_name><qid>_tx"; reuse an
  // existing pool with that name if this process already created one.
  std::string name = std::string(pktmbuf_pool_name) + std::to_string(qid) + "_tx";

  _pool = rte_mempool_lookup(name.c_str());
  if (!_pool) {
    ldout(cct, 0) << __func__ << " Creating Tx mbuf pool '" << name.c_str()
                  << "' [" << mbufs_per_queue_tx << " mbufs] ..." << dendl;
    //
    // We are going to push the buffers from the mempool into
    // the circular_buffer and then poll them from there anyway, so
    // we prefer to make a mempool non-atomic in this case.
    //
    _pool = rte_mempool_create(name.c_str(),
                               mbufs_per_queue_tx, inline_mbuf_size,
                               mbuf_cache_size,
                               sizeof(struct rte_pktmbuf_pool_private),
                               rte_pktmbuf_pool_init, nullptr,
                               rte_pktmbuf_init, nullptr,
                               rte_socket_id(), 0);

    if (!_pool) {
      lderr(cct) << __func__ << " Failed to create mempool for Tx" << dendl;
      ceph_abort();
    }
    // NOTE(review): the HW tx queue is only set up when the pool is
    // freshly created; when an existing pool is found the queue is
    // presumed to have been configured already — confirm this is intended.
    if (rte_eth_tx_queue_setup(dev->port_idx(), qid, default_ring_size,
                               rte_eth_dev_socket_id(dev->port_idx()),
                               dev->def_tx_conf()) < 0) {
      lderr(cct) << __func__ << " cannot initialize tx queue" << dendl;
      ceph_abort();
    }
  }

  //
  // Fill the factory with the buffers from the mempool allocated
  // above.
  //
  init_factory();
}
// i40e HW restriction: at most i40e_max_xmit_segment_frags (8) descriptors
// per packet (non-TSO) or per MSS window (TSO). Returns true when the
// cluster violates this and must be linearized before sending.
bool DPDKQueuePair::tx_buf::i40e_should_linearize(rte_mbuf *head)
{
  bool is_tso = head->ol_flags & PKT_TX_TCP_SEG;

  // For a non-TSO case: number of fragments should not exceed 8
  if (!is_tso){
    return head->nb_segs > i40e_max_xmit_segment_frags;
  }

  //
  // For a TSO case each MSS window should not include more than 8
  // fragments including headers.
  //

  // Calculate the number of frags containing headers.
  //
  // Note: we support neither VLAN nor tunneling thus headers size
  // accounting is super simple.
  //
  size_t headers_size = head->l2_len + head->l3_len + head->l4_len;
  unsigned hdr_frags = 0;
  size_t cur_payload_len = 0;
  rte_mbuf *cur_seg = head;

  while (cur_seg && cur_payload_len < headers_size) {
    cur_payload_len += cur_seg->data_len;
    cur_seg = cur_seg->next;
    hdr_frags++;
  }

  //
  // Header fragments will be used for each TSO segment, thus the
  // maximum number of data segments will be 8 minus the number of
  // header fragments.
  //
  // It's unclear from the spec how the first TSO segment is treated
  // if the last fragment with headers contains some data bytes:
  // whether this fragment will be accounted as a single fragment or
  // as two separate fragments. We prefer to play it safe and assume
  // that this fragment will be accounted as two separate fragments.
  //
  size_t max_win_size = i40e_max_xmit_segment_frags - hdr_frags;

  if (head->nb_segs <= max_win_size) {
    return false;
  }

  // Get the data (without headers) part of the first data fragment
  size_t prev_frag_data = cur_payload_len - headers_size;
  auto mss = head->tso_segsz;

  // Walk the chain one MSS window at a time, counting the fragments that
  // fall into each window (carry-over from the previous window included).
  while (cur_seg) {
    unsigned frags_in_seg = 0;
    size_t cur_seg_size = 0;

    if (prev_frag_data) {
      cur_seg_size = prev_frag_data;
      frags_in_seg++;
      prev_frag_data = 0;
    }

    while (cur_seg_size < mss && cur_seg) {
      cur_seg_size += cur_seg->data_len;
      cur_seg = cur_seg->next;
      frags_in_seg++;

      if (frags_in_seg > max_win_size) {
        return true;
      }
    }

    if (cur_seg_size > mss) {
      prev_frag_data = cur_seg_size - mss;
    }
  }

  return false;
}
// Translate the Packet's offload_info into DPDK offload flags on the
// cluster HEAD mbuf: IP checksum offload, TCP checksum offload and TSO.
void DPDKQueuePair::tx_buf::set_cluster_offload_info(const Packet& p, const DPDKQueuePair& qp, rte_mbuf* head)
{
  // Handle TCP checksum offload
  auto oi = p.offload_info();
  if (oi.needs_ip_csum) {
    head->ol_flags |= PKT_TX_IP_CKSUM;
    // TODO: Take a VLAN header into an account here
    head->l2_len = sizeof(struct rte_ether_hdr);
    head->l3_len = oi.ip_hdr_len;
  }

  if (qp.port().get_hw_features().tx_csum_l4_offload) {
    if (oi.protocol == ip_protocol_num::tcp) {
      head->ol_flags |= PKT_TX_TCP_CKSUM;
      // TODO: Take a VLAN header into an account here
      head->l2_len = sizeof(struct rte_ether_hdr);
      head->l3_len = oi.ip_hdr_len;

      if (oi.tso_seg_size) {
        // TSO additionally requires IP checksum offload to be requested.
        ceph_assert(oi.needs_ip_csum);
        head->ol_flags |= PKT_TX_TCP_SEG;
        head->l4_len = oi.tcp_hdr_len;
        head->tso_segsz = oi.tso_seg_size;
      }
    }
  }
}
// Build an mbuf cluster that references the Packet's fragments in place
// (zero-copy), copying only where required (frag0 header safety). Returns
// the HEAD tx_buf, or nullptr when tx buffers are exhausted.
DPDKQueuePair::tx_buf* DPDKQueuePair::tx_buf::from_packet_zc(
  CephContext *cct, Packet&& p, DPDKQueuePair& qp)
{
  // Too fragmented - linearize
  if (p.nr_frags() > max_frags) {
    p.linearize();
    qp.perf_logger->inc(l_dpdk_qp_tx_linearize_ops);
  }

build_mbuf_cluster:
  rte_mbuf *head = nullptr, *last_seg = nullptr;
  unsigned nsegs = 0;

  //
  // Create a HEAD of the fragmented packet: check if frag0 has to be
  // copied and if yes - send it in a copy way
  //
  if (!check_frag0(p)) {
    if (!copy_one_frag(qp, p.frag(0), head, last_seg, nsegs)) {
      ldout(cct, 1) << __func__ << " no available mbuf for " << p.frag(0).size << dendl;
      return nullptr;
    }
  } else if (!translate_one_frag(qp, p.frag(0), head, last_seg, nsegs)) {
    ldout(cct, 1) << __func__ << " no available mbuf for " << p.frag(0).size << dendl;
    return nullptr;
  }

  unsigned total_nsegs = nsegs;

  // Translate the remaining fragments, chaining each onto the cluster.
  for (unsigned i = 1; i < p.nr_frags(); i++) {
    rte_mbuf *h = nullptr, *new_last_seg = nullptr;
    if (!translate_one_frag(qp, p.frag(i), h, new_last_seg, nsegs)) {
      ldout(cct, 1) << __func__ << " no available mbuf for " << p.frag(i).size << dendl;
      // Out of buffers mid-way: return the partial cluster to the factory.
      me(head)->recycle();
      return nullptr;
    }

    total_nsegs += nsegs;

    // Attach a new buffers' chain to the packet chain
    last_seg->next = h;
    last_seg = new_last_seg;
  }

  // Update the HEAD buffer with the packet info
  head->pkt_len = p.len();
  head->nb_segs = total_nsegs;
  // tx_pkt_burst loops until the next pointer is null, so last_seg->next must
  // be null.
  last_seg->next = nullptr;

  set_cluster_offload_info(p, qp, head);

  //
  // If a packet hasn't been linearized already and the resulting
  // cluster requires the linearisation due to HW limitation:
  //
  //    - Recycle the cluster.
  //    - Linearize the packet.
  //    - Build the cluster once again
  //
  if (head->nb_segs > max_frags ||
      (p.nr_frags() > 1 && qp.port().is_i40e_device() && i40e_should_linearize(head)) ||
      (p.nr_frags() > vmxnet3_max_xmit_segment_frags && qp.port().is_vmxnet3_device())) {
    me(head)->recycle();
    p.linearize();
    qp.perf_logger->inc(l_dpdk_qp_tx_linearize_ops);

    goto build_mbuf_cluster;
  }

  // The last segment keeps the Packet alive until the HW completes it.
  me(last_seg)->set_packet(std::move(p));

  return me(head);
}
// Stream the Packet's fragments into a pre-allocated mbuf cluster, filling
// each segment up to inline_mbuf_data_size bytes. The cluster must already
// be big enough to hold p.len() bytes (enforced by the caller's sizing).
void DPDKQueuePair::tx_buf::copy_packet_to_cluster(const Packet& p, rte_mbuf* head)
{
  rte_mbuf* cur_seg = head;
  size_t cur_seg_offset = 0;
  unsigned cur_frag_idx = 0;
  size_t cur_frag_offset = 0;

  while (true) {
    // Copy as much of the current fragment as fits in the current segment.
    size_t to_copy = std::min(p.frag(cur_frag_idx).size - cur_frag_offset,
                              inline_mbuf_data_size - cur_seg_offset);

    memcpy(rte_pktmbuf_mtod_offset(cur_seg, void*, cur_seg_offset),
           p.frag(cur_frag_idx).base + cur_frag_offset, to_copy);

    cur_frag_offset += to_copy;
    cur_seg_offset += to_copy;

    if (cur_frag_offset >= p.frag(cur_frag_idx).size) {
      ++cur_frag_idx;
      if (cur_frag_idx >= p.nr_frags()) {
        //
        // We are done - set the data size of the last segment
        // of the cluster.
        //
        cur_seg->data_len = cur_seg_offset;
        break;
      }

      cur_frag_offset = 0;
    }

    if (cur_seg_offset >= inline_mbuf_data_size) {
      // Segment full: finalize its length and advance to the next one.
      cur_seg->data_len = inline_mbuf_data_size;
      cur_seg = cur_seg->next;
      cur_seg_offset = 0;

      // FIXME: assert in a fast-path - remove!!!
      ceph_assert(cur_seg);
    }
  }
}
// Build a tx cluster by copying the whole Packet into factory-owned mbufs.
// Returns nullptr for an empty packet or on tx-buffer exhaustion.
DPDKQueuePair::tx_buf* DPDKQueuePair::tx_buf::from_packet_copy(Packet&& p, DPDKQueuePair& qp)
{
  // sanity
  if (!p.len()) {
    return nullptr;
  }

  /*
   * Here we are going to use the fact that the inline data size is a
   * power of two.
   *
   * We will first try to allocate the cluster and only if we are
   * successful - we will go and copy the data.
   */
  auto aligned_len = align_up((size_t)p.len(), inline_mbuf_data_size);
  unsigned nsegs = aligned_len / inline_mbuf_data_size;

  rte_mbuf *head = nullptr, *last_seg = nullptr;

  tx_buf* buf = qp.get_tx_buf();
  if (!buf) {
    return nullptr;
  }

  head = buf->rte_mbuf_p();
  last_seg = head;
  for (unsigned i = 1; i < nsegs; i++) {
    buf = qp.get_tx_buf();
    if (!buf) {
      // Allocation failed mid-way: give back everything taken so far.
      me(head)->recycle();
      return nullptr;
    }

    last_seg->next = buf->rte_mbuf_p();
    last_seg = last_seg->next;
  }

  //
  // If we've got here means that we have succeeded already!
  // We only need to copy the data and set the head buffer with the
  // relevant info.
  //
  head->pkt_len = p.len();
  head->nb_segs = nsegs;
  // tx_pkt_burst loops until the next pointer is null, so last_seg->next must
  // be null.
  last_seg->next = nullptr;

  copy_packet_to_cluster(p, head);

  set_cluster_offload_info(p, qp, head);

  return me(head);
}
// Allocate one tx buffer and copy up to inline_mbuf_data_size bytes of
// `data` into it. Returns the number of bytes consumed, or 0 when the
// factory has no buffers left.
size_t DPDKQueuePair::tx_buf::copy_one_data_buf(
  DPDKQueuePair& qp, rte_mbuf*& m, char* data, size_t buf_len)
{
  tx_buf* buf = qp.get_tx_buf();
  if (buf == nullptr) {
    return 0;
  }

  const size_t bytes = std::min(buf_len, inline_mbuf_data_size);
  m = buf->rte_mbuf_p();

  // mbuf_put(): account the payload in the mbuf before filling it.
  m->data_len = bytes;
  m->pkt_len = bytes;

  memcpy(rte_pktmbuf_mtod(m, void*), data, bytes);

  qp.perf_logger->inc(l_dpdk_qp_tx_copy_ops);
  qp.perf_logger->inc(l_dpdk_qp_tx_copy_bytes, bytes);
  return bytes;
}
/******************************** Interface functions *************************/
// Factory for a DPDKDevice bound to the given port. Aborts if DPDK cannot
// see any usable Ethernet port at all.
std::unique_ptr<DPDKDevice> create_dpdk_net_device(
  CephContext *cct,
  unsigned cores,
  uint8_t port_idx,
  bool use_lro,
  bool enable_fc)
{
  // Check that we have at least one DPDK-able port
  auto avail_ports = rte_eth_dev_count_avail();
  if (avail_ports == 0) {
    ceph_assert(false && "No Ethernet ports - bye\n");
  }
  ldout(cct, 10) << __func__ << " ports number: " << int(avail_ports) << dendl;

  return std::unique_ptr<DPDKDevice>(
    new DPDKDevice(cct, port_idx, cores, use_lro, enable_fc));
}
| 45,791 | 32.254902 | 131 |
cc
|
null |
ceph-main/src/msg/async/dpdk/DPDK.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
/*
* This file is open source software, licensed to you under the terms
* of the Apache License, Version 2.0 (the "License"). See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. You may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (C) 2014 Cloudius Systems, Ltd.
*/
#ifndef CEPH_DPDK_DEV_H
#define CEPH_DPDK_DEV_H
#include <functional>
#include <memory>
#include <optional>
#include <rte_config.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_version.h>
#include "include/page.h"
#include "common/perf_counters.h"
#include "common/admin_socket.h"
#include "msg/async/Event.h"
#include "const.h"
#include "circular_buffer.h"
#include "ethernet.h"
#include "Packet.h"
#include "stream.h"
#include "net.h"
#include "toeplitz.h"
// Deleter functor for smart pointers that own malloc()-allocated memory.
struct free_deleter {
  void operator()(void* p) { ::free(p); }
};
// Perf-counter indices for per-device DPDK statistics
// (mirrored from rte_eth_stats by DPDKQueuePair::handle_stats()).
enum {
  l_dpdk_dev_first = 58800,
  l_dpdk_dev_rx_mcast,
  l_dpdk_dev_rx_total_errors,
  l_dpdk_dev_tx_total_errors,
  l_dpdk_dev_rx_badcrc_errors,
  l_dpdk_dev_rx_dropped_errors,
  l_dpdk_dev_rx_nombuf_errors,
  l_dpdk_dev_last
};
// Perf-counter indices for per-queue-pair DPDK statistics
// (registered per queue in the DPDKQueuePair constructor).
enum {
  l_dpdk_qp_first = 58900,
  l_dpdk_qp_rx_packets,
  l_dpdk_qp_tx_packets,
  l_dpdk_qp_rx_bad_checksum_errors,
  l_dpdk_qp_rx_no_memory_errors,
  l_dpdk_qp_rx_bytes,
  l_dpdk_qp_tx_bytes,
  l_dpdk_qp_rx_last_bunch,
  l_dpdk_qp_tx_last_bunch,
  l_dpdk_qp_rx_fragments,
  l_dpdk_qp_tx_fragments,
  l_dpdk_qp_rx_copy_ops,
  l_dpdk_qp_tx_copy_ops,
  l_dpdk_qp_rx_copy_bytes,
  l_dpdk_qp_tx_copy_bytes,
  l_dpdk_qp_rx_linearize_ops,
  l_dpdk_qp_tx_linearize_ops,
  l_dpdk_qp_tx_queue_length,
  l_dpdk_qp_last
};
class DPDKDevice;
class DPDKWorker;
#ifndef MARKER
typedef void *MARKER[0]; /**< generic marker for a point in a structure */
#endif
class DPDKQueuePair {
using packet_provider_type = std::function<std::optional<Packet> ()>;
public:
void configure_proxies(const std::map<unsigned, float>& cpu_weights);
// build REdirection TAble for cpu_weights map: target cpu -> weight
void build_sw_reta(const std::map<unsigned, float>& cpu_weights);
  // Queue a packet handed over from another core's queue pair; it will be
  // drained by this queue's tx path.
  void proxy_send(Packet p) {
    _proxy_packetq.push_back(std::move(p));
  }
  // Register a callback the tx poller uses to pull packets from the
  // upper layers (see poll_tx()).
  void register_packet_provider(packet_provider_type func) {
    _pkt_providers.push_back(std::move(func));
  }
bool poll_tx();
friend class DPDKDevice;
class tx_buf_factory;
class tx_buf {
friend class DPDKQueuePair;
public:
static tx_buf* me(rte_mbuf* mbuf) {
return reinterpret_cast<tx_buf*>(mbuf);
}
private:
/**
* Checks if the original packet of a given cluster should be linearized
* due to HW limitations.
*
* @param head head of a cluster to check
*
* @return TRUE if a packet should be linearized.
*/
static bool i40e_should_linearize(rte_mbuf *head);
/**
* Sets the offload info in the head buffer of an rte_mbufs cluster.
*
* @param p an original packet the cluster is built for
* @param qp QP handle
* @param head a head of an rte_mbufs cluster
*/
static void set_cluster_offload_info(const Packet& p, const DPDKQueuePair& qp, rte_mbuf* head);
/**
* Creates a tx_buf cluster representing a given packet in a "zero-copy"
* way.
*
* @param p packet to translate
* @param qp DPDKQueuePair handle
*
* @return the HEAD tx_buf of the cluster or nullptr in case of a
* failure
*/
static tx_buf* from_packet_zc(
CephContext *cct, Packet&& p, DPDKQueuePair& qp);
/**
* Copy the contents of the "packet" into the given cluster of
* rte_mbuf's.
*
* @note Size of the cluster has to be big enough to accommodate all the
* contents of the given packet.
*
* @param p packet to copy
* @param head head of the rte_mbuf's cluster
*/
static void copy_packet_to_cluster(const Packet& p, rte_mbuf* head);
/**
* Creates a tx_buf cluster representing a given packet in a "copy" way.
*
* @param p packet to translate
* @param qp DPDKQueuePair handle
*
* @return the HEAD tx_buf of the cluster or nullptr in case of a
* failure
*/
static tx_buf* from_packet_copy(Packet&& p, DPDKQueuePair& qp);
/**
* Zero-copy handling of a single fragment.
*
* @param do_one_buf Functor responsible for a single rte_mbuf
* handling
* @param qp DPDKQueuePair handle (in)
* @param frag Fragment to copy (in)
* @param head Head of the cluster (out)
* @param last_seg Last segment of the cluster (out)
* @param nsegs Number of segments in the cluster (out)
*
* @return TRUE in case of success
*/
    // Generic driver shared by translate_one_frag()/copy_one_frag():
    // applies do_one_buf to successive pieces of `frag` and chains the
    // resulting mbufs into a cluster.
    template <class DoOneBufFunc>
    static bool do_one_frag(DoOneBufFunc do_one_buf, DPDKQueuePair& qp,
                            fragment& frag, rte_mbuf*& head,
                            rte_mbuf*& last_seg, unsigned& nsegs) {
      size_t len, left_to_set = frag.size;
      char* base = frag.base;

      rte_mbuf* m;

      // TODO: ceph_assert() in a fast path! Remove me ASAP!
      ceph_assert(frag.size);

      // Create a HEAD of mbufs' cluster and set the first bytes into it
      len = do_one_buf(qp, head, base, left_to_set);
      if (!len) {
        return false;
      }

      left_to_set -= len;
      base += len;
      nsegs = 1;

      //
      // Set the rest of the data into the new mbufs and chain them to
      // the cluster.
      //
      rte_mbuf* prev_seg = head;
      while (left_to_set) {
        len = do_one_buf(qp, m, base, left_to_set);
        if (!len) {
          // Out of buffers: release the partial cluster before failing.
          me(head)->recycle();
          return false;
        }

        left_to_set -= len;
        base += len;
        nsegs++;

        prev_seg->next = m;
        prev_seg = m;
      }

      // Return the last mbuf in the cluster
      last_seg = prev_seg;

      return true;
    }
/**
* Zero-copy handling of a single fragment.
*
* @param qp DPDKQueuePair handle (in)
* @param frag Fragment to copy (in)
* @param head Head of the cluster (out)
* @param last_seg Last segment of the cluster (out)
* @param nsegs Number of segments in the cluster (out)
*
* @return TRUE in case of success
*/
    // Zero-copy translation of one fragment (see do_one_frag()).
    static bool translate_one_frag(DPDKQueuePair& qp, fragment& frag,
                                   rte_mbuf*& head, rte_mbuf*& last_seg,
                                   unsigned& nsegs) {
      return do_one_frag(set_one_data_buf, qp, frag, head,
                         last_seg, nsegs);
    }
/**
* Copies one fragment into the cluster of rte_mbuf's.
*
* @param qp DPDKQueuePair handle (in)
* @param frag Fragment to copy (in)
* @param head Head of the cluster (out)
* @param last_seg Last segment of the cluster (out)
* @param nsegs Number of segments in the cluster (out)
*
* We return the "last_seg" to avoid traversing the cluster in order to get
* it.
*
* @return TRUE in case of success
*/
    // Copying translation of one fragment (see do_one_frag()).
    static bool copy_one_frag(DPDKQueuePair& qp, fragment& frag,
                              rte_mbuf*& head, rte_mbuf*& last_seg,
                              unsigned& nsegs) {
      return do_one_frag(copy_one_data_buf, qp, frag, head,
                         last_seg, nsegs);
    }
/**
* Allocates a single rte_mbuf and sets it to point to a given data
* buffer.
*
* @param qp DPDKQueuePair handle (in)
* @param m New allocated rte_mbuf (out)
* @param va virtual address of a data buffer (in)
* @param buf_len length of the data to copy (in)
*
* @return The actual number of bytes that has been set in the mbuf
*/
    static size_t set_one_data_buf(
      DPDKQueuePair& qp, rte_mbuf*& m, char* va, size_t buf_len) {
      static constexpr size_t max_frag_len = 15 * 1024; // 15K

      // FIXME: current all tx buf is allocated without rte_malloc
      return copy_one_data_buf(qp, m, va, buf_len);

      // NOTE(review): everything below is deliberately unreachable until
      // the rte_malloc-backed zero-copy tx path above is re-enabled.
      //
      // Currently we break a buffer on a 15K boundary because 82599
      // devices have a 15.5K limitation on a maximum single fragment
      // size.
      //
      rte_iova_t pa = rte_malloc_virt2iova(va);
      if (!pa)
        return copy_one_data_buf(qp, m, va, buf_len);

      ceph_assert(buf_len);
      tx_buf* buf = qp.get_tx_buf();
      if (!buf) {
        return 0;
      }

      size_t len = std::min(buf_len, max_frag_len);

      buf->set_zc_info(va, pa, len);
      m = buf->rte_mbuf_p();

      return len;
    }
/**
* Allocates a single rte_mbuf and copies a given data into it.
*
* @param qp DPDKQueuePair handle (in)
* @param m New allocated rte_mbuf (out)
* @param data Data to copy from (in)
* @param buf_len length of the data to copy (in)
*
* @return The actual number of bytes that has been copied
*/
static size_t copy_one_data_buf(
DPDKQueuePair& qp, rte_mbuf*& m, char* data, size_t buf_len);
/**
* Checks if the first fragment of the given packet satisfies the
* zero-copy flow requirement: its first 128 bytes should not cross the
* 4K page boundary. This is required in order to avoid splitting packet
* headers.
*
* @param p packet to check
*
* @return TRUE if packet is ok and FALSE otherwise.
*/
static bool check_frag0(Packet& p)
{
//
// First frag is special - it has headers that should not be split.
// If the addressing is such that the first fragment has to be
// split, then send this packet in a (non-zero) copy flow. We'll
// check if the first 128 bytes of the first fragment reside in the
// physically contiguous area. If that's the case - we are good to
// go.
//
if (p.frag(0).size < 128)
return false;
return true;
}
public:
    // Construct over an rte_mbuf (placement-new'ed by the factory); stash
    // the mbuf's original IOVA and data offset so reset_zc() can restore
    // them after a zero-copy send overwrites them.
    tx_buf(tx_buf_factory& fc) : _fc(fc) {
      _buf_physaddr = _mbuf.buf_iova;
      _data_off = _mbuf.data_off;
    }
    // Access the underlying DPDK mbuf (tx_buf is layout-compatible with
    // rte_mbuf: _mbuf is its first member).
    rte_mbuf* rte_mbuf_p() { return &_mbuf; }

    // Point the mbuf directly at caller-owned memory for a zero-copy send;
    // reset_zc() undoes this.
    void set_zc_info(void* va, phys_addr_t pa, size_t len) {
      // mbuf_put()
      _mbuf.data_len = len;
      _mbuf.pkt_len = len;

      // Set the mbuf to point to our data
      _mbuf.buf_addr = va;
      _mbuf.buf_iova = pa;
      _mbuf.data_off = 0;
      _is_zc = true;
    }
    // Undo set_zc_info(): release any held Packet and restore the mbuf's
    // original buffer address, IOVA and data offset.
    void reset_zc() {
      //
      // If this mbuf was the last in a cluster and contains an
      // original packet object then call the destructor of the
      // original packet object.
      //
      if (_p) {
        //
        // Reset the std::optional. This in particular is going
        // to call the "packet"'s destructor and reset the
        // "optional" state to "nonengaged".
        //
        _p.reset();
      } else if (!_is_zc) {
        // Plain (copy-flow) buffer holding no Packet: nothing to restore.
        return;
      }

      // Restore the rte_mbuf fields we trashed in set_zc_info()
      _mbuf.buf_iova = _buf_physaddr;
      _mbuf.buf_addr = rte_mbuf_to_baddr(&_mbuf);
      _mbuf.data_off = _data_off;

      _is_zc = false;
    }
void recycle() {
struct rte_mbuf *m = &_mbuf, *m_next;
while (m != nullptr) {
m_next = m->next;
rte_pktmbuf_reset(m);
_fc.put(me(m));
m = m_next;
}
}
    // Take ownership of the Packet so its buffers stay alive until the HW
    // completes this cluster (released again by reset_zc()).
    void set_packet(Packet&& p) {
      _p = std::move(p);
    }
private:
struct rte_mbuf _mbuf;
MARKER private_start;
std::optional<Packet> _p;
phys_addr_t _buf_physaddr;
uint16_t _data_off;
// TRUE if underlying mbuf has been used in the zero-copy flow
bool _is_zc = false;
// buffers' factory the buffer came from
tx_buf_factory& _fc;
MARKER private_end;
};
// Factory/recycler for tx_buf objects. All buffers are pre-allocated out
// of a DPDK mempool (_pool) so that the mempool itself stays empty; the
// PMD "frees" completed mbufs back into the mempool, which is how we
// detect TX completions (see get_one_completed()).
class tx_buf_factory {
//
// Number of buffers to free in each GC iteration:
// We want the buffers to be allocated from the mempool as many as
// possible.
//
// On the other hand if there is no Tx for some time we want the
// completions to be eventually handled. Thus we choose the smallest
// possible packets count number here.
//
static constexpr int gc_count = 1;
public:
tx_buf_factory(CephContext *c, DPDKDevice *dev, uint8_t qid);
~tx_buf_factory() {
// put all mbuf back into mempool in order to make the next factory work
while (gc());
rte_mempool_put_bulk(_pool, (void**)_ring.data(),
_ring.size());
}
/**
* @note Should not be called if there are no free tx_buf's
*
* @return a free tx_buf object
*/
tx_buf* get() {
// Take completed from the HW first
tx_buf *pkt = get_one_completed();
if (pkt) {
pkt->reset_zc();
return pkt;
}
//
// If there are no completed at the moment - take from the
// factory's cache.
//
if (_ring.empty()) {
return nullptr;
}
pkt = _ring.back();
_ring.pop_back();
return pkt;
}
// Return a buffer to the cache, restoring its mbuf state first.
void put(tx_buf* buf) {
buf->reset_zc();
_ring.push_back(buf);
}
unsigned ring_size() const {
return _ring.size();
}
// Reclaim up to gc_count HW-completed buffers into the cache.
// Returns true if a full batch was reclaimed (i.e. more may remain).
bool gc() {
for (int cnt = 0; cnt < gc_count; ++cnt) {
auto tx_buf_p = get_one_completed();
if (!tx_buf_p) {
return false;
}
put(tx_buf_p);
}
return true;
}
private:
/**
* Fill the mbufs circular buffer: after this the _pool will become
* empty. We will use it to catch the completed buffers:
*
* - Underlying PMD drivers will "free" the mbufs once they are
* completed.
* - We will poll the _pktmbuf_pool_tx till it's empty and release
* all the buffers from the freed mbufs.
*/
void init_factory() {
while (rte_mbuf* mbuf = rte_pktmbuf_alloc(_pool)) {
_ring.push_back(new(tx_buf::me(mbuf)) tx_buf{*this});
}
}
/**
* PMD puts the completed buffers back into the mempool they have
* originally come from.
*
* @note rte_pktmbuf_alloc() resets the mbuf so there is no need to call
* rte_pktmbuf_reset() here again.
*
* @return a single tx_buf that has been completed by HW.
*/
tx_buf* get_one_completed() {
return tx_buf::me(rte_pktmbuf_alloc(_pool));
}
private:
CephContext *cct;
// Cache of currently free (not in-flight) tx_buf's.
std::vector<tx_buf*> _ring;
rte_mempool* _pool = nullptr;
};
public:
explicit DPDKQueuePair(CephContext *c, EventCenter *cen, DPDKDevice* dev, uint8_t qid);
~DPDKQueuePair() {
// Drop the periodic device-stats timer if one was registered.
if (device_stat_time_fd) {
center->delete_time_event(device_stat_time_fd);
}
// Force a final RX garbage collection before the queue goes away.
rx_gc(true);
}
// Begin polling this queue for incoming packets on the event center.
void rx_start() {
_rx_poller.emplace(this);
}
// Transmit packets from pb using the zero-copy conversion into tx_bufs.
// Returns the number of packets actually handed to the NIC.
uint32_t send(circular_buffer<Packet>& pb) {
// Zero-copy send
return _send(pb, [&] (Packet&& p) {
return tx_buf::from_packet_zc(cct, std::move(p), *this);
});
}
DPDKDevice& port() const { return *_dev; }
tx_buf* get_tx_buf() { return _tx_buf_factory.get(); }
void handle_stats();
private:
// Core TX path: convert queued Packets into mbufs (only when the
// previous burst has fully drained), then push the pending burst to the
// NIC. _tx_burst/_tx_burst_idx persist across calls so a partially
// accepted burst is retried before new packets are converted.
template <class Func>
uint32_t _send(circular_buffer<Packet>& pb, Func &&packet_to_tx_buf_p) {
if (_tx_burst.size() == 0) {
for (auto&& p : pb) {
// TODO: ceph_assert() in a fast path! Remove me ASAP!
ceph_assert(p.len());
tx_buf* buf = packet_to_tx_buf_p(std::move(p));
if (!buf) {
// Out of tx_bufs: stop converting, send what we have.
break;
}
_tx_burst.push_back(buf->rte_mbuf_p());
}
}
uint16_t sent = rte_eth_tx_burst(_dev_port_idx, _qid,
_tx_burst.data() + _tx_burst_idx,
_tx_burst.size() - _tx_burst_idx);
uint64_t nr_frags = 0, bytes = 0;
// NOTE(review): this reads pkt_len/nb_segs of mbufs already handed to
// the PMD; presumably safe because completions are only reclaimed via
// the tx_buf_factory mempool — confirm against the PMD's free timing.
for (int i = 0; i < sent; i++) {
rte_mbuf* m = _tx_burst[_tx_burst_idx + i];
bytes += m->pkt_len;
nr_frags += m->nb_segs;
pb.pop_front();
}
perf_logger->inc(l_dpdk_qp_tx_fragments, nr_frags);
perf_logger->inc(l_dpdk_qp_tx_bytes, bytes);
// Advance past what the NIC accepted; reset once the burst drains.
_tx_burst_idx += sent;
if (_tx_burst_idx == _tx_burst.size()) {
_tx_burst_idx = 0;
_tx_burst.clear();
}
return sent;
}
/**
* Allocate a new data buffer and set the mbuf to point to it.
*
* Do some DPDK hacks to work on PMD: it assumes that the buf_addr
* points to the private data of RTE_PKTMBUF_HEADROOM before the actual
* data buffer.
*
* @param m mbuf to update
*/
/**
 * Attach a pre-allocated data buffer (taken from `datas`) to the mbuf.
 *
 * Do some DPDK hacks to work on PMD: it assumes that the buf_addr
 * points to the private data of RTE_PKTMBUF_HEADROOM before the actual
 * data buffer.
 *
 * @param m mbuf to update
 * @param size requested size — NOTE(review): currently unused; the
 *             buffers in `datas` are presumably pre-sized — confirm.
 * @param datas pool of free data buffers; the last entry is consumed.
 *
 * @return true if a buffer was attached, false if `datas` was empty.
 */
static bool refill_rx_mbuf(rte_mbuf* m, size_t size,
std::vector<void*> &datas) {
if (datas.empty())
return false;
void *data = datas.back();
datas.pop_back();
//
// Set the mbuf to point to our data.
//
// Do some DPDK hacks to work on PMD: it assumes that the buf_addr
// points to the private data of RTE_PKTMBUF_HEADROOM before the
// actual data buffer.
//
m->buf_addr = (char*)data - RTE_PKTMBUF_HEADROOM;
m->buf_iova = rte_mem_virt2iova(data) - RTE_PKTMBUF_HEADROOM;
return true;
}
bool init_rx_mbuf_pool();
bool rx_gc(bool force=false);
bool refill_one_cluster(rte_mbuf* head);
/**
* Polls for a burst of incoming packets. This function will not block and
* will immediately return after processing all available packets.
*
*/
bool poll_rx_once();
/**
* Translates an rte_mbuf's into packet and feeds them to _rx_stream.
*
* @param bufs An array of received rte_mbuf's
* @param count Number of buffers in the bufs[]
*/
void process_packets(struct rte_mbuf **bufs, uint16_t count);
/**
* Translate rte_mbuf into the "packet".
* @param m mbuf to translate
*
* @return a "optional" object representing the newly received data if in an
* "engaged" state or an error if in a "disengaged" state.
*/
std::optional<Packet> from_mbuf(rte_mbuf* m);
/**
* Transform an LRO rte_mbuf cluster into the "packet" object.
* @param m HEAD of the mbufs' cluster to transform
*
* @return a "optional" object representing the newly received LRO packet if
* in an "engaged" state or an error if in a "disengaged" state.
*/
std::optional<Packet> from_mbuf_lro(rte_mbuf* m);
private:
CephContext *cct;
std::vector<packet_provider_type> _pkt_providers;
std::optional<std::array<uint8_t, 128>> _sw_reta;
circular_buffer<Packet> _proxy_packetq;
stream<Packet> _rx_stream;
circular_buffer<Packet> _tx_packetq;
std::vector<void*> _alloc_bufs;
PerfCounters *perf_logger;
DPDKDevice* _dev;
uint8_t _dev_port_idx;
EventCenter *center;
uint8_t _qid;
rte_mempool *_pktmbuf_pool_rx;
std::vector<rte_mbuf*> _rx_free_pkts;
std::vector<rte_mbuf*> _rx_free_bufs;
std::vector<fragment> _frags;
std::vector<char*> _bufs;
size_t _num_rx_free_segs = 0;
uint64_t device_stat_time_fd = 0;
#ifdef CEPH_PERF_DEV
uint64_t rx_cycles = 0;
uint64_t rx_count = 0;
uint64_t tx_cycles = 0;
uint64_t tx_count = 0;
#endif
// Event-center poller that drives the TX path every loop iteration.
class DPDKTXPoller : public EventCenter::Poller {
DPDKQueuePair *qp;
public:
explicit DPDKTXPoller(DPDKQueuePair *qp)
: EventCenter::Poller(qp->center, "DPDK::DPDKTXPoller"), qp(qp) {}
virtual int poll() {
return qp->poll_tx();
}
} _tx_poller;
class DPDKRXGCPoller : public EventCenter::Poller {
DPDKQueuePair *qp;
public:
explicit DPDKRXGCPoller(DPDKQueuePair *qp)
: EventCenter::Poller(qp->center, "DPDK::DPDKRXGCPoller"), qp(qp) {}
virtual int poll() {
return qp->rx_gc();
}
} _rx_gc_poller;
tx_buf_factory _tx_buf_factory;
// Event-center poller for the RX burst path; instantiated lazily by
// rx_start() (see _rx_poller below).
class DPDKRXPoller : public EventCenter::Poller {
DPDKQueuePair *qp;
public:
explicit DPDKRXPoller(DPDKQueuePair *qp)
: EventCenter::Poller(qp->center, "DPDK::DPDKRXPoller"), qp(qp) {}
virtual int poll() {
return qp->poll_rx_once();
}
};
std::optional<DPDKRXPoller> _rx_poller;
// Event-center poller that reclaims HW-completed TX buffers.
class DPDKTXGCPoller : public EventCenter::Poller {
DPDKQueuePair *qp;
public:
explicit DPDKTXGCPoller(DPDKQueuePair *qp)
: EventCenter::Poller(qp->center, "DPDK::DPDKTXGCPoller"), qp(qp) {}
virtual int poll() {
return qp->_tx_buf_factory.gc();
}
} _tx_gc_poller;
std::vector<rte_mbuf*> _tx_burst;
uint16_t _tx_burst_idx = 0;
};
class DPDKDevice {
public:
CephContext *cct;
PerfCounters *perf_logger;
std::vector<std::unique_ptr<DPDKQueuePair>> _queues;
std::vector<DPDKWorker*> workers;
size_t _rss_table_bits = 0;
uint8_t _port_idx;
uint16_t _num_queues;
unsigned cores;
hw_features _hw_features;
uint8_t _queues_ready = 0;
unsigned _home_cpu;
bool _use_lro;
bool _enable_fc;
std::vector<uint16_t> _redir_table;
rss_key_type _rss_key;
struct rte_flow *_flow = nullptr;
bool _is_i40e_device = false;
bool _is_vmxnet3_device = false;
std::unique_ptr<AdminSocketHook> dfx_hook;
public:
rte_eth_dev_info _dev_info = {};
/**
* The final stage of a port initialization.
* @note Must be called *after* all queues from stage (2) have been
* initialized.
*/
int init_port_fini();
void nic_stats_dump(Formatter *f);
void nic_xstats_dump(Formatter *f);
private:
/**
* Port initialization consists of 3 main stages:
* 1) General port initialization which ends with a call to
* rte_eth_dev_configure() where we request the needed number of Rx and
* Tx queues.
* 2) Individual queues initialization. This is done in the constructor of
* DPDKQueuePair class. In particular the memory pools for queues are allocated
* in this stage.
* 3) The final stage of the initialization which starts with the call of
* rte_eth_dev_start() after which the port becomes fully functional. We
* will also wait for a link to get up in this stage.
*/
/**
* First stage of the port initialization.
*
* @return 0 in case of success and an appropriate error code in case of an
* error.
*/
int init_port_start();
/**
* Check the link status of out port in up to 9s, and print them finally.
*/
int check_port_link_status();
/**
* Configures the HW Flow Control
*/
void set_hw_flow_control();
public:
DPDKDevice(CephContext *c, uint8_t port_idx, uint16_t num_queues, bool use_lro, bool enable_fc):
cct(c), _port_idx(port_idx), _num_queues(num_queues),
_home_cpu(0), _use_lro(use_lro),
_enable_fc(enable_fc) {
_queues = std::vector<std::unique_ptr<DPDKQueuePair>>(_num_queues);
/* now initialise the port we will use */
int ret = init_port_start();
if (ret != 0) {
ceph_assert(false && "Cannot initialise port\n");
}
std::string name(std::string("port") + std::to_string(port_idx));
PerfCountersBuilder plb(cct, name, l_dpdk_dev_first, l_dpdk_dev_last);
plb.add_u64_counter(l_dpdk_dev_rx_mcast, "dpdk_device_receive_multicast_packets", "DPDK received multicast packets");
plb.add_u64_counter(l_dpdk_dev_rx_badcrc_errors, "dpdk_device_receive_badcrc_errors", "DPDK received bad crc errors");
plb.add_u64_counter(l_dpdk_dev_rx_total_errors, "dpdk_device_receive_total_errors", "DPDK received total_errors");
plb.add_u64_counter(l_dpdk_dev_tx_total_errors, "dpdk_device_send_total_errors", "DPDK sendd total_errors");
plb.add_u64_counter(l_dpdk_dev_rx_dropped_errors, "dpdk_device_receive_dropped_errors", "DPDK received dropped errors");
plb.add_u64_counter(l_dpdk_dev_rx_nombuf_errors, "dpdk_device_receive_nombuf_errors", "DPDK received RX mbuf allocation errors");
perf_logger = plb.create_perf_counters();
cct->get_perfcounters_collection()->add(perf_logger);
}
~DPDKDevice() {
cct->get_admin_socket()->unregister_commands(dfx_hook.get());
dfx_hook.reset();
if (_flow)
rte_flow_destroy(_port_idx, _flow, nullptr);
rte_eth_dev_stop(_port_idx);
}
DPDKQueuePair& queue_for_cpu(unsigned cpu) { return *_queues[cpu]; }
void l2receive(int qid, Packet p) {
_queues[qid]->_rx_stream.produce(std::move(p));
}
subscription<Packet> receive(unsigned cpuid, std::function<int (Packet)> next_packet) {
auto sub = _queues[cpuid]->_rx_stream.listen(std::move(next_packet));
_queues[cpuid]->rx_start();
return sub;
}
ethernet_address hw_address() {
struct rte_ether_addr mac;
rte_eth_macaddr_get(_port_idx, &mac);
return mac.addr_bytes;
}
hw_features get_hw_features() {
return _hw_features;
}
const rss_key_type& rss_key() const { return _rss_key; }
uint16_t hw_queues_count() { return _num_queues; }
std::unique_ptr<DPDKQueuePair> init_local_queue(CephContext *c,
EventCenter *center, std::string hugepages, uint16_t qid) {
std::unique_ptr<DPDKQueuePair> qp;
qp = std::unique_ptr<DPDKQueuePair>(new DPDKQueuePair(c, center, this, qid));
return qp;
}
unsigned hash2qid(uint32_t hash) {
// return hash % hw_queues_count();
return _redir_table[hash & (_redir_table.size() - 1)];
}
void set_local_queue(unsigned i, std::unique_ptr<DPDKQueuePair> qp) {
ceph_assert(!_queues[i]);
_queues[i] = std::move(qp);
}
void unset_local_queue(unsigned i) {
ceph_assert(_queues[i]);
_queues[i].reset();
}
template <typename Func>
unsigned forward_dst(unsigned src_cpuid, Func&& hashfn) {
auto& qp = queue_for_cpu(src_cpuid);
if (!qp._sw_reta)
return src_cpuid;
ceph_assert(!qp._sw_reta);
auto hash = hashfn() >> _rss_table_bits;
auto& reta = *qp._sw_reta;
return reta[hash % reta.size()];
}
unsigned hash2cpu(uint32_t hash) {
// there is an assumption here that qid == get_id() which will
// not necessary be true in the future
return forward_dst(hash2qid(hash), [hash] { return hash; });
}
hw_features& hw_features_ref() { return _hw_features; }
const rte_eth_rxconf* def_rx_conf() const {
return &_dev_info.default_rxconf;
}
const rte_eth_txconf* def_tx_conf() const {
return &_dev_info.default_txconf;
}
/**
* Set the RSS table in the device and store it in the internal vector.
*/
void set_rss_table();
uint8_t port_idx() { return _port_idx; }
bool is_i40e_device() const {
return _is_i40e_device;
}
bool is_vmxnet3_device() const {
return _is_vmxnet3_device;
}
};
std::unique_ptr<DPDKDevice> create_dpdk_net_device(
CephContext *c, unsigned cores, uint8_t port_idx = 0,
bool use_lro = true, bool enable_fc = true);
/**
* @return Number of bytes needed for mempool objects of each QP.
*/
uint32_t qp_mempool_obj_size();
#endif // CEPH_DPDK_DEV_H
| 27,139 | 27.933902 | 133 |
h
|
null |
ceph-main/src/msg/async/dpdk/DPDKStack.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
/*
* This file is open source software, licensed to you under the terms
* of the Apache License, Version 2.0 (the "License"). See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. You may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (C) 2014 Cloudius Systems, Ltd.
*/
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 XSky <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <memory>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <tuple>
#include "common/ceph_argparse.h"
#include "dpdk_rte.h"
#include "DPDKStack.h"
#include "DPDK.h"
#include "IP.h"
#include "TCP-Stack.h"
#include "common/dout.h"
#include "include/ceph_assert.h"
#include "common/Cond.h"
#define dout_subsys ceph_subsys_dpdk
#undef dout_prefix
#define dout_prefix *_dout << "dpdkstack "
// Trampoline with the signature DPDK's rte_eal_remote_launch() expects:
// unwrap the type-erased std::function and invoke it on the lcore.
static int dpdk_thread_adaptor(void* f)
{
  auto *fn = static_cast<std::function<void ()>*>(f);
  (*fn)();
  return 0;
}
void DPDKWorker::initialize()
{
static enum {
WAIT_DEVICE_STAGE,
WAIT_PORT_FIN_STAGE,
DONE
} create_stage = WAIT_DEVICE_STAGE;
static ceph::mutex lock = ceph::make_mutex("DPDKStack::lock");
static ceph::condition_variable cond;
static unsigned queue_init_done = 0;
static unsigned cores = 0;
static std::shared_ptr<DPDKDevice> sdev;
unsigned i = center.get_id();
if (i == 0) {
// Hardcoded port index 0.
// TODO: Inherit it from the opts
cores = cct->_conf->ms_async_op_threads;
std::unique_ptr<DPDKDevice> dev = create_dpdk_net_device(
cct, cores, cct->_conf->ms_dpdk_port_id,
cct->_conf->ms_dpdk_lro,
cct->_conf->ms_dpdk_hw_flow_control);
sdev = std::shared_ptr<DPDKDevice>(dev.release());
sdev->workers.resize(cores);
ldout(cct, 1) << __func__ << " using " << cores << " cores " << dendl;
std::lock_guard l{lock};
create_stage = WAIT_PORT_FIN_STAGE;
cond.notify_all();
} else {
std::unique_lock l{lock};
cond.wait(l, [] { return create_stage > WAIT_DEVICE_STAGE; });
}
ceph_assert(sdev);
if (i < sdev->hw_queues_count()) {
auto qp = sdev->init_local_queue(cct, ¢er, cct->_conf->ms_dpdk_hugepages, i);
std::map<unsigned, float> cpu_weights;
for (unsigned j = sdev->hw_queues_count() + i % sdev->hw_queues_count();
j < cores; j+= sdev->hw_queues_count())
cpu_weights[i] = 1;
cpu_weights[i] = cct->_conf->ms_dpdk_hw_queue_weight;
qp->configure_proxies(cpu_weights);
sdev->set_local_queue(i, std::move(qp));
std::lock_guard l{lock};
++queue_init_done;
cond.notify_all();
} else {
// auto master = qid % sdev->hw_queues_count();
// sdev->set_local_queue(create_proxy_net_device(master, sdev.get()));
ceph_abort();
}
if (i == 0) {
{
std::unique_lock l{lock};
cond.wait(l, [] { return queue_init_done >= cores; });
}
if (sdev->init_port_fini() < 0) {
lderr(cct) << __func__ << " init_port_fini failed " << dendl;
ceph_abort();
}
std::lock_guard l{lock};
create_stage = DONE;
cond.notify_all();
} else {
std::unique_lock l{lock};
cond.wait(l, [&] { return create_stage > WAIT_PORT_FIN_STAGE; });
}
sdev->workers[i] = this;
_impl = std::unique_ptr<DPDKWorker::Impl>(
new DPDKWorker::Impl(cct, i, ¢er, sdev));
{
std::lock_guard l{lock};
if (!--queue_init_done) {
create_stage = WAIT_DEVICE_STAGE;
sdev.reset();
}
}
}
using AvailableIPAddress = std::tuple<std::string, std::string, std::string>;
static bool parse_available_address(
const std::string &ips, const std::string &gates,
const std::string &masks, std::vector<AvailableIPAddress> &res)
{
std::vector<std::string> ip_vec, gate_vec, mask_vec;
string_to_vec(ip_vec, ips);
string_to_vec(gate_vec, gates);
string_to_vec(mask_vec, masks);
if (ip_vec.empty() || ip_vec.size() != gate_vec.size() || ip_vec.size() != mask_vec.size())
return false;
for (size_t i = 0; i < ip_vec.size(); ++i) {
res.push_back(AvailableIPAddress{ip_vec[i], gate_vec[i], mask_vec[i]});
}
return true;
}
static bool match_available_address(const std::vector<AvailableIPAddress> &avails,
const entity_addr_t &ip, int &res)
{
for (size_t i = 0; i < avails.size(); ++i) {
entity_addr_t addr;
auto a = std::get<0>(avails[i]).c_str();
if (!addr.parse(a))
continue;
if (addr.is_same_host(ip)) {
res = i;
return true;
}
}
return false;
}
// Build the per-worker network state (interface + IPv4 stack) and
// configure it from the ms_dpdk_* address options. Only the first
// configured (ip, gateway, netmask) tuple is used.
DPDKWorker::Impl::Impl(CephContext *cct, unsigned i, EventCenter *c, std::shared_ptr<DPDKDevice> dev)
: id(i), _netif(cct, dev, c), _dev(dev), _inet(cct, c, &_netif)
{
std::vector<AvailableIPAddress> tuples;
bool parsed = parse_available_address(cct->_conf.get_val<std::string>("ms_dpdk_host_ipv4_addr"),
cct->_conf.get_val<std::string>("ms_dpdk_gateway_ipv4_addr"),
cct->_conf.get_val<std::string>("ms_dpdk_netmask_ipv4_addr"), tuples);
if (!parsed) {
lderr(cct) << __func__ << " no available address "
<< cct->_conf.get_val<std::string>("ms_dpdk_host_ipv4_addr") << ", "
<< cct->_conf.get_val<std::string>("ms_dpdk_gateway_ipv4_addr") << ", "
<< cct->_conf.get_val<std::string>("ms_dpdk_netmask_ipv4_addr") << ", "
<< dendl;
ceph_abort();
}
_inet.set_host_address(ipv4_address(std::get<0>(tuples[0])));
_inet.set_gw_address(ipv4_address(std::get<1>(tuples[0])));
_inet.set_netmask_address(ipv4_address(std::get<2>(tuples[0])));
}
// Release this worker's HW queue back to the device on teardown.
DPDKWorker::Impl::~Impl()
{
_dev->unset_local_queue(id);
}
// Open a listening TCP/IPv4 socket on the userspace stack. Only the
// port of `sa` is used; the stack binds the host address configured in
// Impl::Impl(). Returns 0 on success or a negative error code.
int DPDKWorker::listen(entity_addr_t &sa,
unsigned addr_slot,
const SocketOptions &opt,
ServerSocket *sock)
{
ceph_assert(sa.get_family() == AF_INET);
ceph_assert(sock);
ldout(cct, 10) << __func__ << " addr " << sa << dendl;
// Historical per-listen address matching, superseded by the fixed
// per-worker address set up in Impl::Impl(); kept for reference.
// vector<AvailableIPAddress> tuples;
// bool parsed = parse_available_address(cct->_conf->ms_dpdk_host_ipv4_addr,
// cct->_conf->ms_dpdk_gateway_ipv4_addr,
// cct->_conf->ms_dpdk_netmask_ipv4_addr, tuples);
// if (!parsed) {
// lderr(cct) << __func__ << " no available address "
// << cct->_conf->ms_dpdk_host_ipv4_addr << ", "
// << cct->_conf->ms_dpdk_gateway_ipv4_addr << ", "
// << cct->_conf->ms_dpdk_netmask_ipv4_addr << ", "
// << dendl;
// return -EINVAL;
// }
// int idx;
// parsed = match_available_address(tuples, sa, idx);
// if (!parsed) {
// lderr(cct) << __func__ << " no matched address for " << sa << dendl;
// return -EINVAL;
// }
// _inet.set_host_address(ipv4_address(std::get<0>(tuples[idx])));
// _inet.set_gw_address(ipv4_address(std::get<1>(tuples[idx])));
// _inet.set_netmask_address(ipv4_address(std::get<2>(tuples[idx])));
return tcpv4_listen(_impl->_inet.get_tcp(), sa.get_port(), opt, sa.get_type(),
addr_slot, sock);
}
// Initiate a TCP/IPv4 connection on the userspace stack; returns the
// result of tcpv4_connect() (0 on success, negative error otherwise).
int DPDKWorker::connect(const entity_addr_t &addr, const SocketOptions &opts, ConnectedSocket *socket)
{
// ceph_assert(addr.get_family() == AF_INET);
int r = tcpv4_connect(_impl->_inet.get_tcp(), addr, socket);
ldout(cct, 10) << __func__ << " addr " << addr << dendl;
return r;
}
// Launch one worker function on a dedicated DPDK slave lcore. The
// function object is stored in `funcs` so a stable pointer can be
// handed to rte_eal_remote_launch(); the Nth spawned worker runs on the
// Nth slave lcore.
// NOTE(review): &funcs.back() must stay valid for the lcore's lifetime;
// presumably guaranteed by the reserve() in the constructor — confirm
// spawn_worker is never called more than ms_async_op_threads times.
void DPDKStack::spawn_worker(std::function<void ()> &&func)
{
// create a extra master thread
//
funcs.push_back(std::move(func));
int r = 0;
// Idempotent: brings up the EAL on first use.
r = eal.start();
if (r < 0) {
lderr(cct) << __func__ << " start dpdk rte failed, r=" << r << dendl;
ceph_abort();
}
// if eal.start already called by NVMEDevice, we will select 1..n
// cores
unsigned nr_worker = funcs.size();
ceph_assert(rte_lcore_count() >= nr_worker);
// Walk the slave lcores and stop at the one matching this worker's
// ordinal, so worker k lands on the k-th slave core.
unsigned core_id;
RTE_LCORE_FOREACH_SLAVE(core_id) {
if (--nr_worker == 0) {
break;
}
}
void *adapted_func = static_cast<void*>(&funcs.back());
eal.execute_on_master([adapted_func, core_id, this]() {
int r = rte_eal_remote_launch(dpdk_thread_adaptor, adapted_func, core_id);
if (r < 0) {
lderr(cct) << __func__ << " remote launch failed, r=" << r << dendl;
ceph_abort();
}
});
}
// Wait for worker i's lcore (worker i runs on lcore i+1, see
// spawn_worker) to finish; the last joined worker shuts the EAL down.
void DPDKStack::join_worker(unsigned i)
{
eal.execute_on_master([&]() {
rte_eal_wait_lcore(i+1);
});
if (i+1 == get_num_worker())
eal.stop();
}
| 9,216 | 31.340351 | 110 |
cc
|
null |
ceph-main/src/msg/async/dpdk/DPDKStack.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 XSky <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MSG_DPDKSTACK_H
#define CEPH_MSG_DPDKSTACK_H
#include <functional>
#include <optional>
#include "common/ceph_context.h"
#include "msg/async/Stack.h"
#include "net.h"
#include "const.h"
#include "IP.h"
#include "Packet.h"
#include "dpdk_rte.h"
class interface;
template <typename Protocol>
class NativeConnectedSocketImpl;
// DPDKServerSocketImpl
// Server-socket wrapper over a userspace-TCP listener; Protocol is the
// stack type (e.g. tcp<ipv4_traits>).
template <typename Protocol>
class DPDKServerSocketImpl : public ServerSocketImpl {
typename Protocol::listener _listener;
public:
DPDKServerSocketImpl(Protocol& proto, uint16_t port, const SocketOptions &opt,
int type, unsigned addr_slot);
// Start listening; returns the listener's error code (0 on success).
int listen() {
return _listener.listen();
}
virtual int accept(ConnectedSocket *s, const SocketOptions &opts, entity_addr_t *out, Worker *w) override;
virtual void abort_accept() override;
virtual int fd() const override {
return _listener.fd();
}
// Priority is handled by the userspace stack; nothing to do here.
virtual void set_priority(int sd, int prio, int domain) override {}
};
// NativeConnectedSocketImpl
// Connected-socket wrapper over a userspace-TCP connection. Received
// packets are consumed fragment by fragment; _cur_frag/_cur_off track
// the position inside the buffered packet and _cache_ptr holds the
// leftover of a partially consumed fragment.
template <typename Protocol>
class NativeConnectedSocketImpl : public ConnectedSocketImpl {
  typename Protocol::connection _conn;
  uint32_t _cur_frag = 0;              // next fragment index within _buf
  uint32_t _cur_off = 0;               // byte offset of that fragment in _buf
  std::optional<Packet> _buf;          // packet currently being consumed
  std::optional<bufferptr> _cache_ptr; // remainder of a partially read fragment

 public:
  explicit NativeConnectedSocketImpl(typename Protocol::connection conn)
    : _conn(std::move(conn)) {}
  // BUGFIX: the move constructor referenced the non-existent member
  // "rhs.buf" (a compile error once instantiated) and dropped the read
  // cursor state; transfer every member so the buffered read position
  // survives the move.
  NativeConnectedSocketImpl(NativeConnectedSocketImpl &&rhs)
    : _conn(std::move(rhs._conn)), _cur_frag(rhs._cur_frag),
      _cur_off(rhs._cur_off), _buf(std::move(rhs._buf)),
      _cache_ptr(std::move(rhs._cache_ptr)) {}
  virtual int is_connected() override {
    return _conn.is_connected();
  }

  // Copy up to `len` bytes into `buf`. Returns the number of bytes
  // copied, -EAGAIN if nothing is available, or a negative error.
  virtual ssize_t read(char *buf, size_t len) override {
    size_t left = len;
    ssize_t r = 0;
    size_t off = 0;
    while (left > 0) {
      // Refill the cache from the zero-copy path when it runs dry.
      if (!_cache_ptr) {
        _cache_ptr.emplace();
        r = zero_copy_read(*_cache_ptr);
        if (r <= 0) {
          _cache_ptr.reset();
          if (r == -EAGAIN)
            break;
          return r;
        }
      }
      if (_cache_ptr->length() <= left) {
        // Consume the whole cached fragment.
        _cache_ptr->copy_out(0, _cache_ptr->length(), buf+off);
        left -= _cache_ptr->length();
        off += _cache_ptr->length();
        _cache_ptr.reset();
      } else {
        // Partial consume: keep the remainder cached for the next read.
        _cache_ptr->copy_out(0, left, buf+off);
        _cache_ptr->set_offset(_cache_ptr->offset() + left);
        _cache_ptr->set_length(_cache_ptr->length() - left);
        left = 0;
        break;
      }
    }
    return len - left ? len - left : -EAGAIN;
  }

 private:
  // Hand out the next fragment of the buffered packet without copying:
  // `data` becomes a bufferptr that keeps the underlying Packet alive
  // via its deleter. Returns the fragment length, -EAGAIN when no data
  // is pending, or a negative connection error.
  ssize_t zero_copy_read(bufferptr &data) {
    auto err = _conn.get_errno();
    if (err <= 0)
      return err;
    if (!_buf) {
      _buf = std::move(_conn.read());
      if (!_buf)
        return -EAGAIN;
    }
    fragment &f = _buf->frag(_cur_frag);
    // Share the packet so the fragment's memory stays alive as long as
    // the returned bufferptr does.
    Packet p = _buf->share(_cur_off, f.size);
    auto del = std::bind(
        [](Packet &p) {}, std::move(p));
    data = buffer::claim_buffer(
        f.size, f.base, make_deleter(std::move(del)));
    if (++_cur_frag == _buf->nr_frags()) {
      // Packet fully handed out: reset the cursor.
      _cur_frag = 0;
      _cur_off = 0;
      _buf.reset();
    } else {
      _cur_off += f.size;
    }
    ceph_assert(data.length());
    return data.length();
  }

  // Queue as much of `bl` as the connection window allows, zero-copy:
  // the sent prefix of `bl` is moved into the Packet's deleter so the
  // memory outlives the send. Returns bytes queued, 0 if the window is
  // full, or a negative connection error.
  virtual ssize_t send(bufferlist &bl, bool more) override {
    auto err = _conn.get_errno();
    if (err < 0)
      return (ssize_t)err;

    size_t available = _conn.peek_sent_available();
    if (available == 0) {
      return 0;
    }

    std::vector<fragment> frags;
    auto pb = bl.buffers().begin();
    uint64_t len = 0;
    uint64_t seglen = 0;
    while (len < available && pb != bl.buffers().end()) {
      seglen = pb->length();
      // Buffer length is zero, no need to send, so skip it
      if (seglen == 0) {
        ++pb;
        continue;
      }
      if (len + seglen > available) {
        // If at least one whole fragment fits, stop here rather than
        // splitting; otherwise truncate this fragment to the window.
        if (len > 0)
          break;
        seglen = std::min(seglen, available);
      }
      len += seglen;
      frags.push_back(fragment{(char*)pb->c_str(), seglen});
      ++pb;
    }

    if (len != bl.length()) {
      // Only a prefix fits: splice it off and tie its lifetime to the
      // outgoing packet.
      bufferlist swapped;
      bl.splice(0, len, &swapped);
      auto del = std::bind(
          [](bufferlist &bl) {}, std::move(swapped));
      return _conn.send(Packet(std::move(frags), make_deleter(std::move(del))));
    } else {
      auto del = std::bind(
          [](bufferlist &bl) {}, std::move(bl));
      return _conn.send(Packet(std::move(frags), make_deleter(std::move(del))));
    }
  }

 public:
  virtual void shutdown() override {
    _conn.close_write();
  }

  // FIXME need to impl close
  virtual void close() override {
    _conn.close_write();
  }

  virtual int fd() const override {
    return _conn.fd();
  }
};
// Construct the listener on `port`; listening only starts when the
// caller invokes listen().
template <typename Protocol>
DPDKServerSocketImpl<Protocol>::DPDKServerSocketImpl(
Protocol& proto, uint16_t port, const SocketOptions &opt,
int type, unsigned addr_slot)
: ServerSocketImpl(type, addr_slot), _listener(proto.listen(port)) {}
// Accept one pending connection. Returns 0 and fills *s (and *out with
// the peer address, if requested), -EAGAIN when nothing is pending, or
// the listener's error code.
template <typename Protocol>
int DPDKServerSocketImpl<Protocol>::accept(ConnectedSocket *s, const SocketOptions &options, entity_addr_t *out, Worker *w) {
if (_listener.get_errno() < 0)
return _listener.get_errno();
auto c = _listener.accept();
if (!c)
return -EAGAIN;

if (out) {
*out = c->remote_addr();
out->set_type(addr_type);
}

std::unique_ptr<NativeConnectedSocketImpl<Protocol>> csi(
new NativeConnectedSocketImpl<Protocol>(std::move(*c)));
*s = ConnectedSocket(std::move(csi));
return 0;
}
// Cancel any pending accept on the underlying listener.
template <typename Protocol>
void DPDKServerSocketImpl<Protocol>::abort_accept() {
_listener.abort_accept();
}
// Async-messenger worker backed by the DPDK userspace TCP/IP stack.
// Each worker owns an Impl holding its network interface, device share
// and IPv4 stack (created lazily in initialize()).
class DPDKWorker : public Worker {
struct Impl {
unsigned id;
interface _netif;
std::shared_ptr<DPDKDevice> _dev;
ipv4 _inet;
Impl(CephContext *cct, unsigned i, EventCenter *c, std::shared_ptr<DPDKDevice> dev);
~Impl();
};
std::unique_ptr<Impl> _impl;

virtual void initialize() override;
void set_ipv4_packet_filter(ip_packet_filter* filter) {
_impl->_inet.set_packet_filter(filter);
}
using tcp4 = tcp<ipv4_traits>;

public:
explicit DPDKWorker(CephContext *c, unsigned i): Worker(c, i) {}
virtual int listen(entity_addr_t &addr, unsigned addr_slot,
const SocketOptions &opts, ServerSocket *) override;
virtual int connect(const entity_addr_t &addr, const SocketOptions &opts, ConnectedSocket *socket) override;
// Feed an (L2, L3) mapping into the worker's ARP cache.
void arp_learn(ethernet_address l2, ipv4_address l3) {
_impl->_inet.learn(l2, l3);
}
virtual void destroy() override {
_impl.reset();
}

friend class DPDKServerSocketImpl<tcp4>;
};
using namespace dpdk;
// NetworkStack implementation that runs each worker on a dedicated
// DPDK lcore (see spawn_worker/join_worker in DPDKStack.cc).
class DPDKStack : public NetworkStack {
// Worker entry functions; stored so stable pointers can be handed to
// rte_eal_remote_launch().
std::vector<std::function<void()> > funcs;

virtual Worker* create_worker(CephContext *c, unsigned worker_id) override {
return new DPDKWorker(c, worker_id);
}
// lcore threads are named by DPDK itself; nothing to rename here.
virtual void rename_thread(unsigned id) override {}

public:
explicit DPDKStack(CephContext *cct): NetworkStack(cct), eal(cct) {
funcs.reserve(cct->_conf->ms_async_op_threads);
}
virtual bool support_local_listen_table() const override { return true; }

virtual void spawn_worker(std::function<void ()> &&func) override;
virtual void join_worker(unsigned i) override;
private:
dpdk::eal eal;
};
#endif
| 7,675 | 27.117216 | 125 |
h
|
null |
ceph-main/src/msg/async/dpdk/EventDPDK.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 XSky <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/errno.h"
#include "DPDKStack.h"
#include "EventDPDK.h"
#include "common/dout.h"
#include "include/ceph_assert.h"
#define dout_subsys ceph_subsys_ms
#undef dout_prefix
#define dout_prefix *_dout << "DPDKDriver."
// Nothing to set up: the userspace event manager needs no per-center
// initialization.
int DPDKDriver::init(EventCenter *c, int nevent)
{
return 0;
}
// Register interest in `add_mask` events on `fd` with the userspace
// event manager. Returns 0 on success or a negative error code.
int DPDKDriver::add_event(int fd, int cur_mask, int add_mask)
{
  ldout(cct, 20) << __func__ << " add event fd=" << fd << " cur_mask=" << cur_mask
                 << " add_mask=" << add_mask << dendl;

  int r = manager.listen(fd, add_mask);
  if (r < 0) {
    lderr(cct) << __func__ << " add fd=" << fd << " failed. "
               << cpp_strerror(-r) << dendl;
    // BUGFIX: propagate the manager's error code. manager.listen()
    // does not set errno, so the old `return -errno` returned garbage
    // (matching del_event, which already returns r).
    return r;
  }
  return 0;
}
// Remove interest in `delmask` events on `fd`. A delmask of EVENT_NONE
// is a no-op. Returns 0 on success or the manager's error code.
int DPDKDriver::del_event(int fd, int cur_mask, int delmask)
{
  ldout(cct, 20) << __func__ << " del event fd=" << fd << " cur_mask=" << cur_mask
                 << " delmask=" << delmask << dendl;

  if (delmask == EVENT_NONE)
    return 0;

  int r = manager.unlisten(fd, delmask);
  if (r < 0) {
    lderr(cct) << __func__ << " delete fd=" << fd << " delmask=" << delmask
               << " failed." << cpp_strerror(-r) << dendl;
    return r;
  }
  return 0;
}
// The userspace event manager grows on demand; resizing is a no-op.
int DPDKDriver::resize_events(int newsize)
{
return 0;
}
// Poll the userspace event manager for ready fds, filling fired_events.
// Returns the number of fired events (<= 512 per call) or the manager's
// error code.
int DPDKDriver::event_wait(std::vector<FiredFileEvent> &fired_events, struct timeval *tvp)
{
  // FIX: the bound must be a compile-time constant — `int events[n]`
  // with a runtime `n` is a VLA, a non-standard extension in C++.
  static constexpr int num_events = 512;
  int events[num_events];
  int masks[num_events];

  int retval = manager.poll(events, masks, num_events, tvp);
  if (retval > 0) {
    fired_events.resize(retval);
    for (int i = 0; i < retval; i++) {
      fired_events[i].fd = events[i];
      fired_events[i].mask = masks[i];
    }
  }
  return retval;
}
| 2,059 | 22.953488 | 90 |
cc
|
null |
ceph-main/src/msg/async/dpdk/EventDPDK.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 XSky <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_EVENTDPDK_H
#define CEPH_EVENTDPDK_H
#include "msg/async/Event.h"
#include "msg/async/Stack.h"
#include "UserspaceEvent.h"
// EventDriver backed by the userspace (DPDK) event manager instead of
// kernel epoll/kqueue; polling is done in-process, so no wakeup fd is
// needed (need_wakeup() returns false).
class DPDKDriver : public EventDriver {
CephContext *cct;

public:
UserspaceEventManager manager;

explicit DPDKDriver(CephContext *c): cct(c), manager(c) {}
virtual ~DPDKDriver() { }

int init(EventCenter *c, int nevent) override;
int add_event(int fd, int cur_mask, int add_mask) override;
int del_event(int fd, int cur_mask, int del_mask) override;
int resize_events(int newsize) override;
int event_wait(std::vector<FiredFileEvent> &fired_events, struct timeval *tp) override;
bool need_wakeup() override { return false; }
};
#endif //CEPH_EVENTDPDK_H
| 1,152 | 27.121951 | 89 |
h
|
null |
ceph-main/src/msg/async/dpdk/IP.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
/*
* This file is open source software, licensed to you under the terms
* of the Apache License, Version 2.0 (the "License"). See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. You may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (C) 2014 Cloudius Systems, Ltd.
*
*/
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 XSky <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/perf_counters.h"
#include "capture.h"
#include "IP.h"
#include "toeplitz.h"
#include "common/dout.h"
#include "include/ceph_assert.h"
#define dout_subsys ceph_subsys_dpdk
#undef dout_prefix
#define dout_prefix *_dout << "dpdk "
std::ostream& operator<<(std::ostream& os, const ipv4_address& a) {
  // Print in dotted-quad form, most significant octet first
  // (the stored value is in host order here).
  auto addr = a.ip;
  os << ((addr >> 24) & 0xff) << "." << ((addr >> 16) & 0xff) << "."
     << ((addr >> 8) & 0xff) << "." << (addr & 0xff);
  return os;
}
// Reassembly gives up on a fragment set after 30 seconds.
utime_t ipv4::_frag_timeout = utime_t(30, 0);
// Out-of-class definitions for the constexpr reassembly memory thresholds
// (required pre-C++17 for odr-used constexpr statics).
constexpr uint32_t ipv4::_frag_low_thresh;
constexpr uint32_t ipv4::_frag_high_thresh;
// Timer callback that expires stale fragment sets; owned (and deleted) by ipv4.
class C_handle_frag_timeout : public EventCallback {
  ipv4 *_ipv4;
 public:
  C_handle_frag_timeout(ipv4 *i): _ipv4(i) {}
  void do_request(uint64_t fd_or_id) {
    _ipv4->frag_timeout();
  }
};
// Perf-counter id range for this layer.
enum {
  l_dpdk_qp_first = 99000,
  l_dpdk_total_linearize_operations,
  l_dpdk_qp_last
};
// Wire layout of an ICMP header (RFC 792); only echo request/reply are handled.
struct icmp_hdr {
  enum class msg_type : uint8_t {
    echo_reply = 0,
    echo_request = 8,
  };
  msg_type type;
  uint8_t code;
  uint16_t csum;   // one's-complement checksum over the whole ICMP message
  uint32_t rest;   // id/sequence for echo messages
} __attribute__((packed));
// Wire the IPv4 layer into the NIC interface: register an L3 packet
// provider (get_packet), subscribe to inbound ipv4 frames, and route
// them to the TCP/ICMP handlers registered in the _l4 table.
// Also creates the perf counters and the fragment-timeout callback
// (deleted in the destructor).
ipv4::ipv4(CephContext *c, EventCenter *cen, interface* netif)
  : cct(c), center(cen), _netif(netif), _global_arp(netif),
    _arp(c, _global_arp, cen),
    _host_address(0), _gw_address(0), _netmask(0),
    _l3(netif, eth_protocol_num::ipv4, [this] { return get_packet(); }),
    _rx_packets(
      _l3.receive(
        [this] (Packet p, ethernet_address ea) {
          return handle_received_packet(std::move(p), ea);
        },
        [this] (forward_hash& out_hash_data, Packet& p, size_t off) {
          return forward(out_hash_data, p, off);
        }
      )
    ),
    _tcp(*this, cen), _icmp(c, *this),
    _l4({{ uint8_t(ip_protocol_num::tcp), &_tcp },
         { uint8_t(ip_protocol_num::icmp), &_icmp }}),
    _packet_filter(nullptr)
{
  PerfCountersBuilder plb(cct, "ipv4", l_dpdk_qp_first, l_dpdk_qp_last);
  plb.add_u64_counter(l_dpdk_total_linearize_operations, "dpdk_ip_linearize_operations", "DPDK IP Packet linearization operations");
  perf_logger = plb.create_perf_counters();
  cct->get_perfcounters_collection()->add(perf_logger);
  frag_handler = new C_handle_frag_timeout(this);
}
// Build the RSS hash input for an inbound ipv4 packet at offset `off`.
// Always contributes src/dst addresses; for unfragmented datagrams the L4
// handler adds its ports so flows hash to a stable cpu.  Returns true to
// indicate the packet should keep flowing through the receive path.
bool ipv4::forward(forward_hash& out_hash_data, Packet& p, size_t off)
{
  auto iph = p.get_header<ip_hdr>(off);
  out_hash_data.push_back(iph->src_ip.ip);
  out_hash_data.push_back(iph->dst_ip.ip);
  auto h = iph->ntoh();
  auto l4 = _l4[h.ip_proto];
  if (l4) {
    if (h.mf() == false && h.offset() == 0) {
      // This IP datagram is atomic, forward according to tcp connection hash
      l4->forward(out_hash_data, p, off + sizeof(ip_hdr));
    }
    // else forward according to ip fields only
  }
  return true;
}
// Inbound ipv4 path: validate the header, learn the sender's MAC, apply
// the optional packet filter, reassemble fragments, and hand complete
// datagrams to the registered L4 protocol handler.
//
// p:    the received packet, ip header at offset 0.
// from: the sender's ethernet address (used for ARP learning / forwarding).
// Always returns 0; invalid or unhandled packets are silently dropped.
int ipv4::handle_received_packet(Packet p, ethernet_address from)
{
  auto iph = p.get_header<ip_hdr>(0);
  if (!iph) {
    return 0;
  }
  // Skip checking csum of reassembled IP datagram
  if (!get_hw_features().rx_csum_offload && !p.offload_info_ref().reassembled) {
    checksummer csum;
    csum.sum(reinterpret_cast<char*>(iph), sizeof(*iph));
    if (csum.get() != 0) {
      return 0;
    }
  }
  auto h = iph->ntoh();
  unsigned ip_len = h.len;
  unsigned ip_hdr_len = h.ihl * 4;
  unsigned pkt_len = p.len();
  auto offset = h.offset();   // fragment offset in bytes
  ldout(cct, 10) << __func__ << " get " << std::hex << int(h.ip_proto)
                 << std::dec << " packet from "
                 << h.src_ip << " -> " << h.dst_ip << " id=" << h.id
                 << " ip_len=" << ip_len << " ip_hdr_len=" << ip_hdr_len
                 << " pkt_len=" << pkt_len << " offset=" << offset << dendl;
  if (pkt_len > ip_len) {
    // Trim extra data in the packet beyond IP total length
    p.trim_back(pkt_len - ip_len);
  } else if (pkt_len < ip_len) {
    // Drop if it contains less than IP total length
    return 0;
  }
  // Drop if the reassembled datagram will be larger than maximum IP size
  if (offset + p.len() > ip_packet_len_max) {
    return 0;
  }
  // FIXME: process options
  if (in_my_netmask(h.src_ip) && h.src_ip != _host_address) {
    ldout(cct, 20) << __func__ << " learn mac " << from << " with " << h.src_ip << dendl;
    _arp.learn(from, h.src_ip);
  }
  if (_packet_filter) {
    bool handled = false;
    _packet_filter->handle(p, &h, from, handled);
    if (handled) {
      return 0;
    }
  }
  if (h.dst_ip != _host_address) {
    // FIXME: forward
    return 0;
  }
  // Does this IP datagram need reassembly
  auto mf = h.mf();
  if (mf == true || offset != 0) {
    frag_limit_mem();
    auto frag_id = ipv4_frag_id{h.src_ip, h.dst_ip, h.id, h.ip_proto};
    auto& frag = _frags[frag_id];
    // MF == 0 marks the final fragment of the datagram
    if (mf == false) {
      frag.last_frag_received = true;
    }
    // This is a newly created frag_id
    if (frag.mem_size == 0) {
      _frags_age.push_back(frag_id);
      frag.rx_time = ceph_clock_now();
    }
    auto added_size = frag.merge(h, offset, std::move(p));
    _frag_mem += added_size;
    if (frag.is_complete()) {
      // All the fragments are received
      auto dropped_size = frag.mem_size;
      auto& ip_data = frag.data.map.begin()->second;
      // Choose a cpu to forward this packet
      auto cpu_id = center->get_id();
      auto l4 = _l4[h.ip_proto];
      if (l4) {
        size_t l4_offset = 0;
        forward_hash hash_data;
        hash_data.push_back(hton(h.src_ip.ip));
        hash_data.push_back(hton(h.dst_ip.ip));
        l4->forward(hash_data, ip_data, l4_offset);
        cpu_id = _netif->hash2cpu(toeplitz_hash(_netif->rss_key(), hash_data));
      }
      // No need to forward if the dst cpu is the current cpu
      if (cpu_id == center->get_id()) {
        // FIX: the original dereferenced l4 unconditionally here.  When no
        // handler is registered for this protocol, l4 is nullptr and cpu_id
        // is still the local cpu, so the call crashed; now the reassembled
        // datagram is simply dropped.
        if (l4) {
          l4->received(std::move(ip_data), h.src_ip, h.dst_ip);
        }
      } else {
        auto to = _netif->hw_address();
        auto pkt = frag.get_assembled_packet(from, to);
        _netif->forward(center, cpu_id, std::move(pkt));
      }
      // Delete this frag from _frags and _frags_age
      frag_drop(frag_id, dropped_size);
      _frags_age.remove(frag_id);
      perf_logger->set(l_dpdk_total_linearize_operations,
                       ipv4_packet_merger::linearizations());
    } else {
      // Some of the fragments are missing
      // NOTE(review): arming only when frag_timefd already holds a timer id
      // (and via the frag_arm() overload that schedules at "now") looks
      // inverted — one would expect to arm when no timer is pending.
      // Preserved as-is; confirm against the upstream seastar code.
      if (frag_timefd) {
        frag_arm();
      }
    }
    return 0;
  }
  auto l4 = _l4[h.ip_proto];
  if (l4) {
    // Trim IP header and pass to upper layer
    p.trim_front(ip_hdr_len);
    l4->received(std::move(p), h.src_ip, h.dst_ip);
  }
  return 0;
}
void ipv4::wait_l2_dst_address(ipv4_address to, Packet p, resolution_cb cb) {
  // Pick the L2 next hop: a directly connected destination is resolved
  // itself, anything outside our netmask goes via the default gateway.
  ipv4_address next_hop = in_my_netmask(to) ? to : _gw_address;
  _arp.wait(std::move(next_hop), std::move(p), std::move(cb));
}
// Expose the NIC's offload capabilities (csum offload, TSO, mtu, ...)
// as reported by the underlying interface.
const hw_features& ipv4::get_hw_features() const
{
  return _netif->get_hw_features();
}
// Encapsulate an L4 payload into one or more ipv4 datagrams addressed to
// `to` (L2 destination `e_dst`) and queue them on _packetq for the NIC's
// packet provider to pick up.  Payloads that exceed the MTU (and cannot be
// TSO-offloaded) are split into MTU-sized fragments.
void ipv4::send(ipv4_address to, ip_protocol_num proto_num,
                Packet p, ethernet_address e_dst) {
  auto needs_frag = this->needs_frag(p, proto_num, get_hw_features());
  // Prepend an ip header to one fragment (or the whole packet) and queue it.
  // `remaining` is the payload left after this piece; `offset` is this
  // piece's byte offset within the original datagram.
  auto send_pkt = [this, to, proto_num, needs_frag, e_dst] (Packet& pkt, uint16_t remaining, uint16_t offset) mutable {
    // NOTE: function-local static shared by all instances; fine as long as
    // sends happen on a single reactor thread — TODO confirm.
    static uint16_t id = 0;
    auto iph = pkt.prepend_header<ip_hdr>();
    iph->ihl = sizeof(*iph) / 4;
    iph->ver = 4;
    iph->dscp = 0;
    iph->ecn = 0;
    iph->len = pkt.len();
    // FIXME: a proper id
    iph->id = id++;
    if (needs_frag) {
      // MF is set on every fragment except the last one
      uint16_t mf = remaining > 0;
      // The fragment offset is measured in units of 8 octets (64 bits)
      auto off = offset / 8;
      iph->frag = (mf << uint8_t(ip_hdr::frag_bits::mf)) | off;
    } else {
      iph->frag = 0;
    }
    iph->ttl = 64;
    iph->ip_proto = (uint8_t)proto_num;
    iph->csum = 0;
    iph->src_ip = _host_address;
    iph->dst_ip = to;
    ldout(cct, 20) << " ipv4::send " << " id=" << iph->id << " " << _host_address << " -> " << to
                   << " len " << pkt.len() << dendl;
    // Convert the header to network byte order in place, then either let
    // the NIC compute the header checksum or do it in software.
    *iph = iph->hton();
    if (get_hw_features().tx_csum_ip_offload) {
      iph->csum = 0;
      pkt.offload_info_ref().needs_ip_csum = true;
    } else {
      checksummer csum;
      csum.sum(reinterpret_cast<char*>(iph), sizeof(*iph));
      iph->csum = csum.get();
    }
    _packetq.push_back(
      l3_protocol::l3packet{eth_protocol_num::ipv4, e_dst, std::move(pkt)});
  };
  if (needs_frag) {
    // Software fragmentation: carve MTU-minus-header sized slices off the
    // payload; Packet::share avoids copying the underlying buffers.
    uint16_t offset = 0;
    uint16_t remaining = p.len();
    auto mtu = get_hw_features().mtu;
    while (remaining) {
      auto can_send = std::min(uint16_t(mtu - ipv4_hdr_len_min), remaining);
      remaining -= can_send;
      auto pkt = p.share(offset, can_send);
      send_pkt(pkt, remaining, offset);
      offset += can_send;
    }
  } else {
    // The whole packet can be send in one shot
    send_pkt(p, 0, 0);
  }
}
// Packet provider hook called by the L3 layer: returns the next outbound
// ipv4 packet, or nullopt when nothing is ready.  Pulls at most one L4
// payload per call, round-robining across registered providers so no
// single protocol can starve the others.
std::optional<l3_protocol::l3packet> ipv4::get_packet() {
  // _packetq will be mostly empty here unless it hold remnants of previously
  // fragmented packet
  if (_packetq.empty()) {
    for (size_t i = 0; i < _pkt_providers.size(); i++) {
      // Advance the round-robin cursor, wrapping at the end
      auto l4p = _pkt_providers[_pkt_provider_idx++]();
      if (_pkt_provider_idx == _pkt_providers.size()) {
        _pkt_provider_idx = 0;
      }
      if (l4p) {
        ldout(cct, 20) << " ipv4::get_packet len " << l4p->p.len() << dendl;
        // send() encapsulates (and possibly fragments) into _packetq
        send(l4p->to, l4p->proto_num, std::move(l4p->p), l4p->e_dst);
        break;
      }
    }
  }
  std::optional<l3_protocol::l3packet> p;
  if (!_packetq.empty()) {
    p = std::move(_packetq.front());
    _packetq.pop_front();
  }
  return p;
}
// Bound reassembly memory: once usage exceeds the high watermark, evict
// the oldest incomplete fragment sets until usage drops to the low
// watermark (hysteresis avoids evicting on every packet).
void ipv4::frag_limit_mem() {
  if (_frag_mem <= _frag_high_thresh) {
    return;
  }
  // Amount of memory we still need to release
  auto drop = _frag_mem - _frag_low_thresh;
  while (drop) {
    if (_frags_age.empty()) {
      return;
    }
    // Drop the oldest frag (first element) from _frags_age
    auto frag_id = _frags_age.front();
    _frags_age.pop_front();
    // Drop from _frags as well
    auto& frag = _frags[frag_id];
    auto dropped_size = frag.mem_size;
    frag_drop(frag_id, dropped_size);
    // min() guards against unsigned underflow when the evicted set was
    // larger than what remained to drop
    drop -= std::min(drop, dropped_size);
  }
}
// Timer callback: expire fragment sets older than _frag_timeout.
// _frags_age is kept in arrival order, so the scan can stop at the
// first entry that is still young.
void ipv4::frag_timeout() {
  if (_frags.empty()) {
    return;
  }
  auto now = ceph_clock_now();
  for (auto it = _frags_age.begin(); it != _frags_age.end();) {
    auto frag_id = *it;
    auto& frag = _frags[frag_id];
    if (now > frag.rx_time + _frag_timeout) {
      auto dropped_size = frag.mem_size;
      // Drop from _frags
      frag_drop(frag_id, dropped_size);
      // Drop from _frags_age
      it = _frags_age.erase(it);
    } else {
      // The further items can only be younger
      break;
    }
  }
  if (_frags.size() != 0) {
    // Still-pending sets: re-arm the timer relative to now
    frag_arm(now);
  } else {
    _frag_mem = 0;
  }
}
// Merge one fragment (payload at byte `offset`) into this reassembly set.
// The first fragment's ip header is kept for get_assembled_packet().
// Returns the number of bytes of memory this fragment added, so the
// caller can account it against the global reassembly budget.
int32_t ipv4::frag::merge(ip_hdr &h, uint16_t offset, Packet p) {
  uint32_t old = mem_size;
  unsigned ip_hdr_len = h.ihl * 4;
  // Store IP header
  if (offset == 0) {
    header = p.share(0, ip_hdr_len);
  }
  // Store IP payload
  p.trim_front(ip_hdr_len);
  data.merge(offset, std::move(p));
  // Update mem size
  mem_size = header.memory();
  for (const auto& x : data.map) {
    mem_size += x.second.memory();
  }
  auto added_size = mem_size - old;
  return added_size;
}
bool ipv4::frag::is_complete() {
  // The packet_merger coalesces adjacent ranges, so once every fragment
  // has arrived the map collapses to a single entry starting at offset 0.
  // Completeness additionally requires having seen the MF==0 fragment.
  if (!last_frag_received) {
    return false;
  }
  return data.map.size() == 1 && data.map.begin()->first == 0;
}
// Build one forwardable frame out of the completed reassembly set:
// ethernet header + the saved ip header + the merged payload, with the
// ip length recomputed and the fragment fields cleared.
Packet ipv4::frag::get_assembled_packet(ethernet_address from, ethernet_address to) {
  auto& ip_header = header;
  auto& ip_data = data.map.begin()->second;
  // Append a ethernet header, needed for forwarding
  auto eh = ip_header.prepend_header<eth_hdr>();
  eh->src_mac = from;
  eh->dst_mac = to;
  eh->eth_proto = uint16_t(eth_protocol_num::ipv4);
  *eh = eh->hton();
  // Prepare a packet contains both ethernet header, ip header and ip data
  ip_header.append(std::move(ip_data));
  auto pkt = std::move(ip_header);
  auto iph = pkt.get_header<ip_hdr>(sizeof(eth_hdr));
  // len is the sum of each fragment
  iph->len = hton(uint16_t(pkt.len() - sizeof(eth_hdr)));
  // No fragmentation for the assembled datagram
  iph->frag = 0;
  // Since each fragment's csum is checked, no need to csum
  // again for the assembled datagram
  offload_info oi;
  oi.reassembled = true;
  pkt.set_offload_info(oi);
  return pkt;
}
// Handle an inbound ICMP message.  Only echo requests are answered: the
// packet is rewritten in place into an echo reply (checksum recomputed
// over the whole message) and queued once the sender's MAC resolves.
// Replies are dropped when the throttle has no room, bounding queue memory.
void icmp::received(Packet p, ipaddr from, ipaddr to) {
  auto hdr = p.get_header<icmp_hdr>(0);
  if (!hdr || hdr->type != icmp_hdr::msg_type::echo_request) {
    return;
  }
  hdr->type = icmp_hdr::msg_type::echo_reply;
  hdr->code = 0;
  // Zero the checksum field before summing, per the ICMP checksum rules
  hdr->csum = 0;
  checksummer csum;
  csum.sum(reinterpret_cast<char*>(hdr), p.len());
  hdr->csum = csum.get();
  if (_queue_space.get_or_fail(p.len())) { // drop packets that do not fit the queue
    // Queue the reply once ARP resolves the peer's ethernet address;
    // the space reserved above is released by the packet provider.
    auto cb = [this, from] (const ethernet_address e_dst, Packet p, int r) mutable {
      if (r == 0) {
        _packetq.emplace_back(ipv4_traits::l4packet{from, std::move(p), e_dst, ip_protocol_num::icmp});
      }
    };
    _inet.wait_l2_dst_address(from, std::move(p), cb);
  }
}
| 14,372 | 28.819502 | 132 |
cc
|
null |
ceph-main/src/msg/async/dpdk/IP.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
/*
* This file is open source software, licensed to you under the terms
* of the Apache License, Version 2.0 (the "License"). See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. You may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (C) 2014 Cloudius Systems, Ltd.
*
*/
#ifndef CEPH_MSG_IP_H_
#define CEPH_MSG_IP_H_
#include <arpa/inet.h>
#include <unordered_map>
#include <cstdint>
#include <array>
#include <map>
#include <list>
#include <chrono>
#include "msg/async/Event.h"
#include "common/Throttle.h"
#include "array_map.h"
#include "ARP.h"
#include "IPChecksum.h"
#include "ip_types.h"
#include "const.h"
#include "net.h"
#include "PacketUtil.h"
#include "toeplitz.h"
class ipv4;
template <ip_protocol_num ProtoNum>
class ipv4_l4;
template <typename InetTraits>
class tcp;
// Traits bundle parameterizing the generic tcp<> stack for IPv4:
// address type, outbound packet shape, and the pseudo-header checksum.
struct ipv4_traits {
  using address_type = ipv4_address;
  using inet_type = ipv4_l4<ip_protocol_num::tcp>;
  // One L4 payload ready to be encapsulated by ipv4::send().
  struct l4packet {
    ipv4_address to;
    Packet p;
    ethernet_address e_dst;
    ip_protocol_num proto_num;
  };
  using packet_provider_type = std::function<std::optional<l4packet> ()>;
  // Fold the TCP pseudo-header (src, dst, zero, protocol, length) into csum.
  static void tcp_pseudo_header_checksum(checksummer& csum, ipv4_address src, ipv4_address dst, uint16_t len) {
    csum.sum_many(src.ip, dst.ip, uint8_t(0), uint8_t(ip_protocol_num::tcp), len);
  }
  static constexpr uint8_t ip_hdr_len_min = ipv4_hdr_len_min;
};
// Thin per-protocol facade over ipv4: lets an L4 protocol register its
// packet provider (stamped with ProtoNum) and resolve L2 destinations.
template <ip_protocol_num ProtoNum>
class ipv4_l4 {
 public:
  ipv4& _inet;
 public:
  ipv4_l4(ipv4& inet) : _inet(inet) {}
  void register_packet_provider(ipv4_traits::packet_provider_type func);
  void wait_l2_dst_address(ipv4_address to, Packet p, resolution_cb cb);
};
// Interface implemented by L4 protocol handlers registered in ipv4::_l4.
// received() consumes a datagram payload; forward() may append protocol
// fields (e.g. ports) to the RSS hash input.
class ip_protocol {
 public:
  virtual ~ip_protocol() {}
  virtual void received(Packet p, ipv4_address from, ipv4_address to) = 0;
  virtual bool forward(forward_hash& out_hash_data, Packet& p, size_t off) { return true; }
};
// 4-tuple identifying an L4 connection (local/foreign address and port),
// with equality and a Toeplitz hash so a flow maps to a stable RSS cpu.
template <typename InetTraits>
struct l4connid {
  using ipaddr = typename InetTraits::address_type;
  using inet_type = typename InetTraits::inet_type;
  struct connid_hash;
  ipaddr local_ip;
  ipaddr foreign_ip;
  uint16_t local_port;
  uint16_t foreign_port;
  bool operator==(const l4connid& x) const {
    return local_ip == x.local_ip
           && foreign_ip == x.foreign_ip
           && local_port == x.local_port
           && foreign_port == x.foreign_port;
  }
  // Hash in the same field order the NIC uses for inbound RSS
  // (foreign first), so both directions land on the same cpu.
  uint32_t hash(const rss_key_type& rss_key) {
    forward_hash hash_data;
    hash_data.push_back(hton(foreign_ip.ip));
    hash_data.push_back(hton(local_ip.ip));
    hash_data.push_back(hton(foreign_port));
    hash_data.push_back(hton(local_port));
    return toeplitz_hash(rss_key, hash_data);
  }
};
// ip_protocol adapter owning the TCP stack instance for this ipv4 layer.
class ipv4_tcp final : public ip_protocol {
  ipv4_l4<ip_protocol_num::tcp> _inet_l4;
  std::unique_ptr<tcp<ipv4_traits>> _tcp;
 public:
  ipv4_tcp(ipv4& inet, EventCenter *c);
  ~ipv4_tcp();
  virtual void received(Packet p, ipv4_address from, ipv4_address to) override;
  virtual bool forward(forward_hash& out_hash_data, Packet& p, size_t off) override;
  friend class ipv4;
};
// Minimal ICMP responder: answers echo requests with echo replies.
// Outbound replies sit in _packetq and are drained through the packet
// provider registered with the inet layer; _queue_space bounds how much
// reply data may be queued (excess requests are dropped in received()).
class icmp {
 public:
  using ipaddr = ipv4_address;
  using inet_type = ipv4_l4<ip_protocol_num::icmp>;
  explicit icmp(CephContext *c, inet_type& inet)
    : cct(c), _inet(inet), _queue_space(c, "DPDK::icmp::_queue_space", 212992) {
    // Hand queued replies to the ip layer, returning throttle space as
    // each one is dequeued.
    _inet.register_packet_provider([this] {
      std::optional<ipv4_traits::l4packet> l4p;
      if (!_packetq.empty()) {
        l4p = std::move(_packetq.front());
        _packetq.pop_front();
        _queue_space.put(l4p->p.len());
      }
      return l4p;
    });
  }
  void received(Packet p, ipaddr from, ipaddr to);
 private:
  CephContext *cct;
  // ipv4_l4<ip_protocol_num::icmp>
  inet_type& _inet;
  circular_buffer<ipv4_traits::l4packet> _packetq;
  Throttle _queue_space;
};
// ip_protocol adapter that forwards inbound ICMP datagrams to the icmp
// responder (uses the base-class default forward(): ip-fields-only hash).
class ipv4_icmp final : public ip_protocol {
  CephContext *cct;
  ipv4_l4<ip_protocol_num::icmp> _inet_l4;
  icmp _icmp;
 public:
  ipv4_icmp(CephContext *c, ipv4& inet) : cct(c), _inet_l4(inet), _icmp(c, _inet_l4) {}
  virtual void received(Packet p, ipv4_address from, ipv4_address to) override {
    _icmp.received(std::move(p), from, to);
  }
  friend class ipv4;
};
struct ip_hdr;
struct ip_packet_filter {
virtual ~ip_packet_filter() {};
virtual void handle(Packet& p, ip_hdr* iph, ethernet_address from, bool & handled) = 0;
};
// Key identifying one in-progress reassembly: per RFC 791, fragments
// belong together when (src, dst, identification, protocol) all match.
struct ipv4_frag_id {
  struct hash;
  ipv4_address src_ip;
  ipv4_address dst_ip;
  uint16_t identification;
  uint8_t protocol;
  bool operator==(const ipv4_frag_id& x) const {
    return src_ip == x.src_ip &&
           dst_ip == x.dst_ip &&
           identification == x.identification &&
           protocol == x.protocol;
  }
};
// Hash for ipv4_frag_id: XOR of the std::hash of each field (inheriting
// the three hashers privately to disambiguate the operator() overloads).
struct ipv4_frag_id::hash : private std::hash<ipv4_address>,
                            private std::hash<uint16_t>, private std::hash<uint8_t> {
  size_t operator()(const ipv4_frag_id& id) const noexcept {
    using h1 = std::hash<ipv4_address>;
    using h2 = std::hash<uint16_t>;
    using h3 = std::hash<uint8_t>;
    return h1::operator()(id.src_ip) ^
           h1::operator()(id.dst_ip) ^
           h2::operator()(id.identification) ^
           h3::operator()(id.protocol);
  }
};
// Tag type distinguishing this packet_merger instantiation's statistics.
struct ipv4_tag {};
// Merger that coalesces fragment payloads keyed by byte offset.
using ipv4_packet_merger = packet_merger<uint32_t, ipv4_tag>;
class interface;
// The IPv4 layer: receives frames from an `interface`, validates and
// reassembles datagrams, dispatches them to registered L4 handlers
// (_l4), and encapsulates outbound L4 payloads (fragmenting as needed).
// Also owns ARP state and the fragment-reassembly memory budget.
class ipv4 {
 public:
  using address_type = ipv4_address;
  using proto_type = uint16_t;
  static address_type broadcast_address() { return ipv4_address(0xffffffff); }
  static proto_type arp_protocol_type() { return proto_type(eth_protocol_num::ipv4); }
  CephContext *cct;
  EventCenter *center;
 private:
  interface* _netif;
  // Round-robined sources of outbound L4 payloads (tcp, icmp, ...)
  std::vector<ipv4_traits::packet_provider_type> _pkt_providers;
  // Pending fragment-timeout timer id, if armed
  std::optional<uint64_t> frag_timefd;
  EventCallbackRef frag_handler;
  arp _global_arp;
  arp_for<ipv4> _arp;
  ipv4_address _host_address;
  ipv4_address _gw_address;
  ipv4_address _netmask;
  l3_protocol _l3;
  subscription<Packet, ethernet_address> _rx_packets;
  ipv4_tcp _tcp;
  ipv4_icmp _icmp;
  // L4 dispatch table indexed by ip protocol number
  array_map<ip_protocol*, 256> _l4;
  ip_packet_filter *_packet_filter;
  // One in-progress reassembly: saved ip header, merged payload ranges,
  // arrival time and accounted memory.
  struct frag {
    Packet header;
    ipv4_packet_merger data;
    utime_t rx_time;
    uint32_t mem_size = 0;
    // fragment with MF == 0 inidates it is the last fragment
    bool last_frag_received = false;
    Packet get_assembled_packet(ethernet_address from, ethernet_address to);
    int32_t merge(ip_hdr &h, uint16_t offset, Packet p);
    bool is_complete();
  };
  std::unordered_map<ipv4_frag_id, frag, ipv4_frag_id::hash> _frags;
  // Reassembly sets in arrival order; front is the oldest
  std::list<ipv4_frag_id> _frags_age;
  static utime_t _frag_timeout;
  // Eviction hysteresis: evict above high watermark down to the low one
  static constexpr uint32_t _frag_low_thresh{3 * 1024 * 1024};
  static constexpr uint32_t _frag_high_thresh{4 * 1024 * 1024};
  uint32_t _frag_mem = 0;
  // Encapsulated packets awaiting pickup by the L3 packet provider
  circular_buffer<l3_protocol::l3packet> _packetq;
  unsigned _pkt_provider_idx = 0;
  PerfCounters *perf_logger;
 private:
  int handle_received_packet(Packet p, ethernet_address from);
  bool forward(forward_hash& out_hash_data, Packet& p, size_t off);
  std::optional<l3_protocol::l3packet> get_packet();
  bool in_my_netmask(ipv4_address a) const {
    return !((a.ip ^ _host_address.ip) & _netmask.ip);
  }
  void frag_limit_mem();
  // Remove one reassembly set and return its memory to the budget
  void frag_drop(ipv4_frag_id frag_id, uint32_t dropped_size) {
    _frags.erase(frag_id);
    _frag_mem -= dropped_size;
  }
  // Schedule the timeout handler at now + _frag_timeout
  void frag_arm(utime_t now) {
    auto tp = now + _frag_timeout;
    frag_timefd = center->create_time_event(tp.to_nsec() / 1000, frag_handler);
  }
  // NOTE(review): unlike frag_arm(utime_t), this overload schedules at
  // `now` (no _frag_timeout added), i.e. effectively immediately — looks
  // inconsistent; confirm against the upstream seastar implementation.
  void frag_arm() {
    auto now = ceph_clock_now();
    frag_timefd = center->create_time_event(now.to_nsec() / 1000, frag_handler);
  }
 public:
  void frag_timeout();
 public:
  explicit ipv4(CephContext *c, EventCenter *cen, interface* netif);
  ~ipv4() {
    delete frag_handler;
  }
  void set_host_address(ipv4_address ip) {
    _host_address = ip;
    _arp.set_self_addr(ip);
  }
  ipv4_address host_address() {
    return _host_address;
  }
  void set_gw_address(ipv4_address ip) {
    _gw_address = ip;
  }
  ipv4_address gw_address() const {
    return _gw_address;
  }
  void set_netmask_address(ipv4_address ip) {
    _netmask = ip;
  }
  ipv4_address netmask_address() const {
    return _netmask;
  }
  interface *netif() const {
    return _netif;
  }
  // TODO or something. Should perhaps truly be a list
  // of filters. With ordering. And blackjack. Etc.
  // But for now, a simple single raw pointer suffices
  void set_packet_filter(ip_packet_filter *f) {
    _packet_filter = f;
  }
  ip_packet_filter * packet_filter() const {
    return _packet_filter;
  }
  void send(ipv4_address to, ip_protocol_num proto_num, Packet p, ethernet_address e_dst);
  tcp<ipv4_traits>& get_tcp() { return *_tcp._tcp; }
  void register_l4(proto_type id, ip_protocol* handler);
  const hw_features& get_hw_features() const;
  // A packet needs software fragmentation when it exceeds the MTU and
  // cannot be handed to the NIC's TSO engine.
  static bool needs_frag(Packet& p, ip_protocol_num proto_num, hw_features hw_features) {
    if (p.len() + ipv4_hdr_len_min <= hw_features.mtu)
      return false;
    if ((proto_num == ip_protocol_num::tcp && hw_features.tx_tso))
      return false;
    return true;
  }
  void learn(ethernet_address l2, ipv4_address l3) {
    _arp.learn(l2, l3);
  }
  void register_packet_provider(ipv4_traits::packet_provider_type&& func) {
    _pkt_providers.push_back(std::move(func));
  }
  void wait_l2_dst_address(ipv4_address to, Packet p, resolution_cb cb);
};
// Register an L4 packet provider with the ip layer, wrapping it so every
// produced payload is stamped with this facade's protocol number.
template <ip_protocol_num ProtoNum>
inline void ipv4_l4<ProtoNum>::register_packet_provider(
  ipv4_traits::packet_provider_type func) {
  _inet.register_packet_provider([func] {
    auto l4p = func();
    if (l4p) {
      (*l4p).proto_num = ProtoNum;
    }
    return l4p;
  });
}
// Delegate L2 (MAC) resolution to the ip layer's ARP machinery.
template <ip_protocol_num ProtoNum>
inline void ipv4_l4<ProtoNum>::wait_l2_dst_address(ipv4_address to, Packet p, resolution_cb cb) {
  _inet.wait_l2_dst_address(to, std::move(p), std::move(cb));
}
// Wire layout of the ipv4 header (RFC 791), packed to match the frame.
// NOTE(review): bitfield order assumes the usual little-endian layout
// where ihl occupies the low nibble — confirm for other targets.
struct ip_hdr {
  uint8_t ihl : 4;    // header length in 32-bit words
  uint8_t ver : 4;    // always 4
  uint8_t dscp : 6;
  uint8_t ecn : 2;
  uint16_t len;       // total datagram length including header
  uint16_t id;
  // flags (MF/DF) in the top bits, 13-bit fragment offset in 8-byte units
  uint16_t frag;
  enum class frag_bits : uint8_t { mf = 13, df = 14, reserved = 15, offset_shift = 3 };
  uint8_t ttl;
  uint8_t ip_proto;
  uint16_t csum;
  ipv4_address src_ip;
  ipv4_address dst_ip;
  uint8_t options[0];   // variable-length options follow when ihl > 5
  // Return a copy with multi-byte fields converted to network byte order.
  ip_hdr hton() {
    ip_hdr hdr = *this;
    hdr.len = ::hton(len);
    hdr.id = ::hton(id);
    hdr.frag = ::hton(frag);
    hdr.csum = ::hton(csum);
    hdr.src_ip.ip = ::hton(src_ip.ip);
    hdr.dst_ip.ip = ::hton(dst_ip.ip);
    return hdr;
  }
  // Return a copy with multi-byte fields converted to host byte order.
  ip_hdr ntoh() {
    ip_hdr hdr = *this;
    hdr.len = ::ntoh(len);
    hdr.id = ::ntoh(id);
    hdr.frag = ::ntoh(frag);
    hdr.csum = ::ntoh(csum);
    hdr.src_ip = src_ip.ntoh();
    hdr.dst_ip = dst_ip.ntoh();
    return hdr;
  }
  bool mf() { return frag & (1 << uint8_t(frag_bits::mf)); }
  bool df() { return frag & (1 << uint8_t(frag_bits::df)); }
  // Fragment offset in bytes: shifting left by 3 multiplies the 13-bit
  // field by 8 while the uint16_t return truncates away the flag bits.
  uint16_t offset() { return frag << uint8_t(frag_bits::offset_shift); }
} __attribute__((packed));
// Unordered-container hash for l4connid: XOR of the std::hash of each
// of the four tuple fields (symmetric, so both flow directions collide —
// acceptable for a hash table).
template <typename InetTraits>
struct l4connid<InetTraits>::connid_hash : private std::hash<ipaddr>, private std::hash<uint16_t> {
  size_t operator()(const l4connid<InetTraits>& id) const noexcept {
    using h1 = std::hash<ipaddr>;
    using h2 = std::hash<uint16_t>;
    return h1::operator()(id.local_ip)
           ^ h1::operator()(id.foreign_ip)
           ^ h2::operator()(id.local_port)
           ^ h2::operator()(id.foreign_port);
  }
};
#endif /* CEPH_MSG_IP_H */
| 11,881 | 28.410891 | 111 |
h
|
null |
ceph-main/src/msg/async/dpdk/IPChecksum.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
/*
* This file is open source software, licensed to you under the terms
* of the Apache License, Version 2.0 (the "License"). See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. You may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (C) 2014 Cloudius Systems, Ltd.
*/
#include <arpa/inet.h>
#include "net.h"
#include "IPChecksum.h"
// Accumulate `len` bytes into the running one's-complement sum.
// Words are summed as big-endian 16-bit values (64 bits at a time for
// speed); carries stay in the wide accumulator until get() folds them.
// `odd` carries byte parity across calls so a stream may be fed in
// arbitrary-sized pieces.
void checksummer::sum(const char* data, size_t len) {
  auto orig_len = len;
  if (odd) {
    // Previous call ended mid-word: this byte is the low half of the
    // pending 16-bit word.
    csum += uint8_t(*data++);
    --len;
  }
  // Main loop: 64-bit big-endian chunks.
  // NOTE(review): assumes `data` is sufficiently aligned for uint64_t
  // loads after the odd-byte adjustment — confirm for all callers.
  auto p64 = reinterpret_cast<const uint64_t*>(data);
  while (len >= 8) {
    csum += ntohq(*p64++);
    len -= 8;
  }
  auto p16 = reinterpret_cast<const uint16_t*>(p64);
  while (len >= 2) {
    csum += ntohs(*p16++);
    len -= 2;
  }
  auto p8 = reinterpret_cast<const uint8_t*>(p16);
  if (len) {
    // Trailing byte is the high half of a 16-bit word.
    csum += *p8++ << 8;
    len -= 1;
  }
  // Flip parity if this call consumed an odd number of bytes.
  odd ^= orig_len & 1;
}
// Fold the wide accumulator down to 16 bits (128 -> 64 -> 16 with
// end-around carry) and return the one's complement in network byte
// order, ready to be stored in a header checksum field.
uint16_t checksummer::get() const {
  __int128 csum1 = (csum & 0xffffffffffffffff) + (csum >> 64);
  // deliberately shadows the member: all further folding is local
  uint64_t csum = (csum1 & 0xffffffffffffffff) + (csum1 >> 64);
  csum = (csum & 0xffff) + ((csum >> 16) & 0xffff) + ((csum >> 32) & 0xffff) + (csum >> 48);
  csum = (csum & 0xffff) + (csum >> 16);
  csum = (csum & 0xffff) + (csum >> 16);
  return htons(~csum);
}
// Accumulate every fragment of a (possibly scattered) packet; byte
// parity is carried across fragments by the buffer overload.
void checksummer::sum(const Packet& p) {
  for (auto&& f : p.fragments()) {
    sum(f.base, f.size);
  }
}
uint16_t ip_checksum(const void* data, size_t len) {
checksummer cksum;
cksum.sum(reinterpret_cast<const char*>(data), len);
return cksum.get();
}
| 2,055 | 27.957746 | 92 |
cc
|
null |
ceph-main/src/msg/async/dpdk/IPChecksum.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
/*
* This file is open source software, licensed to you under the terms
* of the Apache License, Version 2.0 (the "License"). See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. You may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (C) 2014 Cloudius Systems, Ltd.
*/
#ifndef CEPH_MSG_CHECKSUM_H_
#define CEPH_MSG_CHECKSUM_H_
#include <cstdint>
#include <cstddef>
#include <arpa/inet.h>
#include "Packet.h"
uint16_t ip_checksum(const void* data, size_t len);
// Incremental Internet-checksum (RFC 1071) accumulator.  Bytes are summed
// as big-endian 16-bit words into a wide accumulator; get() folds the
// carries and returns the final one's complement.  Because 16-bit word
// addition is commutative, data may be fed piecewise — `odd` tracks
// whether we are mid-way through a word between calls.
struct checksummer {
  __int128 csum = 0;
  bool odd = false;   // true when an odd number of bytes has been summed
  void sum(const char* data, size_t len);
  void sum(const Packet& p);
  // One byte: high half of a word on an even boundary, low half otherwise.
  void sum(uint8_t data) {
    if (!odd) {
      csum += data << 8;
    } else {
      csum += data;
    }
    odd = !odd;
  }
  // One 16-bit word; on an odd boundary it is split into bytes to keep
  // the stream's byte pairing intact.
  void sum(uint16_t data) {
    if (odd) {
      sum(uint8_t(data >> 8));
      sum(uint8_t(data));
    } else {
      csum += data;
    }
  }
  // One 32-bit word; the order the two halves are added in does not
  // affect the one's-complement sum.
  void sum(uint32_t data) {
    if (odd) {
      sum(uint16_t(data));
      sum(uint16_t(data >> 16));
    } else {
      csum += data;
    }
  }
  void sum_many() {}
  // Sum a heterogeneous sequence of values in order (e.g. pseudo-header).
  template <typename T0, typename... T>
  void sum_many(T0 data, T... rest) {
    sum(data);
    sum_many(rest...);
  }
  uint16_t get() const;
};
#endif /* CEPH_MSG_CHECKSUM_H_ */
| 1,807 | 23.767123 | 79 |
h
|
null |
ceph-main/src/msg/async/dpdk/Packet.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
/*
* This file is open source software, licensed to you under the terms
* of the Apache License, Version 2.0 (the "License"). See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. You may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (C) 2014 Cloudius Systems, Ltd.
*/
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 XSky <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <iostream>
#include <algorithm>
#include <cctype>
#include "capture.h"
#include "Packet.h"
// Out-of-class definitions for the constexpr statics (pre-C++17 odr-use).
constexpr size_t Packet::internal_data_size;
constexpr size_t Packet::default_nr_frags;
// Collapse enough fragments starting at `at_frag` into a single
// heap-allocated fragment covering at least `desired_size` bytes, so a
// header spanning fragment boundaries can be read contiguously.
void Packet::linearize(size_t at_frag, size_t desired_size) {
  _impl->unuse_internal_data();
  size_t nr_frags = 0;
  size_t accum_size = 0;
  // Count how many fragments are needed to cover desired_size
  while (accum_size < desired_size) {
    accum_size += _impl->frags[at_frag + nr_frags].size;
    ++nr_frags;
  }
  // Copy those fragments into one new contiguous buffer
  char *new_frag = new char[accum_size];
  auto p = new_frag;
  for (size_t i = 0; i < nr_frags; ++i) {
    auto& f = _impl->frags[at_frag + i];
    p = std::copy(f.base, f.base + f.size, p);
  }
  // collapse nr_frags into one fragment
  std::copy(_impl->frags + at_frag + nr_frags, _impl->frags + _impl->_nr_frags,
            _impl->frags + at_frag + 1);
  _impl->_nr_frags -= nr_frags - 1;
  _impl->frags[at_frag] = fragment{new_frag, accum_size};
  if (at_frag == 0 && desired_size == len()) {
    // We can drop the old buffer safely
    // (moving the old deleter into a discarded local destroys it here)
    auto x = std::move(_impl->_deleter);
    _impl->_deleter = make_deleter([new_frag] { delete []new_frag; });
  } else {
    // Chain: freeing the new buffer must also run the old deleter, which
    // still owns the untouched fragments.
    auto del = std::bind(
      [new_frag](deleter &d) { delete []new_frag; }, std::move(_impl->_deleter));
    _impl->_deleter = make_deleter(std::move(del));
  }
}
// Event callback that destroys a packet's deleter on the cpu that
// originally owned the buffers, then runs a completion and self-deletes.
class C_free_on_cpu : public EventCallback {
  deleter del;
  std::function<void()> cb;
 public:
  C_free_on_cpu(deleter &&d, std::function<void()> &&c):
      del(std::move(d)), cb(std::move(c)) {}
  void do_request(uint64_t fd) {
    // deleter needs to be moved from lambda capture to be destroyed here
    // otherwise deleter destructor will be called on a cpu that called
    // create_external_event when work_item is destroyed.
    deleter xxx(std::move(del));
    cb();
    delete this;
  }
};
// Rebind this packet's buffer release to `center`'s cpu: when the
// returned packet is destroyed (possibly on another cpu), the original
// deleter and `cb` are dispatched back to `center` instead of running
// locally.  Returns a packet sharing the same impl data.
Packet Packet::free_on_cpu(EventCenter *center, std::function<void()> cb)
{
  // Wrap the current deleter so destruction posts it to `center`.
  auto del = std::bind(
    [center, cb] (deleter &del) mutable {
      center->dispatch_event_external(new C_free_on_cpu(std::move(del), std::move(cb)));
    }, std::move(_impl->_deleter));
  // make new deleter that runs old deleter on an origin cpu
  _impl->_deleter = make_deleter(deleter(), std::move(del));
  return Packet(impl::copy(_impl.get()));
}
// Debug dump of a packet: each fragment is printed either as a quoted,
// escaped string (when all its bytes fall in the tab..0x7f range) or as
// a brace-enclosed list of raw bytes.
std::ostream& operator<<(std::ostream& os, const Packet& p) {
  os << "Packet{";
  bool first = true;
  for (auto&& frag : p.fragments()) {
    if (!first) {
      os << ", ";
    }
    first = false;
    if (std::all_of(frag.base, frag.base + frag.size, [] (int c) { return c >= 9 && c <= 0x7f; })) {
      os << '"';
      for (auto p = frag.base; p != frag.base + frag.size; ++p) {
        auto c = *p;
        if (isprint(c)) {
          os << c;
        } else if (c == '\r') {
          os << "\\r";
        } else if (c == '\n') {
          os << "\\n";
        } else if (c == '\t') {
          os << "\\t";
        } else {
          uint8_t b = c;
          // FIX: emit real hex digits.  The original streamed (b / 16)
          // and (b % 16) as decimal numbers, so e.g. 0x1f printed as
          // "\x115" instead of "\x1f".
          static const char hexdigits[] = "0123456789abcdef";
          os << "\\x" << hexdigits[b >> 4] << hexdigits[b & 0xf];
        }
      }
      os << '"';
    } else {
      os << "{";
      bool nfirst = true;
      for (auto p = frag.base; p != frag.base + frag.size; ++p) {
        if (!nfirst) {
          os << " ";
        }
        nfirst = false;
        uint8_t b = *p;
        // NOTE(review): uint8_t streams as a character, not a number;
        // if a numeric dump was intended this should be int(b) — confirm.
        os << b;
      }
      os << "}";
    }
  }
  os << "}";
  return os;
}
| 4,497 | 29.598639 | 100 |
cc
|
null |
ceph-main/src/msg/async/dpdk/Packet.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
/*
* This file is open source software, licensed to you under the terms
* of the Apache License, Version 2.0 (the "License"). See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. You may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (C) 2014 Cloudius Systems, Ltd.
*/
#ifndef CEPH_MSG_PACKET_H_
#define CEPH_MSG_PACKET_H_
#include <vector>
#include <algorithm>
#include <iosfwd>
#include "include/types.h"
#include "common/deleter.h"
#include "msg/async/Event.h"
#include "const.h"
// A contiguous span of packet bytes.  Non-owning view: the backing
// storage is owned and released by the enclosing Packet's deleter.
struct fragment {
  char* base;   // start of the span
  size_t size;  // length in bytes
};
// Per-packet hardware offload metadata (checksum offload, TSO, VLAN).
// Field semantics are set/consumed by the protocol code using this
// struct; header lengths are in bytes.
struct offload_info {
  ip_protocol_num protocol = ip_protocol_num::unused;
  bool needs_csum = false;      // checksum still to be computed (by HW or SW)
  uint8_t ip_hdr_len = 20;
  uint8_t tcp_hdr_len = 20;
  uint8_t udp_hdr_len = 8;
  bool needs_ip_csum = false;   // IP header checksum still to be computed
  bool reassembled = false;     // assembled from multiple on-wire packets
  uint16_t tso_seg_size = 0;    // TSO segment size; 0 presumably disables TSO
  // HW stripped VLAN header (CPU order)
  std::optional<uint16_t> vlan_tci;
};
// Zero-copy friendly packet class
//
// For implementing zero-copy, we need a flexible destructor that can
// destroy packet data in different ways: decrementing a reference count,
// or calling a free()-like function.
//
// Moreover, we need different destructors for each set of fragments within
// a single fragment. For example, a header and trailer might need delete[]
// to be called, while the internal data needs a reference count to be
// released. Matters are complicated in that fragments can be split
// (due to virtual/physical translation).
//
// To implement this, we associate each packet with a single destructor,
// but allow composing a packet from another packet plus a fragment to
// be added, with its own destructor, causing the destructors to be chained.
//
// The downside is that the data needed for the destructor is duplicated,
// if it is already available in the fragment itself.
//
// As an optimization, when we allocate small fragments, we allocate some
// extra space, so prepending to the packet does not require extra
// allocations. This is useful when adding headers.
//
class Packet {
  // enough for lots of headers, not quite two cache lines:
  static constexpr size_t internal_data_size = 128 - 16;
  static constexpr size_t default_nr_frags = 4;
  // Minimal vector-like, non-owning view over a fragment array.
  struct pseudo_vector {
    fragment* _start;
    fragment* _finish;
    pseudo_vector(fragment* start, size_t nr)
      : _start(start), _finish(_start + nr) {}
    fragment* begin() { return _start; }
    fragment* end() { return _finish; }
    fragment& operator[](size_t idx) { return _start[idx]; }
  };
  // Packet state: fragment table, total length, offload metadata and
  // the destructor chain for the backing buffers.  Allocated with a
  // trailing flexible array of fragment descriptors (see operator new).
  struct impl {
    // when destroyed, virtual destructor will reclaim resources
    deleter _deleter;
    unsigned _len = 0;
    uint16_t _nr_frags = 0;
    uint16_t _allocated_frags;
    offload_info _offload_info;
    std::optional<uint32_t> rss_hash;
    char data[internal_data_size]; // only frags[0] may use
    unsigned headroom = internal_data_size; // in data
    // FIXME: share data/frags space
    fragment frags[];
    explicit impl(size_t nr_frags = default_nr_frags);
    impl(const impl&) = delete;
    impl(fragment frag, size_t nr_frags = default_nr_frags);
    pseudo_vector fragments() { return { frags, _nr_frags }; }
    // Allocate an impl with room for at least nr_frags fragments.
    static std::unique_ptr<impl> allocate(size_t nr_frags) {
      nr_frags = std::max(nr_frags, default_nr_frags);
      return std::unique_ptr<impl>(new (nr_frags) impl(nr_frags));
    }
    // Clone 'old' into a new impl sized for 'nr' fragments, stealing
    // old's deleter and re-copying any internal-buffer data.
    static std::unique_ptr<impl> copy(impl* old, size_t nr) {
      auto n = allocate(nr);
      n->_deleter = std::move(old->_deleter);
      n->_len = old->_len;
      n->_nr_frags = old->_nr_frags;
      n->headroom = old->headroom;
      n->_offload_info = old->_offload_info;
      n->rss_hash = old->rss_hash;
      std::copy(old->frags, old->frags + old->_nr_frags, n->frags);
      old->copy_internal_fragment_to(n.get());
      return n;
    }
    static std::unique_ptr<impl> copy(impl* old) {
      return copy(old, old->_nr_frags);
    }
    // Return 'old' unchanged if it can take extra_frags more
    // fragments, otherwise a grown copy (at least doubling the table).
    static std::unique_ptr<impl> allocate_if_needed(std::unique_ptr<impl> old, size_t extra_frags) {
      if (old->_allocated_frags >= old->_nr_frags + extra_frags) {
        return old;
      }
      return copy(old.get(), std::max<size_t>(old->_nr_frags + extra_frags, 2 * old->_nr_frags));
    }
    // Over-allocate so the trailing frags[] array has nr_frags slots.
    void* operator new(size_t size, size_t nr_frags = default_nr_frags) {
      ceph_assert(nr_frags == uint16_t(nr_frags));
      return ::operator new(size + nr_frags * sizeof(fragment));
    }
    // Matching the operator new above
    void operator delete(void* ptr, size_t nr_frags) {
      return ::operator delete(ptr);
    }
    // Since the above "placement delete" hides the global one, expose it
    void operator delete(void* ptr) {
      return ::operator delete(ptr);
    }
    // True when frags[0] points into the inline 'data' buffer.
    bool using_internal_data() const {
      return _nr_frags
        && frags[0].base >= data
        && frags[0].base < data + internal_data_size;
    }
    // Move frags[0] out of the inline buffer into heap storage so the
    // fragment table can be handed off without aliasing 'data'.
    void unuse_internal_data() {
      if (!using_internal_data()) {
        return;
      }
      auto buf = static_cast<char*>(::malloc(frags[0].size));
      if (!buf) {
        throw std::bad_alloc();
      }
      deleter d = make_free_deleter(buf);
      std::copy(frags[0].base, frags[0].base + frags[0].size, buf);
      frags[0].base = buf;
      _deleter.append(std::move(d));
      headroom = internal_data_size;
    }
    // Copy the inline-buffer fragment (if any) into 'to' at the same
    // headroom offset; used by copy() above.
    void copy_internal_fragment_to(impl* to) {
      if (!using_internal_data()) {
        return;
      }
      to->frags[0].base = to->data + headroom;
      std::copy(frags[0].base, frags[0].base + frags[0].size,
                to->frags[0].base);
    }
  };
  explicit Packet(std::unique_ptr<impl>&& impl) : _impl(std::move(impl)) {}
  std::unique_ptr<impl> _impl;
public:
  // Wrap data with static storage duration (no deleter required).
  static Packet from_static_data(const char* data, size_t len) {
    return {fragment{const_cast<char*>(data), len}, deleter()};
  }
  // build empty Packet
  Packet();
  // build empty Packet with nr_frags allocated
  explicit Packet(size_t nr_frags);
  // move existing Packet
  Packet(Packet&& x) noexcept;
  // copy data into Packet
  Packet(const char* data, size_t len);
  // copy data into Packet
  explicit Packet(fragment frag);
  // zero-copy single fragment
  Packet(fragment frag, deleter del);
  // zero-copy multiple fragments
  Packet(std::vector<fragment> frag, deleter del);
  // build Packet with iterator
  template <typename Iterator>
  Packet(Iterator begin, Iterator end, deleter del);
  // append fragment (copying new fragment)
  Packet(Packet&& x, fragment frag);
  // prepend fragment (copying new fragment, with header optimization)
  Packet(fragment frag, Packet&& x);
  // prepend fragment (zero-copy)
  Packet(fragment frag, deleter del, Packet&& x);
  // append fragment (zero-copy)
  Packet(Packet&& x, fragment frag, deleter d);
  // append deleter
  Packet(Packet&& x, deleter d);
  // Move-assignment via destroy + placement-new move-construction.
  Packet& operator=(Packet&& x) {
    if (this != &x) {
      this->~Packet();
      new (this) Packet(std::move(x));
    }
    return *this;
  }
  // Total payload length in bytes.
  unsigned len() const { return _impl->_len; }
  unsigned memory() const { return len() + sizeof(Packet::impl); }
  fragment frag(unsigned idx) const { return _impl->frags[idx]; }
  fragment& frag(unsigned idx) { return _impl->frags[idx]; }
  unsigned nr_frags() const { return _impl->_nr_frags; }
  pseudo_vector fragments() const { return { _impl->frags, _impl->_nr_frags }; }
  fragment* fragment_array() const { return _impl->frags; }
  // share Packet data (reference counted, non COW)
  Packet share();
  Packet share(size_t offset, size_t len);
  // Append p's fragments (and its deleter chain) to this packet.
  void append(Packet&& p);
  void trim_front(size_t how_much);
  void trim_back(size_t how_much);
  // get a header pointer, linearizing if necessary
  template <typename Header>
  Header* get_header(size_t offset = 0);
  // get a header pointer, linearizing if necessary
  char* get_header(size_t offset, size_t size);
  // prepend a header (default-initializing it)
  template <typename Header>
  Header* prepend_header(size_t extra_size = 0);
  // prepend a header (uninitialized!)
  char* prepend_uninitialized_header(size_t size);
  // Defer running the current deleter chain to the cpu owning 'c'.
  Packet free_on_cpu(EventCenter *c, std::function<void()> cb = []{});
  // Coalesce all fragments into one contiguous buffer.
  void linearize() { return linearize(0, len()); }
  void reset() { _impl.reset(); }
  // Ensure capacity for at least n_frags fragment slots.
  void reserve(int n_frags) {
    if (n_frags > _impl->_nr_frags) {
      auto extra = n_frags - _impl->_nr_frags;
      _impl = impl::allocate_if_needed(std::move(_impl), extra);
    }
  }
  std::optional<uint32_t> rss_hash() {
    return _impl->rss_hash;
  }
  void set_rss_hash(uint32_t hash) {
    _impl->rss_hash = hash;
  }
private:
  // Coalesce fragments starting at at_frag until the fragment at
  // at_frag holds at least desired_size contiguous bytes.
  void linearize(size_t at_frag, size_t desired_size);
  // Try to extend frags[0] backwards into the inline-buffer headroom.
  bool allocate_headroom(size_t size);
public:
  class offload_info offload_info() const { return _impl->_offload_info; }
  class offload_info& offload_info_ref() { return _impl->_offload_info; }
  void set_offload_info(class offload_info oi) { _impl->_offload_info = oi; }
};
// Debug representation of a Packet's fragments (defined out of line).
std::ostream& operator<<(std::ostream& os, const Packet& p);
// move existing Packet: steals x's impl; x is left without state and
// should only be assigned to or destroyed afterwards.
inline Packet::Packet(Packet&& x) noexcept
  : _impl(std::move(x._impl)) {
}
// Build an empty impl able to hold nr_frags fragment descriptors
// (storage for frags[] was provided by the matching operator new).
inline Packet::impl::impl(size_t nr_frags)
  : _len(0), _allocated_frags(nr_frags) {
}
// Build an impl holding a private copy of frag's bytes: a small
// fragment lands at the tail of the inline 'data' buffer (leaving
// headroom in front of it for later prepends), a larger one goes to a
// malloc'd buffer whose release is chained onto _deleter.
inline Packet::impl::impl(fragment frag, size_t nr_frags)
  : _len(frag.size), _allocated_frags(nr_frags) {
  ceph_assert(_allocated_frags > _nr_frags);
  if (frag.size <= internal_data_size) {
    headroom -= frag.size;
    frags[0] = { data + headroom, frag.size };
  } else {
    auto buf = static_cast<char*>(::malloc(frag.size));
    if (!buf) {
      throw std::bad_alloc();
    }
    deleter d = make_free_deleter(buf);
    frags[0] = { buf, frag.size };
    _deleter.append(std::move(d));
  }
  std::copy(frag.base, frag.base + frag.size, frags[0].base);
  ++_nr_frags;
}
// build empty Packet (room for one fragment; allocate() rounds up).
inline Packet::Packet(): _impl(impl::allocate(1)) {
}
// build empty Packet with nr_frags allocated
inline Packet::Packet(size_t nr_frags): _impl(impl::allocate(nr_frags)) {
}
// copy data into Packet (impl's fragment ctor makes a private copy).
inline Packet::Packet(fragment frag): _impl(new impl(frag)) {
}
// copy data into Packet
inline Packet::Packet(const char* data, size_t size):
  Packet(fragment{const_cast<char*>(data), size}) {
}
// Zero-copy single-fragment packet: adopt frag as-is; 'd' owns the
// backing storage and runs when the packet releases its data.
inline Packet::Packet(fragment frag, deleter d)
  : _impl(impl::allocate(1)) {
  _impl->_len = frag.size;
  _impl->frags[_impl->_nr_frags++] = frag;
  _impl->_deleter = std::move(d);
}
// Zero-copy multi-fragment packet: copy only the fragment descriptors
// (not the bytes); 'd' owns all the backing storage.
inline Packet::Packet(std::vector<fragment> frag, deleter d)
  : _impl(impl::allocate(frag.size())) {
  _impl->_deleter = std::move(d);
  _impl->_nr_frags = frag.size();
  _impl->_len = 0;
  for (size_t i = 0; i < frag.size(); ++i) {
    _impl->frags[i] = frag[i];
    _impl->_len += frag[i].size;
  }
}
// Build a zero-copy packet from a fragment range.  Fragment count and
// total byte length are computed up front so the impl can be sized in
// a single allocation.
template <typename Iterator>
inline Packet::Packet(Iterator begin, Iterator end, deleter del) {
  const unsigned nr_frags = std::distance(begin, end);
  unsigned len = 0;
  for (auto it = begin; it != end; ++it) {
    len += it->size;
  }
  _impl = impl::allocate(nr_frags);
  _impl->_deleter = std::move(del);
  _impl->_len = len;
  _impl->_nr_frags = nr_frags;
  std::copy(begin, end, _impl->frags);
}
// append fragment (copying new fragment): grow x's fragment table if
// needed, heap-copy frag's bytes, and chain delete[] of that copy
// after the existing deleter.
inline Packet::Packet(Packet&& x, fragment frag)
  : _impl(impl::allocate_if_needed(std::move(x._impl), 1)) {
  _impl->_len += frag.size;
  char* buf = new char[frag.size];
  std::copy(frag.base, frag.base + frag.size, buf);
  _impl->frags[_impl->_nr_frags++] = {buf, frag.size};
  _impl->_deleter = make_deleter(std::move(_impl->_deleter), [buf] {
    delete[] buf;
  });
}
// Try to make room for 'size' bytes at the front of the packet using
// the inline buffer's headroom.  On success frags[0] grows backwards
// over the headroom (inserting a fresh zero-length inline fragment
// first when frags[0] is not the inline buffer) and _len grows by
// 'size'; returns true.  On failure the packet is left unchanged.
inline bool Packet::allocate_headroom(size_t size) {
  if (_impl->headroom >= size) {
    _impl->_len += size;
    if (!_impl->using_internal_data()) {
      // frags[0] is external: shift every fragment up one slot and
      // create an empty fragment at the top of the inline buffer,
      // which the code below then extends backwards.
      _impl = impl::allocate_if_needed(std::move(_impl), 1);
      std::copy_backward(_impl->frags, _impl->frags + _impl->_nr_frags,
                         _impl->frags + _impl->_nr_frags + 1);
      _impl->frags[0] = { _impl->data + internal_data_size, 0 };
      ++_impl->_nr_frags;
    }
    _impl->headroom -= size;
    _impl->frags[0].base -= size;
    _impl->frags[0].size += size;
    return true;
  } else {
    return false;
  }
}
// prepend fragment (copying new fragment, with header optimization):
// first try to copy the bytes into the inline-buffer headroom;
// otherwise fall back to a heap copy inserted as a new first fragment.
inline Packet::Packet(fragment frag, Packet&& x)
  : _impl(std::move(x._impl)) {
  // try to prepend into existing internal fragment
  if (allocate_headroom(frag.size)) {
    std::copy(frag.base, frag.base + frag.size, _impl->frags[0].base);
    return;
  } else {
    // didn't work out, allocate and copy
    _impl->unuse_internal_data();
    _impl = impl::allocate_if_needed(std::move(_impl), 1);
    _impl->_len += frag.size;
    char *buf = new char[frag.size];
    std::copy(frag.base, frag.base + frag.size, buf);
    // shift existing fragments up one slot to make room at index 0
    std::copy_backward(_impl->frags, _impl->frags + _impl->_nr_frags,
                       _impl->frags + _impl->_nr_frags + 1);
    ++_impl->_nr_frags;
    _impl->frags[0] = {buf, frag.size};
    // chain delete[] of the copy after the existing deleter
    _impl->_deleter = make_deleter(
      std::move(_impl->_deleter), [buf] { delete []buf; });
  }
}
// append fragment (zero-copy): adopt frag without copying its bytes.
// 'd' owns frag's storage; the packet's previous deleter chain is
// appended to d, and d becomes the packet's deleter.
inline Packet::Packet(Packet&& x, fragment frag, deleter d)
  : _impl(impl::allocate_if_needed(std::move(x._impl), 1)) {
  _impl->_len += frag.size;
  _impl->frags[_impl->_nr_frags++] = frag;
  d.append(std::move(_impl->_deleter));
  _impl->_deleter = std::move(d);
}
// append deleter: chain extra cleanup 'd' to run when the packet's
// data is released.
inline Packet::Packet(Packet&& x, deleter d): _impl(std::move(x._impl)) {
  _impl->_deleter.append(std::move(d));
}
// Append p's fragments to this packet, taking over p's deleter chain.
inline void Packet::append(Packet&& p) {
  // Empty packet: just take over p wholesale.
  if (!_impl->_len) {
    *this = std::move(p);
    return;
  }
  _impl = impl::allocate_if_needed(std::move(_impl), p._impl->_nr_frags);
  _impl->_len += p._impl->_len;
  // p's frags[0] may point into p's inline buffer, which dies with p;
  // force it out to heap storage before we alias its fragments.
  p._impl->unuse_internal_data();
  std::copy(p._impl->frags, p._impl->frags + p._impl->_nr_frags,
            _impl->frags + _impl->_nr_frags);
  _impl->_nr_frags += p._impl->_nr_frags;
  // Append our deleter chain onto p's, then adopt the combined chain.
  p._impl->_deleter.append(std::move(_impl->_deleter));
  _impl->_deleter = std::move(p._impl->_deleter);
}
// Return a pointer to 'size' contiguous bytes starting at 'offset',
// linearizing fragments when the range straddles a fragment boundary.
// Returns nullptr when the requested range is out of bounds.
inline char* Packet::get_header(size_t offset, size_t size) {
  if (offset + size > _impl->_len) {
    return nullptr;
  }
  // Walk to the fragment containing 'offset', making the offset
  // relative to that fragment.
  size_t idx = 0;
  for (; idx != _impl->_nr_frags; ++idx) {
    if (offset < _impl->frags[idx].size) {
      break;
    }
    offset -= _impl->frags[idx].size;
  }
  if (idx == _impl->_nr_frags) {
    return nullptr;
  }
  // Ensure the whole range lives inside this one fragment.
  if (offset + size > _impl->frags[idx].size) {
    linearize(idx, offset + size);
  }
  return _impl->frags[idx].base + offset;
}
// Typed header accessor at 'offset', linearizing if necessary;
// nullptr when fewer than sizeof(Header) bytes remain.
template <typename Header>
inline Header* Packet::get_header(size_t offset) {
  return reinterpret_cast<Header*>(get_header(offset, sizeof(Header)));
}
// Drop 'how_much' bytes from the front of the packet.
inline void Packet::trim_front(size_t how_much) {
  ceph_assert(how_much <= _impl->_len);
  _impl->_len -= how_much;
  size_t i = 0;
  // Drop whole leading fragments first.
  while (how_much && how_much >= _impl->frags[i].size) {
    how_much -= _impl->frags[i++].size;
  }
  std::copy(_impl->frags + i, _impl->frags + _impl->_nr_frags, _impl->frags);
  _impl->_nr_frags -= i;
  if (!_impl->using_internal_data()) {
    _impl->headroom = internal_data_size;
  }
  if (how_much) {
    // Partial trim of the (new) first fragment; if it lives in the
    // inline buffer, the freed front space becomes headroom.
    if (_impl->using_internal_data()) {
      _impl->headroom += how_much;
    }
    _impl->frags[0].base += how_much;
    _impl->frags[0].size -= how_much;
  }
}
// Drop 'how_much' bytes from the end of the packet.
inline void Packet::trim_back(size_t how_much) {
  ceph_assert(how_much <= _impl->_len);
  _impl->_len -= how_much;
  size_t i = _impl->_nr_frags - 1;
  // Drop whole trailing fragments first.
  while (how_much && how_much >= _impl->frags[i].size) {
    how_much -= _impl->frags[i--].size;
  }
  _impl->_nr_frags = i + 1;
  if (how_much) {
    // Partial trim of the (new) last fragment.
    _impl->frags[i].size -= how_much;
    if (i == 0 && _impl->using_internal_data()) {
      // NOTE(review): headroom grows here although the space freed by a
      // back-trim is behind the fragment, not in front of it; a later
      // prepend via allocate_headroom could then move frags[0].base
      // below 'data'.  This matches the upstream (seastar) code, but
      // looks suspicious — confirm the intended invariant.
      _impl->headroom += how_much;
    }
  }
}
// prepend a header (default-initializing it); extra_size reserves
// additional bytes beyond sizeof(Header) (e.g. for options/padding).
template <typename Header>
Header* Packet::prepend_header(size_t extra_size) {
  auto h = prepend_uninitialized_header(sizeof(Header) + extra_size);
  return new (h) Header{};
}
// prepend a header (uninitialized!): returns a pointer to 'size' fresh
// bytes at the very front of the packet; the caller must fill them in.
inline char* Packet::prepend_uninitialized_header(size_t size) {
  if (!allocate_headroom(size)) {
    // didn't work out, allocate and copy
    _impl->unuse_internal_data();
    // try again, after unuse_internal_data we may have space after all
    if (!allocate_headroom(size)) {
      // failed
      _impl->_len += size;
      _impl = impl::allocate_if_needed(std::move(_impl), 1);
      // heap-allocate the header and splice it in as a new first
      // fragment, shifting the existing fragments up one slot
      char *buf = new char[size];
      std::copy_backward(_impl->frags, _impl->frags + _impl->_nr_frags,
                         _impl->frags + _impl->_nr_frags + 1);
      ++_impl->_nr_frags;
      _impl->frags[0] = {buf, size};
      _impl->_deleter = make_deleter(std::move(_impl->_deleter),
                                     [buf] { delete []buf; });
    }
  }
  return _impl->frags[0].base;
}
// share the whole packet (reference counted, non COW).
inline Packet Packet::share() {
  return share(0, _impl->_len);
}
// Create a packet aliasing bytes [offset, offset+len) of this one.
// Both packets reference the same underlying buffers; lifetime is
// tied together via deleter::share() (reference counted, non COW).
inline Packet Packet::share(size_t offset, size_t len) {
  _impl->unuse_internal_data(); // FIXME: eliminate?
  Packet n;
  n._impl = impl::allocate_if_needed(std::move(n._impl), _impl->_nr_frags);
  size_t idx = 0;
  // Skip fragments wholly before 'offset', making it fragment-relative.
  while (offset > 0 && offset >= _impl->frags[idx].size) {
    offset -= _impl->frags[idx++].size;
  }
  // Alias fragments (possibly partial at either end) until 'len'
  // bytes are covered.
  while (n._impl->_len < len) {
    auto& f = _impl->frags[idx++];
    auto fsize = std::min(len - n._impl->_len, f.size - offset);
    n._impl->frags[n._impl->_nr_frags++] = { f.base + offset, fsize };
    n._impl->_len += fsize;
    offset = 0;
  }
  n._impl->_offload_info = _impl->_offload_info;
  ceph_assert(!n._impl->_deleter);
  n._impl->_deleter = _impl->_deleter.share();
  return n;
}
#endif /* CEPH_MSG_PACKET_H_ */
| 17,562 | 30.932727 | 100 |
h
|
null |
ceph-main/src/msg/async/dpdk/PacketUtil.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
/*
* This file is open source software, licensed to you under the terms
* of the Apache License, Version 2.0 (the "License"). See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. You may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (C) 2014 Cloudius Systems, Ltd.
*/
#ifndef CEPH_MSG_PACKET_UTIL_H_
#define CEPH_MSG_PACKET_UTIL_H_
#include <map>
#include <iostream>
#include "Packet.h"
// Reassembly helper: collects data segments keyed by their starting
// Offset and merges overlapping/adjacent ones as they arrive (e.g.
// out-of-order TCP payload).  Each <Offset, Tag> instantiation keeps
// its own thread-local count of packet linearizations.
template <typename Offset, typename Tag>
class packet_merger {
 private:
  // Thread-local linearization counter for this instantiation.
  static uint64_t& linearizations_ref() {
    static thread_local uint64_t linearization_count;
    return linearization_count;
  }
 public:
  // Offset -> contiguous segment starting at that offset.
  std::map<Offset, Packet> map;
  static uint64_t linearizations() {
    return linearizations_ref();
  }
  // Insert segment 'p' covering [offset, offset + p.len()), merging it
  // with any existing segments it touches.
  void merge(Offset offset, Packet p) {
    bool insert = true;
    auto beg = offset;
    auto end = beg + p.len();
    // First, try to merge the packet with existing segment
    for (auto it = map.begin(); it != map.end();) {
      auto& seg_pkt = it->second;
      auto seg_beg = it->first;
      auto seg_end = seg_beg + seg_pkt.len();
      // There are 6 cases:
      if (seg_beg <= beg && end <= seg_end) {
        // 1) seg_beg beg end seg_end
        // We already have data in this packet
        return;
      } else if (beg <= seg_beg && seg_end <= end) {
        // 2) beg seg_beg seg_end end
        // The new segment contains more data than this old segment
        // Delete the old one, insert the new one
        it = map.erase(it);
        insert = true;
        break;
      } else if (beg < seg_beg && seg_beg <= end && end <= seg_end) {
        // 3) beg seg_beg end seg_end
        // Merge two segments, trim front of old segment
        auto trim = end - seg_beg;
        seg_pkt.trim_front(trim);
        p.append(std::move(seg_pkt));
        // Delete the old one, insert the new one
        it = map.erase(it);
        insert = true;
        break;
      } else if (seg_beg <= beg && beg <= seg_end && seg_end < end) {
        // 4) seg_beg beg seg_end end
        // Merge two segments, trim front of new segment
        auto trim = seg_end - beg;
        p.trim_front(trim);
        // Append new data to the old segment, keep the old segment
        seg_pkt.append(std::move(p));
        seg_pkt.linearize();
        ++linearizations_ref();
        insert = false;
        break;
      } else {
        // 5) beg end < seg_beg seg_end
        // or
        // 6) seg_beg seg_end < beg end
        // Can not merge with this segment, keep looking
        it++;
        insert = true;
      }
    }
    if (insert) {
      p.linearize();
      ++linearizations_ref();
      map.emplace(beg, std::move(p));
    }
    // Second, merge adjacent segments after this packet has been merged,
    // because this packet might fill a "hole" and make two adjacent
    // segments mergeable
    for (auto it = map.begin(); it != map.end();) {
      // The first segment
      auto& seg_pkt = it->second;
      auto seg_beg = it->first;
      auto seg_end = seg_beg + seg_pkt.len();
      // The second segment
      auto it_next = it;
      it_next++;
      if (it_next == map.end()) {
        break;
      }
      auto& p = it_next->second;
      auto beg = it_next->first;
      auto end = beg + p.len();
      // Merge the second segment into the first segment if possible
      if (seg_beg <= beg && beg <= seg_end && seg_end < end) {
        // Merge two segments, trim front of second segment
        auto trim = seg_end - beg;
        p.trim_front(trim);
        // Append new data to the first segment, keep the first segment
        seg_pkt.append(std::move(p));
        // Delete the second segment
        map.erase(it_next);
        // Keep merging this first segment with its new next packet
        // So we do not update the iterator: it
        continue;
      } else if (end <= seg_end) {
        // The first segment has all the data in the second segment
        // Delete the second segment
        map.erase(it_next);
        continue;
      } else if (seg_end < beg) {
        // Can not merge first segment with second segment
        it = it_next;
        continue;
      } else {
        // If we reach here, we have a bug with merge.
        // NOTE(review): error path — consider std::cerr over std::cout.
        std::cout << "packet_merger: merge error\n";
        abort();
      }
    }
  }
};
#endif
| 4,892 | 30.567742 | 79 |
h
|
null |
ceph-main/src/msg/async/dpdk/TCP-Stack.h
|
/*
* This file is open source software, licensed to you under the terms
* of the Apache License, Version 2.0 (the "License"). See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. You may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (C) 2014 Cloudius Systems, Ltd.
*/
// tcp/network-stack integration
#ifndef CEPH_MSG_DPDK_TCP_STACK_H
#define CEPH_MSG_DPDK_TCP_STACK_H
// All of the types below appear in these declarations only by
// reference or pointer, so forward declarations suffice.
class ServerSocket;
class ConnectedSocket;
class ipv4_traits;
// Forward-declared so this header is self-contained even when the
// includer has not already pulled in their definitions.
struct SocketOptions;
struct entity_addr_t;

template <typename InetTraits>
class tcp;

// Set up a listening TCP/IPv4 server socket on 'port' and store it in
// *sa.  Returns 0 on success, negative on error — presumably -errno;
// confirm against the out-of-line definition.
int tcpv4_listen(tcp<ipv4_traits>& tcpv4, uint16_t port, const SocketOptions &opts,
                 int type, unsigned addr_slot, ServerSocket *sa);

// Initiate a TCP/IPv4 connection to 'addr' and store the socket in
// *sa.  Return convention as for tcpv4_listen — confirm.
int tcpv4_connect(tcp<ipv4_traits>& tcpv4, const entity_addr_t &addr,
                  ConnectedSocket *sa);
#endif
| 1,266 | 29.902439 | 83 |
h
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.